AArch64AsmParser.cpp (LLVM 4.0.0)
1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64TargetStreamer.h"
14 #include "Utils/AArch64BaseInfo.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/ADT/StringMap.h"
22 #include "llvm/ADT/StringRef.h"
23 #include "llvm/ADT/StringSwitch.h"
24 #include "llvm/ADT/Twine.h"
25 #include "llvm/MC/MCContext.h"
26 #include "llvm/MC/MCExpr.h"
27 #include "llvm/MC/MCInst.h"
28 #include "llvm/MC/MCLinkerOptimizationHint.h"
29 #include "llvm/MC/MCObjectFileInfo.h"
30 #include "llvm/MC/MCParser/MCAsmLexer.h"
31 #include "llvm/MC/MCParser/MCAsmParser.h"
32 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
33 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
34 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
35 #include "llvm/MC/MCRegisterInfo.h"
36 #include "llvm/MC/MCStreamer.h"
37 #include "llvm/MC/MCSubtargetInfo.h"
38 #include "llvm/MC/MCSymbol.h"
39 #include "llvm/MC/MCTargetOptions.h"
40 #include "llvm/MC/SubtargetFeature.h"
41 #include "llvm/Support/Casting.h"
42 #include "llvm/Support/Compiler.h"
43 #include "llvm/Support/ErrorHandling.h"
44 #include "llvm/Support/MathExtras.h"
45 #include "llvm/Support/SMLoc.h"
46 #include "llvm/Support/TargetParser.h"
47 #include "llvm/Support/TargetRegistry.h"
48 #include "llvm/Support/raw_ostream.h"
49 #include <cassert>
50 #include <cctype>
51 #include <cstdint>
52 #include <cstdio>
53 #include <string>
54 #include <tuple>
55 #include <utility>
56 #include <vector>
57 
58 using namespace llvm;
59 
60 namespace {
61 
62 class AArch64AsmParser : public MCTargetAsmParser {
63 private:
64  StringRef Mnemonic; ///< Instruction mnemonic.
65 
66  // Map of register aliases created via the .req directive.
67  StringMap<std::pair<bool, unsigned>> RegisterReqs;
68 
69  AArch64TargetStreamer &getTargetStreamer() {
70  MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
71  return static_cast<AArch64TargetStreamer &>(TS);
72  }
73 
74  SMLoc getLoc() const { return getParser().getTok().getLoc(); }
75 
76  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
77  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
78  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
79  unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
80  int tryParseRegister();
81  int tryMatchVectorRegister(StringRef &Kind, bool expected);
82  bool parseRegister(OperandVector &Operands);
83  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
84  bool parseVectorList(OperandVector &Operands);
85  bool parseOperand(OperandVector &Operands, bool isCondCode,
86  bool invertCondCode);
87 
88  bool showMatchError(SMLoc Loc, unsigned ErrCode);
89 
90  bool parseDirectiveArch(SMLoc L);
91  bool parseDirectiveCPU(SMLoc L);
92  bool parseDirectiveWord(unsigned Size, SMLoc L);
93  bool parseDirectiveInst(SMLoc L);
94 
95  bool parseDirectiveTLSDescCall(SMLoc L);
96 
97  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
98  bool parseDirectiveLtorg(SMLoc L);
99 
100  bool parseDirectiveReq(StringRef Name, SMLoc L);
101  bool parseDirectiveUnreq(SMLoc L);
102 
103  bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
104  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
105  OperandVector &Operands, MCStreamer &Out,
106  uint64_t &ErrorInfo,
107  bool MatchingInlineAsm) override;
108 /// @name Auto-generated Match Functions
109 /// {
110 
111 #define GET_ASSEMBLER_HEADER
112 #include "AArch64GenAsmMatcher.inc"
113 
114  /// }
115 
116  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
117  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
118  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
119  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
120  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
121  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
122  OperandMatchResultTy tryParsePSBHint(OperandVector &Operands);
123  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
124  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
125  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
126  OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
127  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
128  bool tryParseVectorRegister(OperandVector &Operands);
129  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
130 
131 public:
132  enum AArch64MatchResultTy {
133  Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
134 #define GET_OPERAND_DIAGNOSTIC_TYPES
135 #include "AArch64GenAsmMatcher.inc"
136  };
137  bool IsILP32;
138 
139  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
140  const MCInstrInfo &MII, const MCTargetOptions &Options)
141  : MCTargetAsmParser(Options, STI) {
142  IsILP32 = Options.getABIName() == "ilp32";
143  MCAsmParserExtension::Initialize(Parser);
144  MCStreamer &S = getParser().getStreamer();
145  if (S.getTargetStreamer() == nullptr)
146  new AArch64TargetStreamer(S);
147 
148  // Initialize the set of available features.
149  setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
150  }
151 
152  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
153  SMLoc NameLoc, OperandVector &Operands) override;
154  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
155  bool ParseDirective(AsmToken DirectiveID) override;
156  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
157  unsigned Kind) override;
158 
159  static bool classifySymbolRef(const MCExpr *Expr,
160  AArch64MCExpr::VariantKind &ELFRefKind,
161  MCSymbolRefExpr::VariantKind &DarwinRefKind,
162  int64_t &Addend);
163 };
164 
165 /// AArch64Operand - Instances of this class represent a parsed operand of an
166 /// AArch64 machine instruction.
167 class AArch64Operand : public MCParsedAsmOperand {
168 private:
169  enum KindTy {
170  k_Immediate,
171  k_ShiftedImm,
172  k_CondCode,
173  k_Register,
174  k_VectorList,
175  k_VectorIndex,
176  k_Token,
177  k_SysReg,
178  k_SysCR,
179  k_Prefetch,
180  k_ShiftExtend,
181  k_FPImm,
182  k_Barrier,
183  k_PSBHint,
184  } Kind;
185 
186  SMLoc StartLoc, EndLoc;
187 
188  struct TokOp {
189  const char *Data;
190  unsigned Length;
191  bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
192  };
193 
194  struct RegOp {
195  unsigned RegNum;
196  bool isVector;
197  };
198 
199  struct VectorListOp {
200  unsigned RegNum;
201  unsigned Count;
202  unsigned NumElements;
203  unsigned ElementKind;
204  };
205 
206  struct VectorIndexOp {
207  unsigned Val;
208  };
209 
210  struct ImmOp {
211  const MCExpr *Val;
212  };
213 
214  struct ShiftedImmOp {
215  const MCExpr *Val;
216  unsigned ShiftAmount;
217  };
218 
219  struct CondCodeOp {
220  AArch64CC::CondCode Code;
221  };
222 
223  struct FPImmOp {
224  unsigned Val; // Encoded 8-bit representation.
225  };
226 
227  struct BarrierOp {
228  const char *Data;
229  unsigned Length;
230  unsigned Val; // Not the enum since not all values have names.
231  };
232 
233  struct SysRegOp {
234  const char *Data;
235  unsigned Length;
236  uint32_t MRSReg;
237  uint32_t MSRReg;
238  uint32_t PStateField;
239  };
240 
241  struct SysCRImmOp {
242  unsigned Val;
243  };
244 
245  struct PrefetchOp {
246  const char *Data;
247  unsigned Length;
248  unsigned Val;
249  };
250 
251  struct PSBHintOp {
252  const char *Data;
253  unsigned Length;
254  unsigned Val;
255  };
256 
257  struct ShiftExtendOp {
258  AArch64_AM::ShiftExtendType Type;
259  unsigned Amount;
260  bool HasExplicitAmount;
261  };
262 
263  struct ExtendOp {
264  unsigned Val;
265  };
266 
267  union {
268  struct TokOp Tok;
269  struct RegOp Reg;
270  struct VectorListOp VectorList;
271  struct VectorIndexOp VectorIndex;
272  struct ImmOp Imm;
273  struct ShiftedImmOp ShiftedImm;
274  struct CondCodeOp CondCode;
275  struct FPImmOp FPImm;
276  struct BarrierOp Barrier;
277  struct SysRegOp SysReg;
278  struct SysCRImmOp SysCRImm;
279  struct PrefetchOp Prefetch;
280  struct PSBHintOp PSBHint;
281  struct ShiftExtendOp ShiftExtend;
282  };
283 
284  // Keep the MCContext around as the MCExprs may need to be manipulated during
285  // the add<>Operands() calls.
286  MCContext &Ctx;
287 
288 public:
289  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
290 
291  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
292  Kind = o.Kind;
293  StartLoc = o.StartLoc;
294  EndLoc = o.EndLoc;
295  switch (Kind) {
296  case k_Token:
297  Tok = o.Tok;
298  break;
299  case k_Immediate:
300  Imm = o.Imm;
301  break;
302  case k_ShiftedImm:
303  ShiftedImm = o.ShiftedImm;
304  break;
305  case k_CondCode:
306  CondCode = o.CondCode;
307  break;
308  case k_FPImm:
309  FPImm = o.FPImm;
310  break;
311  case k_Barrier:
312  Barrier = o.Barrier;
313  break;
314  case k_Register:
315  Reg = o.Reg;
316  break;
317  case k_VectorList:
318  VectorList = o.VectorList;
319  break;
320  case k_VectorIndex:
321  VectorIndex = o.VectorIndex;
322  break;
323  case k_SysReg:
324  SysReg = o.SysReg;
325  break;
326  case k_SysCR:
327  SysCRImm = o.SysCRImm;
328  break;
329  case k_Prefetch:
330  Prefetch = o.Prefetch;
331  break;
332  case k_PSBHint:
333  PSBHint = o.PSBHint;
334  break;
335  case k_ShiftExtend:
336  ShiftExtend = o.ShiftExtend;
337  break;
338  }
339  }
340 
341  /// getStartLoc - Get the location of the first token of this operand.
342  SMLoc getStartLoc() const override { return StartLoc; }
343  /// getEndLoc - Get the location of the last token of this operand.
344  SMLoc getEndLoc() const override { return EndLoc; }
345 
346  StringRef getToken() const {
347  assert(Kind == k_Token && "Invalid access!");
348  return StringRef(Tok.Data, Tok.Length);
349  }
350 
351  bool isTokenSuffix() const {
352  assert(Kind == k_Token && "Invalid access!");
353  return Tok.IsSuffix;
354  }
355 
356  const MCExpr *getImm() const {
357  assert(Kind == k_Immediate && "Invalid access!");
358  return Imm.Val;
359  }
360 
361  const MCExpr *getShiftedImmVal() const {
362  assert(Kind == k_ShiftedImm && "Invalid access!");
363  return ShiftedImm.Val;
364  }
365 
366  unsigned getShiftedImmShift() const {
367  assert(Kind == k_ShiftedImm && "Invalid access!");
368  return ShiftedImm.ShiftAmount;
369  }
370 
371  AArch64CC::CondCode getCondCode() const {
372  assert(Kind == k_CondCode && "Invalid access!");
373  return CondCode.Code;
374  }
375 
376  unsigned getFPImm() const {
377  assert(Kind == k_FPImm && "Invalid access!");
378  return FPImm.Val;
379  }
380 
381  unsigned getBarrier() const {
382  assert(Kind == k_Barrier && "Invalid access!");
383  return Barrier.Val;
384  }
385 
386  StringRef getBarrierName() const {
387  assert(Kind == k_Barrier && "Invalid access!");
388  return StringRef(Barrier.Data, Barrier.Length);
389  }
390 
391  unsigned getReg() const override {
392  assert(Kind == k_Register && "Invalid access!");
393  return Reg.RegNum;
394  }
395 
396  unsigned getVectorListStart() const {
397  assert(Kind == k_VectorList && "Invalid access!");
398  return VectorList.RegNum;
399  }
400 
401  unsigned getVectorListCount() const {
402  assert(Kind == k_VectorList && "Invalid access!");
403  return VectorList.Count;
404  }
405 
406  unsigned getVectorIndex() const {
407  assert(Kind == k_VectorIndex && "Invalid access!");
408  return VectorIndex.Val;
409  }
410 
411  StringRef getSysReg() const {
412  assert(Kind == k_SysReg && "Invalid access!");
413  return StringRef(SysReg.Data, SysReg.Length);
414  }
415 
416  unsigned getSysCR() const {
417  assert(Kind == k_SysCR && "Invalid access!");
418  return SysCRImm.Val;
419  }
420 
421  unsigned getPrefetch() const {
422  assert(Kind == k_Prefetch && "Invalid access!");
423  return Prefetch.Val;
424  }
425 
426  unsigned getPSBHint() const {
427  assert(Kind == k_PSBHint && "Invalid access!");
428  return PSBHint.Val;
429  }
430 
431  StringRef getPSBHintName() const {
432  assert(Kind == k_PSBHint && "Invalid access!");
433  return StringRef(PSBHint.Data, PSBHint.Length);
434  }
435 
436  StringRef getPrefetchName() const {
437  assert(Kind == k_Prefetch && "Invalid access!");
438  return StringRef(Prefetch.Data, Prefetch.Length);
439  }
440 
441  AArch64_AM::ShiftExtendType getShiftExtendType() const {
442  assert(Kind == k_ShiftExtend && "Invalid access!");
443  return ShiftExtend.Type;
444  }
445 
446  unsigned getShiftExtendAmount() const {
447  assert(Kind == k_ShiftExtend && "Invalid access!");
448  return ShiftExtend.Amount;
449  }
450 
451  bool hasShiftExtendAmount() const {
452  assert(Kind == k_ShiftExtend && "Invalid access!");
453  return ShiftExtend.HasExplicitAmount;
454  }
455 
456  bool isImm() const override { return Kind == k_Immediate; }
457  bool isMem() const override { return false; }
458  bool isSImm9() const {
459  if (!isImm())
460  return false;
461  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
462  if (!MCE)
463  return false;
464  int64_t Val = MCE->getValue();
465  return (Val >= -256 && Val < 256);
466  }
467  bool isSImm7s4() const {
468  if (!isImm())
469  return false;
470  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
471  if (!MCE)
472  return false;
473  int64_t Val = MCE->getValue();
474  return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
475  }
476  bool isSImm7s8() const {
477  if (!isImm())
478  return false;
479  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
480  if (!MCE)
481  return false;
482  int64_t Val = MCE->getValue();
483  return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
484  }
485  bool isSImm7s16() const {
486  if (!isImm())
487  return false;
488  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
489  if (!MCE)
490  return false;
491  int64_t Val = MCE->getValue();
492  return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
493  }
494 
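 // [Editor's sketch, not part of the original file.] The isSImm7s4/s8/s16
 // predicates above all reduce to "the offset is a multiple of the access
 // size and the scaled value fits a signed 7-bit field", which is the form
 // load/store-pair offsets take. A hypothetical standalone equivalent:
 //
 //   bool isValidSImm7Scaled(int64_t Val, int64_t Scale) {
 //     return (Val % Scale) == 0 &&
 //            (Val / Scale) >= -64 && (Val / Scale) <= 63;
 //   }
 //
 // e.g. Scale == 8 accepts -512..504 in steps of 8, matching isSImm7s8().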
495  bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
496  AArch64MCExpr::VariantKind ELFRefKind;
497  MCSymbolRefExpr::VariantKind DarwinRefKind;
498  int64_t Addend;
499  if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
500  Addend)) {
501  // If we don't understand the expression, assume the best and
502  // let the fixup and relocation code deal with it.
503  return true;
504  }
505 
506  if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
507  ELFRefKind == AArch64MCExpr::VK_LO12 ||
508  ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
509  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
510  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
511  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
512  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
513  ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
514  ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
515  // Note that we don't range-check the addend. It's adjusted modulo page
516  // size when converted, so there is no "out of range" condition when using
517  // @pageoff.
518  return Addend >= 0 && (Addend % Scale) == 0;
519  } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
520  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
521  // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
522  return Addend == 0;
523  }
524 
525  return false;
526  }
527 
528  template <int Scale> bool isUImm12Offset() const {
529  if (!isImm())
530  return false;
531 
532  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
533  if (!MCE)
534  return isSymbolicUImm12Offset(getImm(), Scale);
535 
536  int64_t Val = MCE->getValue();
537  return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
538  }
539 
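 // [Editor's sketch, not part of the original file.] For constant offsets,
 // isUImm12Offset<Scale>() accepts exactly the unsigned, scaled 12-bit form:
 //
 //   bool isValidUImm12Offset(int64_t Val, int64_t Scale) {
 //     return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
 //   }
 //
 // For Scale == 4 that is 0..16380 in steps of 4. Non-constant expressions
 // fall through to isSymbolicUImm12Offset() and, ultimately, the fixup code.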
540  bool isImm0_1() const {
541  if (!isImm())
542  return false;
543  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
544  if (!MCE)
545  return false;
546  int64_t Val = MCE->getValue();
547  return (Val >= 0 && Val < 2);
548  }
549 
550  bool isImm0_7() const {
551  if (!isImm())
552  return false;
553  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
554  if (!MCE)
555  return false;
556  int64_t Val = MCE->getValue();
557  return (Val >= 0 && Val < 8);
558  }
559 
560  bool isImm1_8() const {
561  if (!isImm())
562  return false;
563  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
564  if (!MCE)
565  return false;
566  int64_t Val = MCE->getValue();
567  return (Val > 0 && Val < 9);
568  }
569 
570  bool isImm0_15() const {
571  if (!isImm())
572  return false;
573  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
574  if (!MCE)
575  return false;
576  int64_t Val = MCE->getValue();
577  return (Val >= 0 && Val < 16);
578  }
579 
580  bool isImm1_16() const {
581  if (!isImm())
582  return false;
583  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
584  if (!MCE)
585  return false;
586  int64_t Val = MCE->getValue();
587  return (Val > 0 && Val < 17);
588  }
589 
590  bool isImm0_31() const {
591  if (!isImm())
592  return false;
593  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
594  if (!MCE)
595  return false;
596  int64_t Val = MCE->getValue();
597  return (Val >= 0 && Val < 32);
598  }
599 
600  bool isImm1_31() const {
601  if (!isImm())
602  return false;
603  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
604  if (!MCE)
605  return false;
606  int64_t Val = MCE->getValue();
607  return (Val >= 1 && Val < 32);
608  }
609 
610  bool isImm1_32() const {
611  if (!isImm())
612  return false;
613  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
614  if (!MCE)
615  return false;
616  int64_t Val = MCE->getValue();
617  return (Val >= 1 && Val < 33);
618  }
619 
620  bool isImm0_63() const {
621  if (!isImm())
622  return false;
623  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
624  if (!MCE)
625  return false;
626  int64_t Val = MCE->getValue();
627  return (Val >= 0 && Val < 64);
628  }
629 
630  bool isImm1_63() const {
631  if (!isImm())
632  return false;
633  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
634  if (!MCE)
635  return false;
636  int64_t Val = MCE->getValue();
637  return (Val >= 1 && Val < 64);
638  }
639 
640  bool isImm1_64() const {
641  if (!isImm())
642  return false;
643  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
644  if (!MCE)
645  return false;
646  int64_t Val = MCE->getValue();
647  return (Val >= 1 && Val < 65);
648  }
649 
650  bool isImm0_127() const {
651  if (!isImm())
652  return false;
653  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
654  if (!MCE)
655  return false;
656  int64_t Val = MCE->getValue();
657  return (Val >= 0 && Val < 128);
658  }
659 
660  bool isImm0_255() const {
661  if (!isImm())
662  return false;
663  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
664  if (!MCE)
665  return false;
666  int64_t Val = MCE->getValue();
667  return (Val >= 0 && Val < 256);
668  }
669 
670  bool isImm0_65535() const {
671  if (!isImm())
672  return false;
673  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
674  if (!MCE)
675  return false;
676  int64_t Val = MCE->getValue();
677  return (Val >= 0 && Val < 65536);
678  }
679 
680  bool isImm32_63() const {
681  if (!isImm())
682  return false;
683  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
684  if (!MCE)
685  return false;
686  int64_t Val = MCE->getValue();
687  return (Val >= 32 && Val < 64);
688  }
689 
690  bool isLogicalImm32() const {
691  if (!isImm())
692  return false;
693  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
694  if (!MCE)
695  return false;
696  int64_t Val = MCE->getValue();
697  if (Val >> 32 != 0 && Val >> 32 != ~0LL)
698  return false;
699  Val &= 0xFFFFFFFF;
700  return AArch64_AM::isLogicalImmediate(Val, 32);
701  }
702 
703  bool isLogicalImm64() const {
704  if (!isImm())
705  return false;
706  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
707  if (!MCE)
708  return false;
709  return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
710  }
711 
712  bool isLogicalImm32Not() const {
713  if (!isImm())
714  return false;
715  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
716  if (!MCE)
717  return false;
718  int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
719  return AArch64_AM::isLogicalImmediate(Val, 32);
720  }
721 
722  bool isLogicalImm64Not() const {
723  if (!isImm())
724  return false;
725  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
726  if (!MCE)
727  return false;
728  return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
729  }
730 
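 // [Editor's sketch, not part of the original file.] The guard
 // "Val >> 32 != 0 && Val >> 32 != ~0LL" in isLogicalImm32() rejects values
 // whose upper 32 bits are neither all-zero nor all-one, i.e. values that
 // are not a (possibly sign-extended) 32-bit quantity:
 //
 //   bool fitsIn32BitsPossiblySignExtended(int64_t Val) {
 //     int64_t Hi = Val >> 32;          // arithmetic shift of the top half
 //     return Hi == 0 || Hi == ~0LL;
 //   }
 //
 // Only then is the low half handed to AArch64_AM::isLogicalImmediate(..., 32).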
731  bool isShiftedImm() const { return Kind == k_ShiftedImm; }
732 
733  bool isAddSubImm() const {
734  if (!isShiftedImm() && !isImm())
735  return false;
736 
737  const MCExpr *Expr;
738 
739  // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
740  if (isShiftedImm()) {
741  unsigned Shift = ShiftedImm.ShiftAmount;
742  Expr = ShiftedImm.Val;
743  if (Shift != 0 && Shift != 12)
744  return false;
745  } else {
746  Expr = getImm();
747  }
748 
749  AArch64MCExpr::VariantKind ELFRefKind;
750  MCSymbolRefExpr::VariantKind DarwinRefKind;
751  int64_t Addend;
752  if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
753  DarwinRefKind, Addend)) {
754  return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
755  || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
756  || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
757  || ELFRefKind == AArch64MCExpr::VK_LO12
758  || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
759  || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
760  || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
761  || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
762  || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
763  || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
764  || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
765  }
766 
767  // If it's a constant, it should be a real immediate in range:
768  if (auto *CE = dyn_cast<MCConstantExpr>(Expr))
769  return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
770 
771  // If it's an expression, we hope for the best and let the fixup/relocation
772  // code deal with it.
773  return true;
774  }
775 
776  bool isAddSubImmNeg() const {
777  if (!isShiftedImm() && !isImm())
778  return false;
779 
780  const MCExpr *Expr;
781 
782  // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
783  if (isShiftedImm()) {
784  unsigned Shift = ShiftedImm.ShiftAmount;
785  Expr = ShiftedImm.Val;
786  if (Shift != 0 && Shift != 12)
787  return false;
788  } else
789  Expr = getImm();
790 
791  // Otherwise it should be a real negative immediate in range:
792  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
793  return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
794  }
795 
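 // [Editor's sketch, not part of the original file.] isAddSubImm() and
 // isAddSubImmNeg() together reflect what ADD/SUB (immediate) can encode: a
 // 12-bit unsigned value, optionally shifted by 'lsl #12'. So the set of
 // plain constants the instruction itself can take is:
 //
 //   bool isEncodableAddSubImm(uint64_t V) {
 //     return (V & ~0xfffULL) == 0 ||      // imm12, lsl #0
 //            (V & ~0xfff000ULL) == 0;     // imm12, lsl #12
 //   }
 //
 // Symbolic operands (e.g. :lo12: references) are accepted here and left for
 // the fixup/relocation machinery to range-check.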
796  bool isCondCode() const { return Kind == k_CondCode; }
797 
798  bool isSIMDImmType10() const {
799  if (!isImm())
800  return false;
801  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
802  if (!MCE)
803  return false;
804  return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
805  }
806 
807  bool isBranchTarget26() const {
808  if (!isImm())
809  return false;
810  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
811  if (!MCE)
812  return true;
813  int64_t Val = MCE->getValue();
814  if (Val & 0x3)
815  return false;
816  return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
817  }
818 
819  bool isPCRelLabel19() const {
820  if (!isImm())
821  return false;
822  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
823  if (!MCE)
824  return true;
825  int64_t Val = MCE->getValue();
826  if (Val & 0x3)
827  return false;
828  return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
829  }
830 
831  bool isBranchTarget14() const {
832  if (!isImm())
833  return false;
834  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
835  if (!MCE)
836  return true;
837  int64_t Val = MCE->getValue();
838  if (Val & 0x3)
839  return false;
840  return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
841  }
842 
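 // [Editor's sketch, not part of the original file.] The three branch-target
 // predicates above share one shape: the offset must be 4-byte aligned and
 // its word index must fit the instruction's signed immediate field (26 bits
 // for B/BL, 19 bits for compare-and-branch/conditional branches, 14 bits
 // for TBZ/TBNZ). A generic form of the constant check:
 //
 //   bool isValidBranchTarget(int64_t Val, unsigned Bits) {
 //     if (Val & 0x3)
 //       return false;                                  // word aligned
 //     int64_t Lo = -(int64_t(1) << (Bits + 1));        // -(2^(Bits-1)) * 4
 //     int64_t Hi = (int64_t(1) << (Bits + 1)) - 4;     // (2^(Bits-1)-1) * 4
 //     return Val >= Lo && Val <= Hi;
 //   }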
843  bool
844  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
845  if (!isImm())
846  return false;
847 
848  AArch64MCExpr::VariantKind ELFRefKind;
849  MCSymbolRefExpr::VariantKind DarwinRefKind;
850  int64_t Addend;
851  if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
852  DarwinRefKind, Addend)) {
853  return false;
854  }
855  if (DarwinRefKind != MCSymbolRefExpr::VK_None)
856  return false;
857 
858  for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
859  if (ELFRefKind == AllowedModifiers[i])
860  return Addend == 0;
861  }
862 
863  return false;
864  }
865 
866  bool isMovZSymbolG3() const {
867  return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
868  }
869 
870  bool isMovZSymbolG2() const {
871  return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
872  AArch64MCExpr::VK_TPREL_G2,
873  AArch64MCExpr::VK_DTPREL_G2});
874  }
875 
876  bool isMovZSymbolG1() const {
877  return isMovWSymbol({
878  AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
879  AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
880  AArch64MCExpr::VK_DTPREL_G1,
881  });
882  }
883 
884  bool isMovZSymbolG0() const {
885  return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
886  AArch64MCExpr::VK_TPREL_G0,
887  AArch64MCExpr::VK_DTPREL_G0});
888  }
889 
890  bool isMovKSymbolG3() const {
891  return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
892  }
893 
894  bool isMovKSymbolG2() const {
895  return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
896  }
897 
898  bool isMovKSymbolG1() const {
899  return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
900  AArch64MCExpr::VK_TPREL_G1_NC,
901  AArch64MCExpr::VK_DTPREL_G1_NC});
902  }
903 
904  bool isMovKSymbolG0() const {
905  return isMovWSymbol(
906  {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
907  AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
908  }
909 
910  template<int RegWidth, int Shift>
911  bool isMOVZMovAlias() const {
912  if (!isImm()) return false;
913 
914  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
915  if (!CE) return false;
916  uint64_t Value = CE->getValue();
917 
918  return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
919  }
920 
921  template<int RegWidth, int Shift>
922  bool isMOVNMovAlias() const {
923  if (!isImm()) return false;
924 
925  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
926  if (!CE) return false;
927  uint64_t Value = CE->getValue();
928 
929  return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
930  }
931 
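 // [Editor's sketch, not part of the original file.] isMOVZMovAlias<W, S>()
 // asks whether "mov Rd, #Value" can be expressed as a single MOVZ with hw
 // shift S, i.e. whether every set bit of Value lies in that one 16-bit
 // chunk. Roughly (assuming this mirrors AArch64_AM::isMOVZMovAlias, defined
 // elsewhere):
 //
 //   bool movzAliasOK(uint64_t Value, int Shift, int RegWidth) {
 //     if (RegWidth == 32)
 //       Value &= 0xffffffffULL;          // ignore bits above the register
 //     return (Value & ~(0xffffULL << Shift)) == 0;
 //   }
 //
 // The MOVN variant applies the same test to the complemented value.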
932  bool isFPImm() const { return Kind == k_FPImm; }
933  bool isBarrier() const { return Kind == k_Barrier; }
934  bool isSysReg() const { return Kind == k_SysReg; }
935 
936  bool isMRSSystemRegister() const {
937  if (!isSysReg()) return false;
938 
939  return SysReg.MRSReg != -1U;
940  }
941 
942  bool isMSRSystemRegister() const {
943  if (!isSysReg()) return false;
944  return SysReg.MSRReg != -1U;
945  }
946 
947  bool isSystemPStateFieldWithImm0_1() const {
948  if (!isSysReg()) return false;
949  return (SysReg.PStateField == AArch64PState::PAN ||
950  SysReg.PStateField == AArch64PState::UAO);
951  }
952 
953  bool isSystemPStateFieldWithImm0_15() const {
954  if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
955  return SysReg.PStateField != -1U;
956  }
957 
958  bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
959  bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
960 
961  bool isVectorRegLo() const {
962  return Kind == k_Register && Reg.isVector &&
963  AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
964  Reg.RegNum);
965  }
966 
967  bool isGPR32as64() const {
968  return Kind == k_Register && !Reg.isVector &&
969  AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
970  }
971 
972  bool isWSeqPair() const {
973  return Kind == k_Register && !Reg.isVector &&
974  AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
975  Reg.RegNum);
976  }
977 
978  bool isXSeqPair() const {
979  return Kind == k_Register && !Reg.isVector &&
980  AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
981  Reg.RegNum);
982  }
983 
984  bool isGPR64sp0() const {
985  return Kind == k_Register && !Reg.isVector &&
986  AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
987  }
988 
989  /// Is this a vector list with the type implicit (presumably attached to the
990  /// instruction itself)?
991  template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
992  return Kind == k_VectorList && VectorList.Count == NumRegs &&
993  !VectorList.ElementKind;
994  }
995 
996  template <unsigned NumRegs, unsigned NumElements, char ElementKind>
997  bool isTypedVectorList() const {
998  if (Kind != k_VectorList)
999  return false;
1000  if (VectorList.Count != NumRegs)
1001  return false;
1002  if (VectorList.ElementKind != ElementKind)
1003  return false;
1004  return VectorList.NumElements == NumElements;
1005  }
1006 
1007  bool isVectorIndex1() const {
1008  return Kind == k_VectorIndex && VectorIndex.Val == 1;
1009  }
1010 
1011  bool isVectorIndexB() const {
1012  return Kind == k_VectorIndex && VectorIndex.Val < 16;
1013  }
1014 
1015  bool isVectorIndexH() const {
1016  return Kind == k_VectorIndex && VectorIndex.Val < 8;
1017  }
1018 
1019  bool isVectorIndexS() const {
1020  return Kind == k_VectorIndex && VectorIndex.Val < 4;
1021  }
1022 
1023  bool isVectorIndexD() const {
1024  return Kind == k_VectorIndex && VectorIndex.Val < 2;
1025  }
1026 
1027  bool isToken() const override { return Kind == k_Token; }
1028 
1029  bool isTokenEqual(StringRef Str) const {
1030  return Kind == k_Token && getToken() == Str;
1031  }
1032  bool isSysCR() const { return Kind == k_SysCR; }
1033  bool isPrefetch() const { return Kind == k_Prefetch; }
1034  bool isPSBHint() const { return Kind == k_PSBHint; }
1035  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1036  bool isShifter() const {
1037  if (!isShiftExtend())
1038  return false;
1039 
1040  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1041  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1042  ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1043  ST == AArch64_AM::MSL);
1044  }
1045  bool isExtend() const {
1046  if (!isShiftExtend())
1047  return false;
1048 
1049  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1050  return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1051  ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1052  ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1053  ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1054  ET == AArch64_AM::LSL) &&
1055  getShiftExtendAmount() <= 4;
1056  }
1057 
1058  bool isExtend64() const {
1059  if (!isExtend())
1060  return false;
1061  // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
1062  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1063  return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
1064  }
1065 
1066  bool isExtendLSL64() const {
1067  if (!isExtend())
1068  return false;
1069  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1070  return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1071  ET == AArch64_AM::LSL) &&
1072  getShiftExtendAmount() <= 4;
1073  }
1074 
1075  template<int Width> bool isMemXExtend() const {
1076  if (!isExtend())
1077  return false;
1078  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1079  return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1080  (getShiftExtendAmount() == Log2_32(Width / 8) ||
1081  getShiftExtendAmount() == 0);
1082  }
1083 
1084  template<int Width> bool isMemWExtend() const {
1085  if (!isExtend())
1086  return false;
1087  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1088  return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1089  (getShiftExtendAmount() == Log2_32(Width / 8) ||
1090  getShiftExtendAmount() == 0);
1091  }
1092 
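 // [Editor's sketch, not part of the original file.] For register-offset
 // loads/stores the extend amount must be either 0 (no shift) or log2 of the
 // access size in bytes, which is what the two templates above check:
 //
 //   bool memExtendAmountOK(unsigned Amount, unsigned WidthBits) {
 //     unsigned Expected = 0;
 //     for (unsigned Bytes = WidthBits / 8; Bytes > 1; Bytes >>= 1)
 //       ++Expected;                      // Expected = log2(WidthBits / 8)
 //     return Amount == Expected || Amount == 0;
 //   }
 //
 // e.g. a 32-bit access allows "uxtw #2" or an unshifted extend, but not #1.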
1093  template <unsigned width>
1094  bool isArithmeticShifter() const {
1095  if (!isShifter())
1096  return false;
1097 
1098  // An arithmetic shifter is LSL, LSR, or ASR.
1099  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1100  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1101  ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1102  }
1103 
1104  template <unsigned width>
1105  bool isLogicalShifter() const {
1106  if (!isShifter())
1107  return false;
1108 
1109  // A logical shifter is LSL, LSR, ASR or ROR.
1110  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1111  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1112  ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1113  getShiftExtendAmount() < width;
1114  }
1115 
1116  bool isMovImm32Shifter() const {
1117  if (!isShifter())
1118  return false;
1119 
1120  // A 32-bit MOVi shifter is LSL of 0 or 16.
1121  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1122  if (ST != AArch64_AM::LSL)
1123  return false;
1124  uint64_t Val = getShiftExtendAmount();
1125  return (Val == 0 || Val == 16);
1126  }
1127 
1128  bool isMovImm64Shifter() const {
1129  if (!isShifter())
1130  return false;
1131 
1132  // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1133  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1134  if (ST != AArch64_AM::LSL)
1135  return false;
1136  uint64_t Val = getShiftExtendAmount();
1137  return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1138  }
1139 
1140  bool isLogicalVecShifter() const {
1141  if (!isShifter())
1142  return false;
1143 
1144  // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1145  unsigned Shift = getShiftExtendAmount();
1146  return getShiftExtendType() == AArch64_AM::LSL &&
1147  (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1148  }
1149 
1150  bool isLogicalVecHalfWordShifter() const {
1151  if (!isLogicalVecShifter())
1152  return false;
1153 
1154  // A logical vector half-word shifter is a left shift by 0 or 8.
1155  unsigned Shift = getShiftExtendAmount();
1156  return getShiftExtendType() == AArch64_AM::LSL &&
1157  (Shift == 0 || Shift == 8);
1158  }
1159 
1160  bool isMoveVecShifter() const {
1161  if (!isShiftExtend())
1162  return false;
1163 
1164  // A move vector shifter is an MSL shift of 8 or 16.
1165  unsigned Shift = getShiftExtendAmount();
1166  return getShiftExtendType() == AArch64_AM::MSL &&
1167  (Shift == 8 || Shift == 16);
1168  }
1169 
1170  // Fallback unscaled operands are for aliases of LDR/STR that fall back
1171  // to LDUR/STUR when the offset is not legal for the former but is for
1172  // the latter. As such, in addition to checking for being a legal unscaled
1173  // address, also check that it is not a legal scaled address. This avoids
1174  // ambiguity in the matcher.
1175  template<int Width>
1176  bool isSImm9OffsetFB() const {
1177  return isSImm9() && !isUImm12Offset<Width / 8>();
1178  }
1179 
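 // [Editor's sketch, not part of the original file.] Concretely, for a 64-bit
 // access (Width == 64) the fallback predicate is "fits the signed 9-bit
 // unscaled form but not the scaled unsigned form":
 //
 //   bool isFallbackOffset64(int64_t Off) {
 //     bool Unscaled = Off >= -256 && Off < 256;               // isSImm9()
 //     bool Scaled = Off >= 0 && (Off % 8) == 0 &&
 //                   (Off / 8) < 0x1000;                       // isUImm12Offset<8>()
 //     return Unscaled && !Scaled;
 //   }
 //
 // so offsets like 4 or -8 select the LDUR/STUR form, while 8 keeps LDR/STR.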
1180  bool isAdrpLabel() const {
1181  // Validation was handled during parsing, so we just sanity check that
1182  // something didn't go haywire.
1183  if (!isImm())
1184  return false;
1185 
1186  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1187  int64_t Val = CE->getValue();
1188  int64_t Min = - (4096 * (1LL << (21 - 1)));
1189  int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1190  return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1191  }
1192 
1193  return true;
1194  }
1195 
1196  bool isAdrLabel() const {
1197  // Validation was handled during parsing, so we just sanity check that
1198  // something didn't go haywire.
1199  if (!isImm())
1200  return false;
1201 
1202  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1203  int64_t Val = CE->getValue();
1204  int64_t Min = - (1LL << (21 - 1));
1205  int64_t Max = ((1LL << (21 - 1)) - 1);
1206  return Val >= Min && Val <= Max;
1207  }
1208 
1209  return true;
1210  }
1211 
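 // [Editor's sketch, not part of the original file.] The two label predicates
 // above encode the architectural ranges: ADR takes a signed 21-bit byte
 // offset (about +/-1MiB), ADRP a signed 21-bit page offset (+/-4GiB in
 // 4096-byte steps):
 //
 //   bool adrInRange(int64_t Off) {
 //     return Off >= -(1LL << 20) && Off <= (1LL << 20) - 1;
 //   }
 //   bool adrpInRange(int64_t Off) {
 //     return (Off % 4096) == 0 &&
 //            Off >= -(4096 * (1LL << 20)) && Off <= 4096 * ((1LL << 20) - 1);
 //   }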
1212  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1213  // Add as immediates when possible. Null MCExpr = 0.
1214  if (!Expr)
1215  Inst.addOperand(MCOperand::createImm(0));
1216  else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1217  Inst.addOperand(MCOperand::createImm(CE->getValue()));
1218  else
1219  Inst.addOperand(MCOperand::createExpr(Expr));
1220  }
1221 
1222  void addRegOperands(MCInst &Inst, unsigned N) const {
1223  assert(N == 1 && "Invalid number of operands!");
1224  Inst.addOperand(MCOperand::createReg(getReg()));
1225  }
1226 
1227  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1228  assert(N == 1 && "Invalid number of operands!");
1229  assert(
1230  AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1231 
1232  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1233  uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1234  RI->getEncodingValue(getReg()));
1235 
1236  Inst.addOperand(MCOperand::createReg(Reg));
1237  }
1238 
1239  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1240  assert(N == 1 && "Invalid number of operands!");
1241  assert(
1242  AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1243  Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1244  }
1245 
1246  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1247  assert(N == 1 && "Invalid number of operands!");
1248  assert(
1249  AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1250  Inst.addOperand(MCOperand::createReg(getReg()));
1251  }
1252 
1253  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1254  assert(N == 1 && "Invalid number of operands!");
1255  Inst.addOperand(MCOperand::createReg(getReg()));
1256  }
1257 
1258  template <unsigned NumRegs>
1259  void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1260  assert(N == 1 && "Invalid number of operands!");
1261  static const unsigned FirstRegs[] = { AArch64::D0,
1262  AArch64::D0_D1,
1263  AArch64::D0_D1_D2,
1264  AArch64::D0_D1_D2_D3 };
1265  unsigned FirstReg = FirstRegs[NumRegs - 1];
1266 
1267  Inst.addOperand(
1268  MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1269  }
1270 
1271  template <unsigned NumRegs>
1272  void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1273  assert(N == 1 && "Invalid number of operands!");
1274  static const unsigned FirstRegs[] = { AArch64::Q0,
1275  AArch64::Q0_Q1,
1276  AArch64::Q0_Q1_Q2,
1277  AArch64::Q0_Q1_Q2_Q3 };
1278  unsigned FirstReg = FirstRegs[NumRegs - 1];
1279 
1280  Inst.addOperand(
1281  MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1282  }
1283 
1284  void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1285  assert(N == 1 && "Invalid number of operands!");
1286  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1287  }
1288 
1289  void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1290  assert(N == 1 && "Invalid number of operands!");
1291  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1292  }
1293 
1294  void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1295  assert(N == 1 && "Invalid number of operands!");
1296  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1297  }
1298 
1299  void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1300  assert(N == 1 && "Invalid number of operands!");
1301  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1302  }
1303 
1304  void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1305  assert(N == 1 && "Invalid number of operands!");
1306  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1307  }
1308 
1309  void addImmOperands(MCInst &Inst, unsigned N) const {
1310  assert(N == 1 && "Invalid number of operands!");
1311  // If this is a pageoff symrefexpr with an addend, adjust the addend
1312  // to be only the page-offset portion. Otherwise, just add the expr
1313  // as-is.
1314  addExpr(Inst, getImm());
1315  }
1316 
1317  void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1318  assert(N == 2 && "Invalid number of operands!");
1319  if (isShiftedImm()) {
1320  addExpr(Inst, getShiftedImmVal());
1321  Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1322  } else {
1323  addExpr(Inst, getImm());
1324  Inst.addOperand(MCOperand::createImm(0));
1325  }
1326  }
1327 
1328  void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
1329  assert(N == 2 && "Invalid number of operands!");
1330 
1331  const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
1332  const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
1333  int64_t Val = -CE->getValue();
1334  unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
1335 
1336  Inst.addOperand(MCOperand::createImm(Val));
1337  Inst.addOperand(MCOperand::createImm(ShiftAmt));
1338  }
1339 
1340  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1341  assert(N == 1 && "Invalid number of operands!");
1342  Inst.addOperand(MCOperand::createImm(getCondCode()));
1343  }
1344 
1345  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1346  assert(N == 1 && "Invalid number of operands!");
1347  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1348  if (!MCE)
1349  addExpr(Inst, getImm());
1350  else
1351  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1352  }
1353 
1354  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1355  addImmOperands(Inst, N);
1356  }
1357 
1358  template<int Scale>
1359  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1360  assert(N == 1 && "Invalid number of operands!");
1361  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1362 
1363  if (!MCE) {
1364  Inst.addOperand(MCOperand::createExpr(getImm()));
1365  return;
1366  }
1367  Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1368  }
1369 
1370  void addSImm9Operands(MCInst &Inst, unsigned N) const {
1371  assert(N == 1 && "Invalid number of operands!");
1372  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1373  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1374  }
1375 
1376  void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1377  assert(N == 1 && "Invalid number of operands!");
1378  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1379  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1380  }
1381 
1382  void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1383  assert(N == 1 && "Invalid number of operands!");
1384  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1385  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1386  }
1387 
1388  void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1389  assert(N == 1 && "Invalid number of operands!");
1390  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1391  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
1392  }
1393 
1394  void addImm0_1Operands(MCInst &Inst, unsigned N) const {
1395  assert(N == 1 && "Invalid number of operands!");
1396  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1397  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1398  }
1399 
1400  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1401  assert(N == 1 && "Invalid number of operands!");
1402  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1403  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1404  }
1405 
1406  void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1407  assert(N == 1 && "Invalid number of operands!");
1408  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1409  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1410  }
1411 
1412  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1413  assert(N == 1 && "Invalid number of operands!");
1414  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1415  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1416  }
1417 
1418  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1419  assert(N == 1 && "Invalid number of operands!");
1420  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1421  assert(MCE && "Invalid constant immediate operand!");
1422  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1423  }
1424 
1425  void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1426  assert(N == 1 && "Invalid number of operands!");
1427  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1428  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1429  }
1430 
1431  void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1432  assert(N == 1 && "Invalid number of operands!");
1433  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1434  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1435  }
1436 
1437  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1438  assert(N == 1 && "Invalid number of operands!");
1439  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1440  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1441  }
1442 
1443  void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1444  assert(N == 1 && "Invalid number of operands!");
1445  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1446  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1447  }
1448 
1449  void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1450  assert(N == 1 && "Invalid number of operands!");
1451  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1452  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1453  }
1454 
1455  void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1456  assert(N == 1 && "Invalid number of operands!");
1457  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1458  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1459  }
1460 
1461  void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1462  assert(N == 1 && "Invalid number of operands!");
1463  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1464  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1465  }
1466 
1467  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1468  assert(N == 1 && "Invalid number of operands!");
1469  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1470  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1471  }
1472 
1473  void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1474  assert(N == 1 && "Invalid number of operands!");
1475  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1476  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1477  }
1478 
1479  void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1480  assert(N == 1 && "Invalid number of operands!");
1481  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1482  Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1483  }
1484 
1485  void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1486  assert(N == 1 && "Invalid number of operands!");
1487  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1488  uint64_t encoding =
1489  AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1490  Inst.addOperand(MCOperand::createImm(encoding));
1491  }
1492 
1493  void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1494  assert(N == 1 && "Invalid number of operands!");
1495  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1496  uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1497  Inst.addOperand(MCOperand::createImm(encoding));
1498  }
1499 
1500  void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1501  assert(N == 1 && "Invalid number of operands!");
1502  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1503  int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1504  uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1505  Inst.addOperand(MCOperand::createImm(encoding));
1506  }
1507 
1508  void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1509  assert(N == 1 && "Invalid number of operands!");
1510  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1511  uint64_t encoding =
1512  AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1513  Inst.addOperand(MCOperand::createImm(encoding));
1514  }
1515 
1516  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1517  assert(N == 1 && "Invalid number of operands!");
1518  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1519  uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1520  Inst.addOperand(MCOperand::createImm(encoding));
1521  }
1522 
1523  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1524  // Branch operands don't encode the low bits, so shift them off
1525  // here. If it's a label, however, just put it on directly as there's
1526  // not enough information now to do anything.
1527  assert(N == 1 && "Invalid number of operands!");
1528  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1529  if (!MCE) {
1530  addExpr(Inst, getImm());
1531  return;
1532  }
1533  assert(MCE && "Invalid constant immediate operand!");
1534  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1535  }
1536 
1537  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1538  // Branch operands don't encode the low bits, so shift them off
1539  // here. If it's a label, however, just put it on directly as there's
1540  // not enough information now to do anything.
1541  assert(N == 1 && "Invalid number of operands!");
1542  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1543  if (!MCE) {
1544  addExpr(Inst, getImm());
1545  return;
1546  }
1547  assert(MCE && "Invalid constant immediate operand!");
1548  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1549  }
1550 
1551  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1552  // Branch operands don't encode the low bits, so shift them off
1553  // here. If it's a label, however, just put it on directly as there's
1554  // not enough information now to do anything.
1555  assert(N == 1 && "Invalid number of operands!");
1556  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1557  if (!MCE) {
1558  addExpr(Inst, getImm());
1559  return;
1560  }
1561  assert(MCE && "Invalid constant immediate operand!");
1562  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1563  }
1564 
1565  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1566  assert(N == 1 && "Invalid number of operands!");
1567  Inst.addOperand(MCOperand::createImm(getFPImm()));
1568  }
1569 
1570  void addBarrierOperands(MCInst &Inst, unsigned N) const {
1571  assert(N == 1 && "Invalid number of operands!");
1572  Inst.addOperand(MCOperand::createImm(getBarrier()));
1573  }
1574 
1575  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1576  assert(N == 1 && "Invalid number of operands!");
1577 
1578  Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1579  }
1580 
1581  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1582  assert(N == 1 && "Invalid number of operands!");
1583 
1584  Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1585  }
1586 
1587  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1588  assert(N == 1 && "Invalid number of operands!");
1589 
1590  Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1591  }
1592 
1593  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1594  assert(N == 1 && "Invalid number of operands!");
1595 
1596  Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1597  }
1598 
1599  void addSysCROperands(MCInst &Inst, unsigned N) const {
1600  assert(N == 1 && "Invalid number of operands!");
1601  Inst.addOperand(MCOperand::createImm(getSysCR()));
1602  }
1603 
1604  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1605  assert(N == 1 && "Invalid number of operands!");
1606  Inst.addOperand(MCOperand::createImm(getPrefetch()));
1607  }
1608 
1609  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
1610  assert(N == 1 && "Invalid number of operands!");
1611  Inst.addOperand(MCOperand::createImm(getPSBHint()));
1612  }
1613 
1614  void addShifterOperands(MCInst &Inst, unsigned N) const {
1615  assert(N == 1 && "Invalid number of operands!");
1616  unsigned Imm =
1617  AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1618  Inst.addOperand(MCOperand::createImm(Imm));
1619  }
1620 
1621  void addExtendOperands(MCInst &Inst, unsigned N) const {
1622  assert(N == 1 && "Invalid number of operands!");
1623  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1624  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1625  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1626  Inst.addOperand(MCOperand::createImm(Imm));
1627  }
1628 
1629  void addExtend64Operands(MCInst &Inst, unsigned N) const {
1630  assert(N == 1 && "Invalid number of operands!");
1631  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1632  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1633  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1634  Inst.addOperand(MCOperand::createImm(Imm));
1635  }
1636 
1637  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1638  assert(N == 2 && "Invalid number of operands!");
1639  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1640  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1641  Inst.addOperand(MCOperand::createImm(IsSigned));
1642  Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1643  }
1644 
1645  // For 8-bit load/store instructions with a register offset, both the
1646  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1647  // they're disambiguated by whether the shift was explicit or implicit rather
1648  // than its size.
1649  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1650  assert(N == 2 && "Invalid number of operands!");
1651  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1652  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1653  Inst.addOperand(MCOperand::createImm(IsSigned));
1654  Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1655  }
1656 
1657  template<int Shift>
1658  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1659  assert(N == 1 && "Invalid number of operands!");
1660 
1661  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1662  uint64_t Value = CE->getValue();
1663  Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1664  }
1665 
1666  template<int Shift>
1667  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1668  assert(N == 1 && "Invalid number of operands!");
1669 
1670  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1671  uint64_t Value = CE->getValue();
1672  Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1673  }
1674 
1675  void print(raw_ostream &OS) const override;
1676 
1677  static std::unique_ptr<AArch64Operand>
1678  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1679  auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1680  Op->Tok.Data = Str.data();
1681  Op->Tok.Length = Str.size();
1682  Op->Tok.IsSuffix = IsSuffix;
1683  Op->StartLoc = S;
1684  Op->EndLoc = S;
1685  return Op;
1686  }
1687 
1688  static std::unique_ptr<AArch64Operand>
1689  CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1690  auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1691  Op->Reg.RegNum = RegNum;
1692  Op->Reg.isVector = isVector;
1693  Op->StartLoc = S;
1694  Op->EndLoc = E;
1695  return Op;
1696  }
1697 
1698  static std::unique_ptr<AArch64Operand>
1699  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1700  char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1701  auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1702  Op->VectorList.RegNum = RegNum;
1703  Op->VectorList.Count = Count;
1704  Op->VectorList.NumElements = NumElements;
1705  Op->VectorList.ElementKind = ElementKind;
1706  Op->StartLoc = S;
1707  Op->EndLoc = E;
1708  return Op;
1709  }
1710 
1711  static std::unique_ptr<AArch64Operand>
1712  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1713  auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1714  Op->VectorIndex.Val = Idx;
1715  Op->StartLoc = S;
1716  Op->EndLoc = E;
1717  return Op;
1718  }
1719 
1720  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1721  SMLoc E, MCContext &Ctx) {
1722  auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1723  Op->Imm.Val = Val;
1724  Op->StartLoc = S;
1725  Op->EndLoc = E;
1726  return Op;
1727  }
1728 
1729  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1730  unsigned ShiftAmount,
1731  SMLoc S, SMLoc E,
1732  MCContext &Ctx) {
1733  auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1734  Op->ShiftedImm.Val = Val;
1735  Op->ShiftedImm.ShiftAmount = ShiftAmount;
1736  Op->StartLoc = S;
1737  Op->EndLoc = E;
1738  return Op;
1739  }
1740 
1741  static std::unique_ptr<AArch64Operand>
1742  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1743  auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1744  Op->CondCode.Code = Code;
1745  Op->StartLoc = S;
1746  Op->EndLoc = E;
1747  return Op;
1748  }
1749 
1750  static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1751  MCContext &Ctx) {
1752  auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1753  Op->FPImm.Val = Val;
1754  Op->StartLoc = S;
1755  Op->EndLoc = S;
1756  return Op;
1757  }
1758 
1759  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1760  StringRef Str,
1761  SMLoc S,
1762  MCContext &Ctx) {
1763  auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1764  Op->Barrier.Val = Val;
1765  Op->Barrier.Data = Str.data();
1766  Op->Barrier.Length = Str.size();
1767  Op->StartLoc = S;
1768  Op->EndLoc = S;
1769  return Op;
1770  }
1771 
1772  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1773  uint32_t MRSReg,
1774  uint32_t MSRReg,
1775  uint32_t PStateField,
1776  MCContext &Ctx) {
1777  auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1778  Op->SysReg.Data = Str.data();
1779  Op->SysReg.Length = Str.size();
1780  Op->SysReg.MRSReg = MRSReg;
1781  Op->SysReg.MSRReg = MSRReg;
1782  Op->SysReg.PStateField = PStateField;
1783  Op->StartLoc = S;
1784  Op->EndLoc = S;
1785  return Op;
1786  }
1787 
1788  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1789  SMLoc E, MCContext &Ctx) {
1790  auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1791  Op->SysCRImm.Val = Val;
1792  Op->StartLoc = S;
1793  Op->EndLoc = E;
1794  return Op;
1795  }
1796 
1797  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1798  StringRef Str,
1799  SMLoc S,
1800  MCContext &Ctx) {
1801  auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1802  Op->Prefetch.Val = Val;
1803  Op->Prefetch.Data = Str.data();
1804  Op->Prefetch.Length = Str.size();
1805  Op->StartLoc = S;
1806  Op->EndLoc = S;
1807  return Op;
1808  }
1809 
1810  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
1811  StringRef Str,
1812  SMLoc S,
1813  MCContext &Ctx) {
1814  auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1815  Op->PSBHint.Val = Val;
1816  Op->PSBHint.Data = Str.data();
1817  Op->PSBHint.Length = Str.size();
1818  Op->StartLoc = S;
1819  Op->EndLoc = S;
1820  return Op;
1821  }
1822 
1823  static std::unique_ptr<AArch64Operand>
1824  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1825  bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1826  auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1827  Op->ShiftExtend.Type = ShOp;
1828  Op->ShiftExtend.Amount = Val;
1829  Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1830  Op->StartLoc = S;
1831  Op->EndLoc = E;
1832  return Op;
1833  }
1834 };
1835 
1836 } // end anonymous namespace.
1837 
1838 void AArch64Operand::print(raw_ostream &OS) const {
1839  switch (Kind) {
1840  case k_FPImm:
1841  OS << "<fpimm " << getFPImm() << "("
1842  << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1843  break;
1844  case k_Barrier: {
1845  StringRef Name = getBarrierName();
1846  if (!Name.empty())
1847  OS << "<barrier " << Name << ">";
1848  else
1849  OS << "<barrier invalid #" << getBarrier() << ">";
1850  break;
1851  }
1852  case k_Immediate:
1853  OS << *getImm();
1854  break;
1855  case k_ShiftedImm: {
1856  unsigned Shift = getShiftedImmShift();
1857  OS << "<shiftedimm ";
1858  OS << *getShiftedImmVal();
1859  OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1860  break;
1861  }
1862  case k_CondCode:
1863  OS << "<condcode " << getCondCode() << ">";
1864  break;
1865  case k_Register:
1866  OS << "<register " << getReg() << ">";
1867  break;
1868  case k_VectorList: {
1869  OS << "<vectorlist ";
1870  unsigned Reg = getVectorListStart();
1871  for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1872  OS << Reg + i << " ";
1873  OS << ">";
1874  break;
1875  }
1876  case k_VectorIndex:
1877  OS << "<vectorindex " << getVectorIndex() << ">";
1878  break;
1879  case k_SysReg:
1880  OS << "<sysreg: " << getSysReg() << '>';
1881  break;
1882  case k_Token:
1883  OS << "'" << getToken() << "'";
1884  break;
1885  case k_SysCR:
1886  OS << "c" << getSysCR();
1887  break;
1888  case k_Prefetch: {
1889  StringRef Name = getPrefetchName();
1890  if (!Name.empty())
1891  OS << "<prfop " << Name << ">";
1892  else
1893  OS << "<prfop invalid #" << getPrefetch() << ">";
1894  break;
1895  }
1896  case k_PSBHint:
1897  OS << getPSBHintName();
1898  break;
1899  case k_ShiftExtend:
1900  OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1901  << getShiftExtendAmount();
1902  if (!hasShiftExtendAmount())
1903  OS << "<imp>";
1904  OS << '>';
1905  break;
1906  }
1907 }
1908 
1909 /// @name Auto-generated Match Functions
1910 /// {
1911 
1912 static unsigned MatchRegisterName(StringRef Name);
1913 
1914 /// }
1915 
1916 static unsigned matchVectorRegName(StringRef Name) {
1917  return StringSwitch<unsigned>(Name.lower())
1918  .Case("v0", AArch64::Q0)
1919  .Case("v1", AArch64::Q1)
1920  .Case("v2", AArch64::Q2)
1921  .Case("v3", AArch64::Q3)
1922  .Case("v4", AArch64::Q4)
1923  .Case("v5", AArch64::Q5)
1924  .Case("v6", AArch64::Q6)
1925  .Case("v7", AArch64::Q7)
1926  .Case("v8", AArch64::Q8)
1927  .Case("v9", AArch64::Q9)
1928  .Case("v10", AArch64::Q10)
1929  .Case("v11", AArch64::Q11)
1930  .Case("v12", AArch64::Q12)
1931  .Case("v13", AArch64::Q13)
1932  .Case("v14", AArch64::Q14)
1933  .Case("v15", AArch64::Q15)
1934  .Case("v16", AArch64::Q16)
1935  .Case("v17", AArch64::Q17)
1936  .Case("v18", AArch64::Q18)
1937  .Case("v19", AArch64::Q19)
1938  .Case("v20", AArch64::Q20)
1939  .Case("v21", AArch64::Q21)
1940  .Case("v22", AArch64::Q22)
1941  .Case("v23", AArch64::Q23)
1942  .Case("v24", AArch64::Q24)
1943  .Case("v25", AArch64::Q25)
1944  .Case("v26", AArch64::Q26)
1945  .Case("v27", AArch64::Q27)
1946  .Case("v28", AArch64::Q28)
1947  .Case("v29", AArch64::Q29)
1948  .Case("v30", AArch64::Q30)
1949  .Case("v31", AArch64::Q31)
1950  .Default(0);
1951 }
1952 
1953 static bool isValidVectorKind(StringRef Name) {
1954  return StringSwitch<bool>(Name.lower())
1955  .Case(".8b", true)
1956  .Case(".16b", true)
1957  .Case(".4h", true)
1958  .Case(".8h", true)
1959  .Case(".2s", true)
1960  .Case(".4s", true)
1961  .Case(".1d", true)
1962  .Case(".2d", true)
1963  .Case(".1q", true)
1964  // Accept the width neutral ones, too, for verbose syntax. If those
1965  // aren't used in the right places, the token operand won't match so
1966  // all will work out.
1967  .Case(".b", true)
1968  .Case(".h", true)
1969  .Case(".s", true)
1970  .Case(".d", true)
1971  // Needed for fp16 scalar pairwise reductions
1972  .Case(".2h", true)
1973  .Default(false);
1974 }
1975 
1976 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1977  char &ElementKind) {
1978  assert(isValidVectorKind(Name));
1979 
1980  ElementKind = Name.lower()[Name.size() - 1];
1981  NumElements = 0;
1982 
1983  if (Name.size() == 2)
1984  return;
1985 
1986  // Parse the lane count
1987  Name = Name.drop_front();
1988  while (isdigit(Name.front())) {
1989  NumElements = 10 * NumElements + (Name.front() - '0');
1990  Name = Name.drop_front();
1991  }
1992 }
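// For illustration: ".4s" yields NumElements = 4 and ElementKind = 's',
// ".16b" yields 16 and 'b', and a width-neutral suffix such as ".d" leaves
// NumElements at 0 with ElementKind = 'd'.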
1993 
1994 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1995  SMLoc &EndLoc) {
1996  StartLoc = getLoc();
1997  RegNo = tryParseRegister();
1998  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1999  return (RegNo == (unsigned)-1);
2000 }
2001 
2002 // Matches a register name or register alias previously defined by '.req'
2003 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2004  bool isVector) {
2005  unsigned RegNum = isVector ? matchVectorRegName(Name)
2006  : MatchRegisterName(Name);
2007 
2008  if (RegNum == 0) {
2009  // Check for aliases registered via .req. Canonicalize to lower case.
2010  // That's more consistent since register names are case insensitive, and
2011  // it's how the original entry was passed in from MC/MCParser/AsmParser.
2012  auto Entry = RegisterReqs.find(Name.lower());
2013  if (Entry == RegisterReqs.end())
2014  return 0;
2015  // set RegNum if the match is the right kind of register
2016  if (isVector == Entry->getValue().first)
2017  RegNum = Entry->getValue().second;
2018  }
2019  return RegNum;
2020 }
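// For example, after "foo .req x5" has registered an alias, a scalar lookup
// of "foo" returns the register recorded for x5, while a vector lookup of the
// same name returns 0 because the stored kind does not match.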
2021 
2022 /// tryParseRegister - Try to parse a register name. The token must be an
2023 /// Identifier when called, and if it is a register name the token is eaten and
2024 /// the register is added to the operand list.
2025 int AArch64AsmParser::tryParseRegister() {
2026  MCAsmParser &Parser = getParser();
2027  const AsmToken &Tok = Parser.getTok();
2028  if (Tok.isNot(AsmToken::Identifier))
2029  return -1;
2030 
2031  std::string lowerCase = Tok.getString().lower();
2032  unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
2033  // Also handle a few aliases of registers.
2034  if (RegNum == 0)
2035  RegNum = StringSwitch<unsigned>(lowerCase)
2036  .Case("fp", AArch64::FP)
2037  .Case("lr", AArch64::LR)
2038  .Case("x31", AArch64::XZR)
2039  .Case("w31", AArch64::WZR)
2040  .Default(0);
2041 
2042  if (RegNum == 0)
2043  return -1;
2044 
2045  Parser.Lex(); // Eat identifier token.
2046  return RegNum;
2047 }
2048 
2049 /// tryMatchVectorRegister - Try to parse a vector register name with optional
2050 /// kind specifier. If it is a register specifier, eat the token and return it.
2051 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
2052  MCAsmParser &Parser = getParser();
2053  if (Parser.getTok().isNot(AsmToken::Identifier)) {
2054  TokError("vector register expected");
2055  return -1;
2056  }
2057 
2058  StringRef Name = Parser.getTok().getString();
2059  // If there is a kind specifier, it's separated from the register name by
2060  // a '.'.
2061  size_t Start = 0, Next = Name.find('.');
2062  StringRef Head = Name.slice(Start, Next);
2063  unsigned RegNum = matchRegisterNameAlias(Head, true);
2064 
2065  if (RegNum) {
2066  if (Next != StringRef::npos) {
2067  Kind = Name.slice(Next, StringRef::npos);
2068  if (!isValidVectorKind(Kind)) {
2069  TokError("invalid vector kind qualifier");
2070  return -1;
2071  }
2072  }
2073  Parser.Lex(); // Eat the register token.
2074  return RegNum;
2075  }
2076 
2077  if (expected)
2078  TokError("vector register expected");
2079  return -1;
2080 }
2081 
2082 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2083 OperandMatchResultTy
2084 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2085  MCAsmParser &Parser = getParser();
2086  SMLoc S = getLoc();
2087 
2088  if (Parser.getTok().isNot(AsmToken::Identifier)) {
2089  Error(S, "Expected cN operand where 0 <= N <= 15");
2090  return MatchOperand_ParseFail;
2091  }
2092 
2093  StringRef Tok = Parser.getTok().getIdentifier();
2094  if (Tok[0] != 'c' && Tok[0] != 'C') {
2095  Error(S, "Expected cN operand where 0 <= N <= 15");
2096  return MatchOperand_ParseFail;
2097  }
2098 
2099  uint32_t CRNum;
2100  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2101  if (BadNum || CRNum > 15) {
2102  Error(S, "Expected cN operand where 0 <= N <= 15");
2103  return MatchOperand_ParseFail;
2104  }
2105 
2106  Parser.Lex(); // Eat identifier token.
2107  Operands.push_back(
2108  AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2109  return MatchOperand_Success;
2110 }
2111 
2112 /// tryParsePrefetch - Try to parse a prefetch operand.
2113 OperandMatchResultTy
2114 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2115  MCAsmParser &Parser = getParser();
2116  SMLoc S = getLoc();
2117  const AsmToken &Tok = Parser.getTok();
2118  // Either an identifier for named values or a 5-bit immediate.
2119  // Eat optional hash.
2120  if (parseOptionalToken(AsmToken::Hash) ||
2121  Tok.is(AsmToken::Integer)) {
2122  const MCExpr *ImmVal;
2123  if (getParser().parseExpression(ImmVal))
2124  return MatchOperand_ParseFail;
2125 
2126  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2127  if (!MCE) {
2128  TokError("immediate value expected for prefetch operand");
2129  return MatchOperand_ParseFail;
2130  }
2131  unsigned prfop = MCE->getValue();
2132  if (prfop > 31) {
2133  TokError("prefetch operand out of range, [0,31] expected");
2134  return MatchOperand_ParseFail;
2135  }
2136 
2137  auto PRFM = AArch64PRFM::lookupPRFMByEncoding(MCE->getValue());
2138  Operands.push_back(AArch64Operand::CreatePrefetch(
2139  prfop, PRFM ? PRFM->Name : "", S, getContext()));
2140  return MatchOperand_Success;
2141  }
2142 
2143  if (Tok.isNot(AsmToken::Identifier)) {
2144  TokError("pre-fetch hint expected");
2145  return MatchOperand_ParseFail;
2146  }
2147 
2148  auto PRFM = AArch64PRFM::lookupPRFMByName(Tok.getString());
2149  if (!PRFM) {
2150  TokError("pre-fetch hint expected");
2151  return MatchOperand_ParseFail;
2152  }
2153 
2154  Parser.Lex(); // Eat identifier token.
2155  Operands.push_back(AArch64Operand::CreatePrefetch(
2156  PRFM->Encoding, Tok.getString(), S, getContext()));
2157  return MatchOperand_Success;
2158 }
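// For example, "prfm pldl1keep, [x0]" arrives here as the named hint
// "pldl1keep", which lookupPRFMByName resolves to its 5-bit encoding, while a
// raw "prfm #5, [x0]" is accepted directly as long as the value is in [0, 31].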
2159 
2160 /// tryParsePSBHint - Try to parse a PSB hint operand (an alias of the HINT instruction).
2161 OperandMatchResultTy
2162 AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
2163  MCAsmParser &Parser = getParser();
2164  SMLoc S = getLoc();
2165  const AsmToken &Tok = Parser.getTok();
2166  if (Tok.isNot(AsmToken::Identifier)) {
2167  TokError("invalid operand for instruction");
2168  return MatchOperand_ParseFail;
2169  }
2170 
2171  auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
2172  if (!PSB) {
2173  TokError("invalid operand for instruction");
2174  return MatchOperand_ParseFail;
2175  }
2176 
2177  Parser.Lex(); // Eat identifier token.
2178  Operands.push_back(AArch64Operand::CreatePSBHint(
2179  PSB->Encoding, Tok.getString(), S, getContext()));
2180  return MatchOperand_Success;
2181 }
2182 
2183 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2184 /// instruction.
2185 OperandMatchResultTy
2186 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2187  MCAsmParser &Parser = getParser();
2188  SMLoc S = getLoc();
2189  const MCExpr *Expr;
2190 
2191  if (Parser.getTok().is(AsmToken::Hash)) {
2192  Parser.Lex(); // Eat hash token.
2193  }
2194 
2195  if (parseSymbolicImmVal(Expr))
2196  return MatchOperand_ParseFail;
2197 
2198  AArch64MCExpr::VariantKind ELFRefKind;
2199  MCSymbolRefExpr::VariantKind DarwinRefKind;
2200  int64_t Addend;
2201  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2202  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2203  ELFRefKind == AArch64MCExpr::VK_INVALID) {
2204  // No modifier was specified at all; this is the syntax for an ELF basic
2205  // ADRP relocation (unfortunately).
2206  Expr =
2207  AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2208  } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2209  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2210  Addend != 0) {
2211  Error(S, "gotpage label reference not allowed an addend");
2212  return MatchOperand_ParseFail;
2213  } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2214  DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2215  DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2216  ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2217  ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2218  ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2219  // The operand must be an @page or @gotpage qualified symbolref.
2220  Error(S, "page or gotpage label reference expected");
2221  return MatchOperand_ParseFail;
2222  }
2223  }
2224 
2225  // We have either a label reference possibly with addend or an immediate. The
2226  // addend is a raw value here. The linker will adjust it to only reference the
2227  // page.
2228  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2229  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2230 
2231  return MatchOperand_Success;
2232 }
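// For example, a bare "adrp x0, sym" is wrapped as an ABS_PAGE reference,
// ":got:sym" is accepted as a GOT page reference, and ":lo12:sym" is rejected
// here because it is neither a page nor a gotpage reference.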
2233 
2234 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2235 /// instruction.
2236 OperandMatchResultTy
2237 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2238  SMLoc S = getLoc();
2239  const MCExpr *Expr;
2240 
2241  parseOptionalToken(AsmToken::Hash);
2242  if (getParser().parseExpression(Expr))
2243  return MatchOperand_ParseFail;
2244 
2245  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2246  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2247 
2248  return MatchOperand_Success;
2249 }
2250 
2251 /// tryParseFPImm - A floating point immediate expression operand.
2252 OperandMatchResultTy
2253 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2254  MCAsmParser &Parser = getParser();
2255  SMLoc S = getLoc();
2256 
2257  bool Hash = parseOptionalToken(AsmToken::Hash);
2258 
2259  // Handle negation, as that still comes through as a separate token.
2260  bool isNegative = parseOptionalToken(AsmToken::Minus);
2261 
2262  const AsmToken &Tok = Parser.getTok();
2263  if (Tok.is(AsmToken::Real)) {
2264  APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
2265  if (isNegative)
2266  RealVal.changeSign();
2267 
2268  uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2269  int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2270  Parser.Lex(); // Eat the token.
2271  // Check for out of range values. As an exception, we let Zero through,
2272  // as we handle that special case in post-processing before matching in
2273  // order to use the zero register for it.
2274  if (Val == -1 && !RealVal.isPosZero()) {
2275  TokError("expected compatible register or floating-point constant");
2276  return MatchOperand_ParseFail;
2277  }
2278  Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2279  return MatchOperand_Success;
2280  }
2281  if (Tok.is(AsmToken::Integer)) {
2282  int64_t Val;
2283  if (!isNegative && Tok.getString().startswith("0x")) {
2284  Val = Tok.getIntVal();
2285  if (Val > 255 || Val < 0) {
2286  TokError("encoded floating point value out of range");
2287  return MatchOperand_ParseFail;
2288  }
2289  } else {
2290  APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
2291  uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2292  // If we had a '-' in front, toggle the sign bit.
2293  IntVal ^= (uint64_t)isNegative << 63;
2294  Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2295  }
2296  Parser.Lex(); // Eat the token.
2297  Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2298  return MatchOperand_Success;
2299  }
2300 
2301  if (!Hash)
2302  return MatchOperand_NoMatch;
2303 
2304  TokError("invalid floating point immediate");
2305  return MatchOperand_ParseFail;
2306 }
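// For example, "#1.0" is folded by getFP64Imm into the 8-bit AArch64 FP
// immediate encoding (0x70 in that case), "#0.0" is let through with an
// invalid encoding so later processing can substitute the zero register, and
// a pre-encoded "#0x70" is accepted as-is provided it fits in a byte.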
2307 
2308 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2309 OperandMatchResultTy
2310 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2311  MCAsmParser &Parser = getParser();
2312  SMLoc S = getLoc();
2313 
2314  if (Parser.getTok().is(AsmToken::Hash))
2315  Parser.Lex(); // Eat '#'
2316  else if (Parser.getTok().isNot(AsmToken::Integer))
2317  // The operand must start with '#' or be a plain integer; otherwise this
2317  // is not an add/sub immediate.
2318  return MatchOperand_NoMatch;
2319 
2320  const MCExpr *Imm;
2321  if (parseSymbolicImmVal(Imm))
2322  return MatchOperand_ParseFail;
2323  else if (Parser.getTok().isNot(AsmToken::Comma)) {
2324  uint64_t ShiftAmount = 0;
2325  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2326  if (MCE) {
2327  int64_t Val = MCE->getValue();
2328  if (Val > 0xfff && (Val & 0xfff) == 0) {
2329  Imm = MCConstantExpr::create(Val >> 12, getContext());
2330  ShiftAmount = 12;
2331  }
2332  }
2333  SMLoc E = Parser.getTok().getLoc();
2334  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2335  getContext()));
2336  return MatchOperand_Success;
2337  }
2338 
2339  // Eat ','
2340  Parser.Lex();
2341 
2342  // The optional operand must be "lsl #N" where N is non-negative.
2343  if (!Parser.getTok().is(AsmToken::Identifier) ||
2344  !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2345  Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2346  return MatchOperand_ParseFail;
2347  }
2348 
2349  // Eat 'lsl'
2350  Parser.Lex();
2351 
2352  parseOptionalToken(AsmToken::Hash);
2353 
2354  if (Parser.getTok().isNot(AsmToken::Integer)) {
2355  Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2356  return MatchOperand_ParseFail;
2357  }
2358 
2359  int64_t ShiftAmount = Parser.getTok().getIntVal();
2360 
2361  if (ShiftAmount < 0) {
2362  Error(Parser.getTok().getLoc(), "positive shift amount required");
2363  return MatchOperand_ParseFail;
2364  }
2365  Parser.Lex(); // Eat the number
2366 
2367  SMLoc E = Parser.getTok().getLoc();
2368  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2369  S, E, getContext()));
2370  return MatchOperand_Success;
2371 }
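// For example, "add x0, x1, #0x3000" reaches this point as a plain immediate
// and is folded to the value 3 with an implicit "lsl #12", whereas
// "add x0, x1, #3, lsl #12" spells the shift out explicitly.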
2372 
2373 /// parseCondCodeString - Parse a Condition Code string.
2374 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2375  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2376  .Case("eq", AArch64CC::EQ)
2377  .Case("ne", AArch64CC::NE)
2378  .Case("cs", AArch64CC::HS)
2379  .Case("hs", AArch64CC::HS)
2380  .Case("cc", AArch64CC::LO)
2381  .Case("lo", AArch64CC::LO)
2382  .Case("mi", AArch64CC::MI)
2383  .Case("pl", AArch64CC::PL)
2384  .Case("vs", AArch64CC::VS)
2385  .Case("vc", AArch64CC::VC)
2386  .Case("hi", AArch64CC::HI)
2387  .Case("ls", AArch64CC::LS)
2388  .Case("ge", AArch64CC::GE)
2389  .Case("lt", AArch64CC::LT)
2390  .Case("gt", AArch64CC::GT)
2391  .Case("le", AArch64CC::LE)
2392  .Case("al", AArch64CC::AL)
2393  .Case("nv", AArch64CC::NV)
2394  .Default(AArch64CC::Invalid);
2395  return CC;
2396 }
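// Note that several mnemonic aliases share an encoding: "cs" and "hs" both
// map to AArch64CC::HS, and "cc" and "lo" both map to AArch64CC::LO; any
// unrecognised string comes back as AArch64CC::Invalid.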
2397 
2398 /// parseCondCode - Parse a Condition Code operand.
2399 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2400  bool invertCondCode) {
2401  MCAsmParser &Parser = getParser();
2402  SMLoc S = getLoc();
2403  const AsmToken &Tok = Parser.getTok();
2404  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2405 
2406  StringRef Cond = Tok.getString();
2407  AArch64CC::CondCode CC = parseCondCodeString(Cond);
2408  if (CC == AArch64CC::Invalid)
2409  return TokError("invalid condition code");
2410  Parser.Lex(); // Eat identifier token.
2411 
2412  if (invertCondCode) {
2413  if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2414  return TokError("condition codes AL and NV are invalid for this instruction");
2415  CC = AArch64CC::getInvertedCondCode(CC);
2416  }
2417 
2418  Operands.push_back(
2419  AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2420  return false;
2421 }
2422 
2423 /// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
2424 /// argument. Parse them if present.
2425 OperandMatchResultTy
2426 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2427  MCAsmParser &Parser = getParser();
2428  const AsmToken &Tok = Parser.getTok();
2429  std::string LowerID = Tok.getString().lower();
2430  AArch64_AM::ShiftExtendType ShOp =
2431  StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2432  .Case("lsl", AArch64_AM::LSL)
2433  .Case("lsr", AArch64_AM::LSR)
2434  .Case("asr", AArch64_AM::ASR)
2435  .Case("ror", AArch64_AM::ROR)
2436  .Case("msl", AArch64_AM::MSL)
2437  .Case("uxtb", AArch64_AM::UXTB)
2438  .Case("uxth", AArch64_AM::UXTH)
2439  .Case("uxtw", AArch64_AM::UXTW)
2440  .Case("uxtx", AArch64_AM::UXTX)
2441  .Case("sxtb", AArch64_AM::SXTB)
2442  .Case("sxth", AArch64_AM::SXTH)
2443  .Case("sxtw", AArch64_AM::SXTW)
2444  .Case("sxtx", AArch64_AM::SXTX)
2445  .Default(AArch64_AM::InvalidShiftExtend);
2446 
2447  if (ShOp == AArch64_AM::InvalidShiftExtend)
2448  return MatchOperand_NoMatch;
2449 
2450  SMLoc S = Tok.getLoc();
2451  Parser.Lex();
2452 
2453  bool Hash = parseOptionalToken(AsmToken::Hash);
2454 
2455  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2456  if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2457  ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2458  ShOp == AArch64_AM::MSL) {
2459  // We expect a number here.
2460  TokError("expected #imm after shift specifier");
2461  return MatchOperand_ParseFail;
2462  }
2463 
2464  // "extend" type operations don't need an immediate, #0 is implicit.
2465  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2466  Operands.push_back(
2467  AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2468  return MatchOperand_Success;
2469  }
2470 
2471  // Make sure we do actually have a number, identifier or a parenthesized
2472  // expression.
2473  SMLoc E = Parser.getTok().getLoc();
2474  if (!Parser.getTok().is(AsmToken::Integer) &&
2475  !Parser.getTok().is(AsmToken::LParen) &&
2476  !Parser.getTok().is(AsmToken::Identifier)) {
2477  Error(E, "expected integer shift amount");
2478  return MatchOperand_ParseFail;
2479  }
2480 
2481  const MCExpr *ImmVal;
2482  if (getParser().parseExpression(ImmVal))
2483  return MatchOperand_ParseFail;
2484 
2485  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2486  if (!MCE) {
2487  Error(E, "expected constant '#imm' after shift specifier");
2488  return MatchOperand_ParseFail;
2489  }
2490 
2491  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2492  Operands.push_back(AArch64Operand::CreateShiftExtend(
2493  ShOp, MCE->getValue(), true, S, E, getContext()));
2494  return MatchOperand_Success;
2495 }
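// For example, "uxtw" with no amount yields an extend with an implicit #0,
// "lsl" on its own is rejected because shifts require an immediate, and
// "lsl #3" produces a shift with an explicit amount of 3.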
2496 
2497 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2498 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2499 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2500  OperandVector &Operands) {
2501  if (Name.find('.') != StringRef::npos)
2502  return TokError("invalid operand");
2503 
2504  Mnemonic = Name;
2505  Operands.push_back(
2506  AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2507 
2508  MCAsmParser &Parser = getParser();
2509  const AsmToken &Tok = Parser.getTok();
2510  StringRef Op = Tok.getString();
2511  SMLoc S = Tok.getLoc();
2512 
2513  const MCExpr *Expr = nullptr;
2514 
2515 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2516  do { \
2517  Expr = MCConstantExpr::create(op1, getContext()); \
2518  Operands.push_back( \
2519  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2520  Operands.push_back( \
2521  AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2522  Operands.push_back( \
2523  AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2524  Expr = MCConstantExpr::create(op2, getContext()); \
2525  Operands.push_back( \
2526  AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2527  } while (false)
2528 
2529  if (Mnemonic == "ic") {
2530  if (!Op.compare_lower("ialluis")) {
2531  // SYS #0, C7, C1, #0
2532  SYS_ALIAS(0, 7, 1, 0);
2533  } else if (!Op.compare_lower("iallu")) {
2534  // SYS #0, C7, C5, #0
2535  SYS_ALIAS(0, 7, 5, 0);
2536  } else if (!Op.compare_lower("ivau")) {
2537  // SYS #3, C7, C5, #1
2538  SYS_ALIAS(3, 7, 5, 1);
2539  } else {
2540  return TokError("invalid operand for IC instruction");
2541  }
2542  } else if (Mnemonic == "dc") {
2543  if (!Op.compare_lower("zva")) {
2544  // SYS #3, C7, C4, #1
2545  SYS_ALIAS(3, 7, 4, 1);
2546  } else if (!Op.compare_lower("ivac")) {
2547  // SYS #0, C7, C6, #1
2548  SYS_ALIAS(0, 7, 6, 1);
2549  } else if (!Op.compare_lower("isw")) {
2550  // SYS #0, C7, C6, #2
2551  SYS_ALIAS(0, 7, 6, 2);
2552  } else if (!Op.compare_lower("cvac")) {
2553  // SYS #3, C7, C10, #1
2554  SYS_ALIAS(3, 7, 10, 1);
2555  } else if (!Op.compare_lower("csw")) {
2556  // SYS #0, C7, C10, #2
2557  SYS_ALIAS(0, 7, 10, 2);
2558  } else if (!Op.compare_lower("cvau")) {
2559  // SYS #3, C7, C11, #1
2560  SYS_ALIAS(3, 7, 11, 1);
2561  } else if (!Op.compare_lower("civac")) {
2562  // SYS #3, C7, C14, #1
2563  SYS_ALIAS(3, 7, 14, 1);
2564  } else if (!Op.compare_lower("cisw")) {
2565  // SYS #0, C7, C14, #2
2566  SYS_ALIAS(0, 7, 14, 2);
2567  } else if (!Op.compare_lower("cvap")) {
2568  if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2569  // SYS #3, C7, C12, #1
2570  SYS_ALIAS(3, 7, 12, 1);
2571  } else {
2572  return TokError("DC CVAP requires ARMv8.2a");
2573  }
2574  } else {
2575  return TokError("invalid operand for DC instruction");
2576  }
2577  } else if (Mnemonic == "at") {
2578  if (!Op.compare_lower("s1e1r")) {
2579  // SYS #0, C7, C8, #0
2580  SYS_ALIAS(0, 7, 8, 0);
2581  } else if (!Op.compare_lower("s1e2r")) {
2582  // SYS #4, C7, C8, #0
2583  SYS_ALIAS(4, 7, 8, 0);
2584  } else if (!Op.compare_lower("s1e3r")) {
2585  // SYS #6, C7, C8, #0
2586  SYS_ALIAS(6, 7, 8, 0);
2587  } else if (!Op.compare_lower("s1e1w")) {
2588  // SYS #0, C7, C8, #1
2589  SYS_ALIAS(0, 7, 8, 1);
2590  } else if (!Op.compare_lower("s1e2w")) {
2591  // SYS #4, C7, C8, #1
2592  SYS_ALIAS(4, 7, 8, 1);
2593  } else if (!Op.compare_lower("s1e3w")) {
2594  // SYS #6, C7, C8, #1
2595  SYS_ALIAS(6, 7, 8, 1);
2596  } else if (!Op.compare_lower("s1e0r")) {
2597  // SYS #0, C7, C8, #2
2598  SYS_ALIAS(0, 7, 8, 2);
2599  } else if (!Op.compare_lower("s1e0w")) {
2600  // SYS #0, C7, C8, #3
2601  SYS_ALIAS(0, 7, 8, 3);
2602  } else if (!Op.compare_lower("s12e1r")) {
2603  // SYS #4, C7, C8, #4
2604  SYS_ALIAS(4, 7, 8, 4);
2605  } else if (!Op.compare_lower("s12e1w")) {
2606  // SYS #4, C7, C8, #5
2607  SYS_ALIAS(4, 7, 8, 5);
2608  } else if (!Op.compare_lower("s12e0r")) {
2609  // SYS #4, C7, C8, #6
2610  SYS_ALIAS(4, 7, 8, 6);
2611  } else if (!Op.compare_lower("s12e0w")) {
2612  // SYS #4, C7, C8, #7
2613  SYS_ALIAS(4, 7, 8, 7);
2614  } else if (!Op.compare_lower("s1e1rp")) {
2615  if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2616  // SYS #0, C7, C9, #0
2617  SYS_ALIAS(0, 7, 9, 0);
2618  } else {
2619  return TokError("AT S1E1RP requires ARMv8.2a");
2620  }
2621  } else if (!Op.compare_lower("s1e1wp")) {
2622  if (getSTI().getFeatureBits()[AArch64::HasV8_2aOps]) {
2623  // SYS #0, C7, C9, #1
2624  SYS_ALIAS(0, 7, 9, 1);
2625  } else {
2626  return TokError("AT S1E1WP requires ARMv8.2a");
2627  }
2628  } else {
2629  return TokError("invalid operand for AT instruction");
2630  }
2631  } else if (Mnemonic == "tlbi") {
2632  if (!Op.compare_lower("vmalle1is")) {
2633  // SYS #0, C8, C3, #0
2634  SYS_ALIAS(0, 8, 3, 0);
2635  } else if (!Op.compare_lower("alle2is")) {
2636  // SYS #4, C8, C3, #0
2637  SYS_ALIAS(4, 8, 3, 0);
2638  } else if (!Op.compare_lower("alle3is")) {
2639  // SYS #6, C8, C3, #0
2640  SYS_ALIAS(6, 8, 3, 0);
2641  } else if (!Op.compare_lower("vae1is")) {
2642  // SYS #0, C8, C3, #1
2643  SYS_ALIAS(0, 8, 3, 1);
2644  } else if (!Op.compare_lower("vae2is")) {
2645  // SYS #4, C8, C3, #1
2646  SYS_ALIAS(4, 8, 3, 1);
2647  } else if (!Op.compare_lower("vae3is")) {
2648  // SYS #6, C8, C3, #1
2649  SYS_ALIAS(6, 8, 3, 1);
2650  } else if (!Op.compare_lower("aside1is")) {
2651  // SYS #0, C8, C3, #2
2652  SYS_ALIAS(0, 8, 3, 2);
2653  } else if (!Op.compare_lower("vaae1is")) {
2654  // SYS #0, C8, C3, #3
2655  SYS_ALIAS(0, 8, 3, 3);
2656  } else if (!Op.compare_lower("alle1is")) {
2657  // SYS #4, C8, C3, #4
2658  SYS_ALIAS(4, 8, 3, 4);
2659  } else if (!Op.compare_lower("vale1is")) {
2660  // SYS #0, C8, C3, #5
2661  SYS_ALIAS(0, 8, 3, 5);
2662  } else if (!Op.compare_lower("vaale1is")) {
2663  // SYS #0, C8, C3, #7
2664  SYS_ALIAS(0, 8, 3, 7);
2665  } else if (!Op.compare_lower("vmalle1")) {
2666  // SYS #0, C8, C7, #0
2667  SYS_ALIAS(0, 8, 7, 0);
2668  } else if (!Op.compare_lower("alle2")) {
2669  // SYS #4, C8, C7, #0
2670  SYS_ALIAS(4, 8, 7, 0);
2671  } else if (!Op.compare_lower("vale2is")) {
2672  // SYS #4, C8, C3, #5
2673  SYS_ALIAS(4, 8, 3, 5);
2674  } else if (!Op.compare_lower("vale3is")) {
2675  // SYS #6, C8, C3, #5
2676  SYS_ALIAS(6, 8, 3, 5);
2677  } else if (!Op.compare_lower("alle3")) {
2678  // SYS #6, C8, C7, #0
2679  SYS_ALIAS(6, 8, 7, 0);
2680  } else if (!Op.compare_lower("vae1")) {
2681  // SYS #0, C8, C7, #1
2682  SYS_ALIAS(0, 8, 7, 1);
2683  } else if (!Op.compare_lower("vae2")) {
2684  // SYS #4, C8, C7, #1
2685  SYS_ALIAS(4, 8, 7, 1);
2686  } else if (!Op.compare_lower("vae3")) {
2687  // SYS #6, C8, C7, #1
2688  SYS_ALIAS(6, 8, 7, 1);
2689  } else if (!Op.compare_lower("aside1")) {
2690  // SYS #0, C8, C7, #2
2691  SYS_ALIAS(0, 8, 7, 2);
2692  } else if (!Op.compare_lower("vaae1")) {
2693  // SYS #0, C8, C7, #3
2694  SYS_ALIAS(0, 8, 7, 3);
2695  } else if (!Op.compare_lower("alle1")) {
2696  // SYS #4, C8, C7, #4
2697  SYS_ALIAS(4, 8, 7, 4);
2698  } else if (!Op.compare_lower("vale1")) {
2699  // SYS #0, C8, C7, #5
2700  SYS_ALIAS(0, 8, 7, 5);
2701  } else if (!Op.compare_lower("vale2")) {
2702  // SYS #4, C8, C7, #5
2703  SYS_ALIAS(4, 8, 7, 5);
2704  } else if (!Op.compare_lower("vale3")) {
2705  // SYS #6, C8, C7, #5
2706  SYS_ALIAS(6, 8, 7, 5);
2707  } else if (!Op.compare_lower("vaale1")) {
2708  // SYS #0, C8, C7, #7
2709  SYS_ALIAS(0, 8, 7, 7);
2710  } else if (!Op.compare_lower("ipas2e1")) {
2711  // SYS #4, C8, C4, #1
2712  SYS_ALIAS(4, 8, 4, 1);
2713  } else if (!Op.compare_lower("ipas2le1")) {
2714  // SYS #4, C8, C4, #5
2715  SYS_ALIAS(4, 8, 4, 5);
2716  } else if (!Op.compare_lower("ipas2e1is")) {
2717  // SYS #4, C8, C0, #1
2718  SYS_ALIAS(4, 8, 0, 1);
2719  } else if (!Op.compare_lower("ipas2le1is")) {
2720  // SYS #4, C8, C0, #5
2721  SYS_ALIAS(4, 8, 0, 5);
2722  } else if (!Op.compare_lower("vmalls12e1")) {
2723  // SYS #4, C8, C7, #6
2724  SYS_ALIAS(4, 8, 7, 6);
2725  } else if (!Op.compare_lower("vmalls12e1is")) {
2726  // SYS #4, C8, C3, #6
2727  SYS_ALIAS(4, 8, 3, 6);
2728  } else {
2729  return TokError("invalid operand for TLBI instruction");
2730  }
2731  }
2732 
2733 #undef SYS_ALIAS
2734 
2735  Parser.Lex(); // Eat operand.
2736 
2737  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2738  bool HasRegister = false;
2739 
2740  // Check for the optional register operand.
2741  if (parseOptionalToken(AsmToken::Comma)) {
2742  if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2743  return TokError("expected register operand");
2744  HasRegister = true;
2745  }
2746 
2747  if (ExpectRegister && !HasRegister) {
2748  return TokError("specified " + Mnemonic + " op requires a register");
2749  }
2750  else if (!ExpectRegister && HasRegister) {
2751  return TokError("specified " + Mnemonic + " op does not use a register");
2752  }
2753 
2754  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
2755  return true;
2756 
2757  return false;
2758 }
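// For example, "ic ialluis" is rewritten here to the generic
// "sys #0, c7, c1, #0" form and must not take a register, while
// "dc civac, x1" becomes "sys #3, c7, c14, #1, x1" and requires one, since
// its name does not contain "all".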
2759 
2760 OperandMatchResultTy
2761 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2762  MCAsmParser &Parser = getParser();
2763  const AsmToken &Tok = Parser.getTok();
2764 
2765  // Can be either a #imm style literal or an option name
2766  if (parseOptionalToken(AsmToken::Hash) ||
2767  Tok.is(AsmToken::Integer)) {
2768  // Immediate operand.
2769  const MCExpr *ImmVal;
2770  SMLoc ExprLoc = getLoc();
2771  if (getParser().parseExpression(ImmVal))
2772  return MatchOperand_ParseFail;
2773  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2774  if (!MCE) {
2775  Error(ExprLoc, "immediate value expected for barrier operand");
2776  return MatchOperand_ParseFail;
2777  }
2778  if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2779  Error(ExprLoc, "barrier operand out of range");
2780  return MatchOperand_ParseFail;
2781  }
2782  auto DB = AArch64DB::lookupDBByEncoding(MCE->getValue());
2783  Operands.push_back(AArch64Operand::CreateBarrier(
2784  MCE->getValue(), DB ? DB->Name : "", ExprLoc, getContext()));
2785  return MatchOperand_Success;
2786  }
2787 
2788  if (Tok.isNot(AsmToken::Identifier)) {
2789  TokError("invalid operand for instruction");
2790  return MatchOperand_ParseFail;
2791  }
2792 
2793  auto DB = AArch64DB::lookupDBByName(Tok.getString());
2794  if (!DB) {
2795  TokError("invalid barrier option name");
2796  return MatchOperand_ParseFail;
2797  }
2798 
2799  // The only valid named option for ISB is 'sy'
2800  if (Mnemonic == "isb" && DB->Encoding != AArch64DB::sy) {
2801  TokError("'sy' or #imm operand expected");
2802  return MatchOperand_ParseFail;
2803  }
2804 
2805  Operands.push_back(AArch64Operand::CreateBarrier(
2806  DB->Encoding, Tok.getString(), getLoc(), getContext()));
2807  Parser.Lex(); // Consume the option
2808 
2809  return MatchOperand_Success;
2810 }
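// For example, "dmb ish" resolves the named option via lookupDBByName,
// "dmb #11" supplies the same 4-bit value directly (11 should be the ISH
// encoding), and "isb ld" is rejected because ISB only accepts 'sy' or an
// immediate.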
2811 
2812 OperandMatchResultTy
2813 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2814  MCAsmParser &Parser = getParser();
2815  const AsmToken &Tok = Parser.getTok();
2816 
2817  if (Tok.isNot(AsmToken::Identifier))
2818  return MatchOperand_NoMatch;
2819 
2820  int MRSReg, MSRReg;
2821  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
2822  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
2823  MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
2824  MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
2825  } else
2826  MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
2827 
2828  auto PState = AArch64PState::lookupPStateByName(Tok.getString());
2829  unsigned PStateImm = -1;
2830  if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
2831  PStateImm = PState->Encoding;
2832 
2833  Operands.push_back(
2834  AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
2835  PStateImm, getContext()));
2836  Parser.Lex(); // Eat identifier
2837 
2838  return MatchOperand_Success;
2839 }
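// For example, "mrs x0, MIDR_EL1" is resolved by lookupSysRegByName (and, as
// a read-only register, gets MSRReg = -1), while a register written in the
// generic "S<op0>_<op1>_C<Cn>_C<Cm>_<op2>" form falls back to
// parseGenericRegister.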
2840 
2841 /// tryParseVectorRegister - Parse a vector register operand.
2842 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2843  MCAsmParser &Parser = getParser();
2844  if (Parser.getTok().isNot(AsmToken::Identifier))
2845  return true;
2846 
2847  SMLoc S = getLoc();
2848  // Check for a vector register specifier first.
2849  StringRef Kind;
2850  int64_t Reg = tryMatchVectorRegister(Kind, false);
2851  if (Reg == -1)
2852  return true;
2853  Operands.push_back(
2854  AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2855  // If there was an explicit qualifier, that goes on as a literal text
2856  // operand.
2857  if (!Kind.empty())
2858  Operands.push_back(
2859  AArch64Operand::CreateToken(Kind, false, S, getContext()));
2860 
2861  // If there is an index specifier following the register, parse that too.
2862  SMLoc SIdx = getLoc();
2863  if (parseOptionalToken(AsmToken::LBrac)) {
2864  const MCExpr *ImmVal;
2865  if (getParser().parseExpression(ImmVal))
2866  return false;
2867  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2868  if (!MCE) {
2869  TokError("immediate value expected for vector index");
2870  return false;
2871  }
2872 
2873  SMLoc E = getLoc();
2874 
2875  if (parseToken(AsmToken::RBrac, "']' expected"))
2876  return false;
2877 
2878  Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2879  E, getContext()));
2880  }
2881 
2882  return false;
2883 }
2884 
2885 /// parseRegister - Parse a non-vector register operand.
2886 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2887  MCAsmParser &Parser = getParser();
2888  SMLoc S = getLoc();
2889  // Try for a vector register.
2890  if (!tryParseVectorRegister(Operands))
2891  return false;
2892 
2893  // Try for a scalar register.
2894  int64_t Reg = tryParseRegister();
2895  if (Reg == -1)
2896  return true;
2897  Operands.push_back(
2898  AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2899 
2900  // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2901  // as a string token in the instruction itself.
2902  SMLoc LBracS = getLoc();
2903  const AsmToken &Tok = Parser.getTok();
2904  if (parseOptionalToken(AsmToken::LBrac)) {
2905  if (Tok.is(AsmToken::Integer)) {
2906  SMLoc IntS = getLoc();
2907  int64_t Val = Tok.getIntVal();
2908  if (Val == 1) {
2909  Parser.Lex();
2910  SMLoc RBracS = getLoc();
2911  if (parseOptionalToken(AsmToken::RBrac)) {
2912  Operands.push_back(
2913  AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2914  Operands.push_back(
2915  AArch64Operand::CreateToken("1", false, IntS, getContext()));
2916  Operands.push_back(
2917  AArch64Operand::CreateToken("]", false, RBracS, getContext()));
2918  return false;
2919  }
2920  }
2921  }
2922  }
2923 
2924  return false;
2925 }
2926 
2927 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2928  MCAsmParser &Parser = getParser();
2929  bool HasELFModifier = false;
2930  AArch64MCExpr::VariantKind RefKind = AArch64MCExpr::VK_INVALID;
2931 
2932  if (parseOptionalToken(AsmToken::Colon)) {
2933  HasELFModifier = true;
2934 
2935  if (Parser.getTok().isNot(AsmToken::Identifier))
2936  return TokError("expect relocation specifier in operand after ':'");
2937 
2938  std::string LowerCase = Parser.getTok().getIdentifier().lower();
2939  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2940  .Case("lo12", AArch64MCExpr::VK_LO12)
2941  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2942  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2943  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2944  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2945  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2946  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2947  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2948  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2949  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2950  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2951  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2952  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2953  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2954  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2955  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2956  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2957  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2958  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2959  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2960  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2961  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2962  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2963  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2964  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2965  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2966  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2967  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2968  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2969  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2970  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2971  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2972  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2973  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2974  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2975  .Default(AArch64MCExpr::VK_INVALID);
2976 
2977  if (RefKind == AArch64MCExpr::VK_INVALID)
2978  return TokError("expect relocation specifier in operand after ':'");
2979 
2980  Parser.Lex(); // Eat identifier
2981 
2982  if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
2983  return true;
2984  }
2985 
2986  if (getParser().parseExpression(ImmVal))
2987  return true;
2988 
2989  if (HasELFModifier)
2990  ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
2991 
2992  return false;
2993 }
2994 
2995 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
2996 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2997  MCAsmParser &Parser = getParser();
2998  assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Curly Brace");
2999  SMLoc S = getLoc();
3000  Parser.Lex(); // Eat left bracket token.
3001  StringRef Kind;
3002  int64_t FirstReg = tryMatchVectorRegister(Kind, true);
3003  if (FirstReg == -1)
3004  return true;
3005  int64_t PrevReg = FirstReg;
3006  unsigned Count = 1;
3007 
3008  if (parseOptionalToken(AsmToken::Minus)) {
3009  SMLoc Loc = getLoc();
3010  StringRef NextKind;
3011  int64_t Reg = tryMatchVectorRegister(NextKind, true);
3012  if (Reg == -1)
3013  return true;
3014  // Any kind suffixes must match on all regs in the list.
3015  if (Kind != NextKind)
3016  return Error(Loc, "mismatched register size suffix");
3017 
3018  unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3019 
3020  if (Space == 0 || Space > 3) {
3021  return Error(Loc, "invalid number of vectors");
3022  }
3023 
3024  Count += Space;
3025  }
3026  else {
3027  while (parseOptionalToken(AsmToken::Comma)) {
3028  SMLoc Loc = getLoc();
3029  StringRef NextKind;
3030  int64_t Reg = tryMatchVectorRegister(NextKind, true);
3031  if (Reg == -1)
3032  return true;
3033  // Any kind suffixes must match on all regs in the list.
3034  if (Kind != NextKind)
3035  return Error(Loc, "mismatched register size suffix");
3036 
3037  // Registers must be incremental (with wraparound at 31)
3038  if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3039  (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
3040  return Error(Loc, "registers must be sequential");
3041 
3042  PrevReg = Reg;
3043  ++Count;
3044  }
3045  }
3046 
3047  if (parseToken(AsmToken::RCurly, "'}' expected"))
3048  return true;
3049 
3050  if (Count > 4)
3051  return Error(S, "invalid number of vectors");
3052 
3053  unsigned NumElements = 0;
3054  char ElementKind = 0;
3055  if (!Kind.empty())
3056  parseValidVectorKind(Kind, NumElements, ElementKind);
3057 
3058  Operands.push_back(AArch64Operand::CreateVectorList(
3059  FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
3060 
3061  // If there is an index specifier following the list, parse that too.
3062  SMLoc SIdx = getLoc();
3063  if (parseOptionalToken(AsmToken::LBrac)) { // Eat left bracket token.
3064  const MCExpr *ImmVal;
3065  if (getParser().parseExpression(ImmVal))
3066  return false;
3067  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3068  if (!MCE) {
3069  TokError("immediate value expected for vector index");
3070  return false;
3071  }
3072 
3073  SMLoc E = getLoc();
3074  if (parseToken(AsmToken::RBrac, "']' expected"))
3075  return false;
3076 
3077  Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
3078  E, getContext()));
3079  }
3080  return false;
3081 }
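// For example, "{ v0.4s, v1.4s, v2.4s, v3.4s }" and the range form
// "{ v0.4s - v3.4s }" both yield a list starting at Q0 with Count = 4, and
// "{ v31.2d, v0.2d }" is accepted because register numbering wraps at 31.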
3082 
3083 OperandMatchResultTy
3084 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3085  MCAsmParser &Parser = getParser();
3086  const AsmToken &Tok = Parser.getTok();
3087  if (!Tok.is(AsmToken::Identifier))
3088  return MatchOperand_NoMatch;
3089 
3090  unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
3091 
3092  MCContext &Ctx = getContext();
3093  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
3094  if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
3095  return MatchOperand_NoMatch;
3096 
3097  SMLoc S = getLoc();
3098  Parser.Lex(); // Eat register
3099 
3100  if (!parseOptionalToken(AsmToken::Comma)) {
3101  Operands.push_back(
3102  AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3103  return MatchOperand_Success;
3104  }
3105 
3106  parseOptionalToken(AsmToken::Hash);
3107 
3108  if (Parser.getTok().isNot(AsmToken::Integer)) {
3109  Error(getLoc(), "index must be absent or #0");
3110  return MatchOperand_ParseFail;
3111  }
3112 
3113  const MCExpr *ImmVal;
3114  if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3115  cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3116  Error(getLoc(), "index must be absent or #0");
3117  return MatchOperand_ParseFail;
3118  }
3119 
3120  Operands.push_back(
3121  AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3122  return MatchOperand_Success;
3123 }
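// For example, both "x3" and "x3, #0" produce the same GPR64sp register
// operand here; any index other than an explicit zero is rejected with
// "index must be absent or #0".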
3124 
3125 /// parseOperand - Parse an AArch64 instruction operand. For now this parses the
3126 /// operand regardless of the mnemonic.
3127 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3128  bool invertCondCode) {
3129  MCAsmParser &Parser = getParser();
3130  // Check if the current operand has a custom associated parser, if so, try to
3131  // custom parse the operand, or fallback to the general approach.
3132  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3133  if (ResTy == MatchOperand_Success)
3134  return false;
3135  // If there wasn't a custom match, try the generic matcher below. Otherwise,
3136  // there was a match, but an error occurred, in which case, just return that
3137  // the operand parsing failed.
3138  if (ResTy == MatchOperand_ParseFail)
3139  return true;
3140 
3141  // Nothing custom, so do general case parsing.
3142  SMLoc S, E;
3143  switch (getLexer().getKind()) {
3144  default: {
3145  SMLoc S = getLoc();
3146  const MCExpr *Expr;
3147  if (parseSymbolicImmVal(Expr))
3148  return Error(S, "invalid operand");
3149 
3150  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3151  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3152  return false;
3153  }
3154  case AsmToken::LBrac: {
3155  SMLoc Loc = Parser.getTok().getLoc();
3156  Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3157  getContext()));
3158  Parser.Lex(); // Eat '['
3159 
3160  // There's no comma after a '[', so we can parse the next operand
3161  // immediately.
3162  return parseOperand(Operands, false, false);
3163  }
3164  case AsmToken::LCurly:
3165  return parseVectorList(Operands);
3166  case AsmToken::Identifier: {
3167  // If we're expecting a Condition Code operand, then just parse that.
3168  if (isCondCode)
3169  return parseCondCode(Operands, invertCondCode);
3170 
3171  // If it's a register name, parse it.
3172  if (!parseRegister(Operands))
3173  return false;
3174 
3175  // This could be an optional "shift" or "extend" operand.
3176  OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3177  // We can only continue if no tokens were eaten.
3178  if (GotShift != MatchOperand_NoMatch)
3179  return GotShift;
3180 
3181  // This was not a register so parse other operands that start with an
3182  // identifier (like labels) as expressions and create them as immediates.
3183  const MCExpr *IdVal;
3184  S = getLoc();
3185  if (getParser().parseExpression(IdVal))
3186  return true;
3187  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3188  Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3189  return false;
3190  }
3191  case AsmToken::Integer:
3192  case AsmToken::Real:
3193  case AsmToken::Hash: {
3194  // #42 -> immediate.
3195  S = getLoc();
3196 
3197  parseOptionalToken(AsmToken::Hash);
3198 
3199  // Parse a negative sign
3200  bool isNegative = false;
3201  if (Parser.getTok().is(AsmToken::Minus)) {
3202  isNegative = true;
3203  // We need to consume this token only when we have a Real, otherwise
3204  // we let parseSymbolicImmVal take care of it
3205  if (Parser.getLexer().peekTok().is(AsmToken::Real))
3206  Parser.Lex();
3207  }
3208 
3209  // The only Real that should come through here is a literal #0.0 for
3210  // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3211  // so convert the value.
3212  const AsmToken &Tok = Parser.getTok();
3213  if (Tok.is(AsmToken::Real)) {
3214  APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
3215  uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3216  if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3217  Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3218  Mnemonic != "fcmlt")
3219  return TokError("unexpected floating point literal");
3220  else if (IntVal != 0 || isNegative)
3221  return TokError("expected floating-point constant #0.0");
3222  Parser.Lex(); // Eat the token.
3223 
3224  Operands.push_back(
3225  AArch64Operand::CreateToken("#0", false, S, getContext()));
3226  Operands.push_back(
3227  AArch64Operand::CreateToken(".0", false, S, getContext()));
3228  return false;
3229  }
3230 
3231  const MCExpr *ImmVal;
3232  if (parseSymbolicImmVal(ImmVal))
3233  return true;
3234 
3235  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3236  Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3237  return false;
3238  }
3239  case AsmToken::Equal: {
3240  SMLoc Loc = getLoc();
3241  if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr x0, =val)
3242  return TokError("unexpected token in operand");
3243  Parser.Lex(); // Eat '='
3244  const MCExpr *SubExprVal;
3245  if (getParser().parseExpression(SubExprVal))
3246  return true;
3247 
3248  if (Operands.size() < 2 ||
3249  !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3250  return Error(Loc, "Only valid when first operand is register");
3251 
3252  bool IsXReg =
3253  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3254  Operands[1]->getReg());
3255 
3256  MCContext& Ctx = getContext();
3257  E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3258  // If the op is an imm that fits into a mov, then replace the ldr with a mov.
3259  if (isa<MCConstantExpr>(SubExprVal)) {
3260  uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3261  uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3262  while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3263  ShiftAmt += 16;
3264  Imm >>= 16;
3265  }
3266  if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3267  Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3268  Operands.push_back(AArch64Operand::CreateImm(
3269  MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3270  if (ShiftAmt)
3271  Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3272  ShiftAmt, true, S, E, Ctx));
3273  return false;
3274  }
3275  APInt Simm = APInt(64, Imm << ShiftAmt);
3276  // check if the immediate is an unsigned or signed 32-bit int for W regs
3277  if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3278  return Error(Loc, "Immediate too large for register");
3279  }
3280  // If it is a label or an imm that cannot fit in a movz, put it into CP.
3281  const MCExpr *CPLoc =
3282  getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3283  Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3284  return false;
3285  }
3286  }
3287 }
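// For example, the "ldr x0, =0x20000" pseudo handled above is rewritten to
// "movz x0, #2, lsl #16" because the constant fits once shifted, whereas
// "ldr x0, =some_label" cannot be folded and is routed through a constant
// pool entry instead.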
3288 
3289 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3290 /// operands.
3291 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3292  StringRef Name, SMLoc NameLoc,
3293  OperandVector &Operands) {
3294  MCAsmParser &Parser = getParser();
3295  Name = StringSwitch<StringRef>(Name.lower())
3296  .Case("beq", "b.eq")
3297  .Case("bne", "b.ne")
3298  .Case("bhs", "b.hs")
3299  .Case("bcs", "b.cs")
3300  .Case("blo", "b.lo")
3301  .Case("bcc", "b.cc")
3302  .Case("bmi", "b.mi")
3303  .Case("bpl", "b.pl")
3304  .Case("bvs", "b.vs")
3305  .Case("bvc", "b.vc")
3306  .Case("bhi", "b.hi")
3307  .Case("bls", "b.ls")
3308  .Case("bge", "b.ge")
3309  .Case("blt", "b.lt")
3310  .Case("bgt", "b.gt")
3311  .Case("ble", "b.le")
3312  .Case("bal", "b.al")
3313  .Case("bnv", "b.nv")
3314  .Default(Name);
3315 
3316  // First check for the AArch64-specific .req directive.
3317  if (Parser.getTok().is(AsmToken::Identifier) &&
3318  Parser.getTok().getIdentifier() == ".req") {
3319  parseDirectiveReq(Name, NameLoc);
3320  // We always return 'error' for this, as we're done with this
3321  // statement and don't need to match the instruction.
3322  return true;
3323  }
3324 
3325  // Create the leading tokens for the mnemonic, split by '.' characters.
3326  size_t Start = 0, Next = Name.find('.');
3327  StringRef Head = Name.slice(Start, Next);
3328 
3329  // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3330  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
3331  return parseSysAlias(Head, NameLoc, Operands);
3332 
3333  Operands.push_back(
3334  AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3335  Mnemonic = Head;
3336 
3337  // Handle condition codes for a branch mnemonic
3338  if (Head == "b" && Next != StringRef::npos) {
3339  Start = Next;
3340  Next = Name.find('.', Start + 1);
3341  Head = Name.slice(Start + 1, Next);
3342 
3343  SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3344  (Head.data() - Name.data()));
3345  AArch64CC::CondCode CC = parseCondCodeString(Head);
3346  if (CC == AArch64CC::Invalid)
3347  return Error(SuffixLoc, "invalid condition code");
3348  Operands.push_back(
3349  AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3350  Operands.push_back(
3351  AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3352  }
3353 
3354  // Add the remaining tokens in the mnemonic.
3355  while (Next != StringRef::npos) {
3356  Start = Next;
3357  Next = Name.find('.', Start + 1);
3358  Head = Name.slice(Start, Next);
3359  SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3360  (Head.data() - Name.data()) + 1);
3361  Operands.push_back(
3362  AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3363  }
3364 
3365  // Conditional compare instructions have a Condition Code operand, which needs
3366  // to be parsed and an immediate operand created.
3367  bool condCodeFourthOperand =
3368  (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3369  Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3370  Head == "csinc" || Head == "csinv" || Head == "csneg");
3371 
3372  // These instructions are aliases to some of the conditional select
3373  // instructions. However, the condition code is inverted in the aliased
3374  // instruction.
3375  //
3376  // FIXME: Is this the correct way to handle these? Or should the parser
3377  // generate the aliased instructions directly?
3378  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3379  bool condCodeThirdOperand =
3380  (Head == "cinc" || Head == "cinv" || Head == "cneg");
3381 
3382  // Read the remaining operands.
3383  if (getLexer().isNot(AsmToken::EndOfStatement)) {
3384  // Read the first operand.
3385  if (parseOperand(Operands, false, false)) {
3386  return true;
3387  }
3388 
3389  unsigned N = 2;
3390  while (parseOptionalToken(AsmToken::Comma)) {
3391  // Parse and remember the operand.
3392  if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3393  (N == 3 && condCodeThirdOperand) ||
3394  (N == 2 && condCodeSecondOperand),
3395  condCodeSecondOperand || condCodeThirdOperand)) {
3396  return true;
3397  }
3398 
3399  // After successfully parsing some operands there are two special cases to
3400  // consider (i.e. notional operands not separated by commas). Both are due
3401  // to memory specifiers:
3402  // + An RBrac will end an address for load/store/prefetch
3403  // + An '!' will indicate a pre-indexed operation.
3404  //
3405  // It's someone else's responsibility to make sure these tokens are sane
3406  // in the given context!
3407 
3408  SMLoc RLoc = Parser.getTok().getLoc();
3409  if (parseOptionalToken(AsmToken::RBrac))
3410  Operands.push_back(
3411  AArch64Operand::CreateToken("]", false, RLoc, getContext()));
3412  SMLoc ELoc = Parser.getTok().getLoc();
3413  if (parseOptionalToken(AsmToken::Exclaim))
3414  Operands.push_back(
3415  AArch64Operand::CreateToken("!", false, ELoc, getContext()));
3416 
3417  ++N;
3418  }
3419  }
3420 
3421  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3422  return true;
3423 
3424  return false;
3425 }
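
// Illustrative sketch: how a mnemonic such as "b.eq" or "fmla.4s" is split
// into a head token plus '.'-prefixed suffix tokens, mirroring the suffix
// loop above (for "b.<cond>" the suffix is additionally parsed as a
// condition code). Minimal standalone model using std::string rather than
// StringRef/MC tokens; names here are illustrative only.
#include <cstddef>
#include <string>
#include <vector>

static std::vector<std::string> splitMnemonic(const std::string &Name) {
  std::vector<std::string> Tokens;
  std::size_t Start = 0, Next = Name.find('.');
  Tokens.push_back(Name.substr(Start, Next)); // head, e.g. "b" or "fmla"
  while (Next != std::string::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    std::size_t Len =
        (Next == std::string::npos) ? std::string::npos : Next - Start;
    Tokens.push_back(Name.substr(Start, Len)); // suffix, e.g. ".eq" or ".4s"
  }
  return Tokens;
}
// splitMnemonic("fmla.4s") -> {"fmla", ".4s"}; splitMnemonic("b.eq") -> {"b", ".eq"}.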
3426 
3427 // FIXME: This entire function is a giant hack to provide us with decent
3428 // operand range validation/diagnostics until TableGen/MC can be extended
3429 // to support autogeneration of this kind of validation.
3430 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3431  SmallVectorImpl<SMLoc> &Loc) {
3432  const MCRegisterInfo *RI = getContext().getRegisterInfo();
3433  // Check for indexed addressing modes where the base register is the
3434  // same as a destination/source register, or for pair loads where
3435  // Rt == Rt2. All of those are architecturally unpredictable.
3436  switch (Inst.getOpcode()) {
3437  case AArch64::LDPSWpre:
3438  case AArch64::LDPWpost:
3439  case AArch64::LDPWpre:
3440  case AArch64::LDPXpost:
3441  case AArch64::LDPXpre: {
3442  unsigned Rt = Inst.getOperand(1).getReg();
3443  unsigned Rt2 = Inst.getOperand(2).getReg();
3444  unsigned Rn = Inst.getOperand(3).getReg();
3445  if (RI->isSubRegisterEq(Rn, Rt))
3446  return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3447  "is also a destination");
3448  if (RI->isSubRegisterEq(Rn, Rt2))
3449  return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3450  "is also a destination");
3452  }
3453  case AArch64::LDPDi:
3454  case AArch64::LDPQi:
3455  case AArch64::LDPSi:
3456  case AArch64::LDPSWi:
3457  case AArch64::LDPWi:
3458  case AArch64::LDPXi: {
3459  unsigned Rt = Inst.getOperand(0).getReg();
3460  unsigned Rt2 = Inst.getOperand(1).getReg();
3461  if (Rt == Rt2)
3462  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3463  break;
3464  }
3465  case AArch64::LDPDpost:
3466  case AArch64::LDPDpre:
3467  case AArch64::LDPQpost:
3468  case AArch64::LDPQpre:
3469  case AArch64::LDPSpost:
3470  case AArch64::LDPSpre:
3471  case AArch64::LDPSWpost: {
3472  unsigned Rt = Inst.getOperand(1).getReg();
3473  unsigned Rt2 = Inst.getOperand(2).getReg();
3474  if (Rt == Rt2)
3475  return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3476  break;
3477  }
3478  case AArch64::STPDpost:
3479  case AArch64::STPDpre:
3480  case AArch64::STPQpost:
3481  case AArch64::STPQpre:
3482  case AArch64::STPSpost:
3483  case AArch64::STPSpre:
3484  case AArch64::STPWpost:
3485  case AArch64::STPWpre:
3486  case AArch64::STPXpost:
3487  case AArch64::STPXpre: {
3488  unsigned Rt = Inst.getOperand(1).getReg();
3489  unsigned Rt2 = Inst.getOperand(2).getReg();
3490  unsigned Rn = Inst.getOperand(3).getReg();
3491  if (RI->isSubRegisterEq(Rn, Rt))
3492  return Error(Loc[0], "unpredictable STP instruction, writeback base "
3493  "is also a source");
3494  if (RI->isSubRegisterEq(Rn, Rt2))
3495  return Error(Loc[1], "unpredictable STP instruction, writeback base "
3496  "is also a source");
3497  break;
3498  }
3499  case AArch64::LDRBBpre:
3500  case AArch64::LDRBpre:
3501  case AArch64::LDRHHpre:
3502  case AArch64::LDRHpre:
3503  case AArch64::LDRSBWpre:
3504  case AArch64::LDRSBXpre:
3505  case AArch64::LDRSHWpre:
3506  case AArch64::LDRSHXpre:
3507  case AArch64::LDRSWpre:
3508  case AArch64::LDRWpre:
3509  case AArch64::LDRXpre:
3510  case AArch64::LDRBBpost:
3511  case AArch64::LDRBpost:
3512  case AArch64::LDRHHpost:
3513  case AArch64::LDRHpost:
3514  case AArch64::LDRSBWpost:
3515  case AArch64::LDRSBXpost:
3516  case AArch64::LDRSHWpost:
3517  case AArch64::LDRSHXpost:
3518  case AArch64::LDRSWpost:
3519  case AArch64::LDRWpost:
3520  case AArch64::LDRXpost: {
3521  unsigned Rt = Inst.getOperand(1).getReg();
3522  unsigned Rn = Inst.getOperand(2).getReg();
3523  if (RI->isSubRegisterEq(Rn, Rt))
3524  return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3525  "is also a source");
3526  break;
3527  }
3528  case AArch64::STRBBpost:
3529  case AArch64::STRBpost:
3530  case AArch64::STRHHpost:
3531  case AArch64::STRHpost:
3532  case AArch64::STRWpost:
3533  case AArch64::STRXpost:
3534  case AArch64::STRBBpre:
3535  case AArch64::STRBpre:
3536  case AArch64::STRHHpre:
3537  case AArch64::STRHpre:
3538  case AArch64::STRWpre:
3539  case AArch64::STRXpre: {
3540  unsigned Rt = Inst.getOperand(1).getReg();
3541  unsigned Rn = Inst.getOperand(2).getReg();
3542  if (RI->isSubRegisterEq(Rn, Rt))
3543  return Error(Loc[0], "unpredictable STR instruction, writeback base "
3544  "is also a source");
3545  break;
3546  }
3547  }
3548 
3549  // Now check immediate ranges. Separate from the above as there is overlap
3550  // in the instructions being checked and this keeps the nested conditionals
3551  // to a minimum.
3552  switch (Inst.getOpcode()) {
3553  case AArch64::ADDSWri:
3554  case AArch64::ADDSXri:
3555  case AArch64::ADDWri:
3556  case AArch64::ADDXri:
3557  case AArch64::SUBSWri:
3558  case AArch64::SUBSXri:
3559  case AArch64::SUBWri:
3560  case AArch64::SUBXri: {
3561  // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3562  // some slight duplication here.
3563  if (Inst.getOperand(2).isExpr()) {
3564  const MCExpr *Expr = Inst.getOperand(2).getExpr();
3565  AArch64MCExpr::VariantKind ELFRefKind;
3566  MCSymbolRefExpr::VariantKind DarwinRefKind;
3567  int64_t Addend;
3568  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3569 
3570  // Only allow these with ADDXri.
3571  if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3572  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3573  Inst.getOpcode() == AArch64::ADDXri)
3574  return false;
3575 
3576  // Only allow these with ADDXri/ADDWri
3577  if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3578  ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3579  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3580  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3581  ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3582  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3583  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3584  ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3585  (Inst.getOpcode() == AArch64::ADDXri ||
3586  Inst.getOpcode() == AArch64::ADDWri))
3587  return false;
3588 
3589  // Otherwise, don't allow symbol refs in the immediate field.
3590  // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
3591  // operands of the original instruction (i.e. 'add w0, w1, borked' vs
3592  // 'cmp w0, borked').
3593  return Error(Loc.back(), "invalid immediate expression");
3594  }
3595  // We don't validate more complex expressions here
3596  }
3597  return false;
3598  }
3599  default:
3600  return false;
3601  }
3602 }
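
// Illustrative sketch of the two hazards diagnosed above, using plain
// register numbers. Assumption: simple equality stands in for
// MCRegisterInfo::isSubRegisterEq, which additionally treats Wn and Xn as
// overlapping. Standalone model, not tied to MCInst.
#include <cassert>

// Writeback base overlaps a transfer register (pre/post-indexed forms).
static bool writebackBaseOverlaps(unsigned Rn, unsigned Rt, unsigned Rt2) {
  return Rn == Rt || Rn == Rt2;
}

// Both transfer registers of a load pair are the same.
static bool pairDestinationsAlias(unsigned Rt, unsigned Rt2) {
  return Rt == Rt2;
}

static void demoLdpStpHazards() {
  assert(writebackBaseOverlaps(/*Rn=*/0, /*Rt=*/0, /*Rt2=*/1));  // ldp x0, x1, [x0], #16
  assert(!writebackBaseOverlaps(/*Rn=*/2, /*Rt=*/0, /*Rt2=*/1)); // ldp x0, x1, [x2], #16
  assert(pairDestinationsAlias(3, 3));                           // ldp x3, x3, [x1]
}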
3603 
3604 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3605  switch (ErrCode) {
3606  case Match_MissingFeature:
3607  return Error(Loc,
3608  "instruction requires a CPU feature not currently enabled");
3609  case Match_InvalidOperand:
3610  return Error(Loc, "invalid operand for instruction");
3611  case Match_InvalidSuffix:
3612  return Error(Loc, "invalid type suffix for instruction");
3613  case Match_InvalidCondCode:
3614  return Error(Loc, "expected AArch64 condition code");
3615  case Match_AddSubRegExtendSmall:
3616  return Error(Loc,
3617  "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3618  case Match_AddSubRegExtendLarge:
3619  return Error(Loc,
3620  "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3621  case Match_AddSubSecondSource:
3622  return Error(Loc,
3623  "expected compatible register, symbol or integer in range [0, 4095]");
3624  case Match_LogicalSecondSource:
3625  return Error(Loc, "expected compatible register or logical immediate");
3626  case Match_InvalidMovImm32Shift:
3627  return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3628  case Match_InvalidMovImm64Shift:
3629  return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3630  case Match_AddSubRegShift32:
3631  return Error(Loc,
3632  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3633  case Match_AddSubRegShift64:
3634  return Error(Loc,
3635  "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3636  case Match_InvalidFPImm:
3637  return Error(Loc,
3638  "expected compatible register or floating-point constant");
3639  case Match_InvalidMemoryIndexedSImm9:
3640  return Error(Loc, "index must be an integer in range [-256, 255].");
3641  case Match_InvalidMemoryIndexed4SImm7:
3642  return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3643  case Match_InvalidMemoryIndexed8SImm7:
3644  return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3645  case Match_InvalidMemoryIndexed16SImm7:
3646  return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3647  case Match_InvalidMemoryWExtend8:
3648  return Error(Loc,
3649  "expected 'uxtw' or 'sxtw' with optional shift of #0");
3650  case Match_InvalidMemoryWExtend16:
3651  return Error(Loc,
3652  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3653  case Match_InvalidMemoryWExtend32:
3654  return Error(Loc,
3655  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3656  case Match_InvalidMemoryWExtend64:
3657  return Error(Loc,
3658  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3659  case Match_InvalidMemoryWExtend128:
3660  return Error(Loc,
3661  "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3662  case Match_InvalidMemoryXExtend8:
3663  return Error(Loc,
3664  "expected 'lsl' or 'sxtx' with optional shift of #0");
3665  case Match_InvalidMemoryXExtend16:
3666  return Error(Loc,
3667  "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3668  case Match_InvalidMemoryXExtend32:
3669  return Error(Loc,
3670  "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3671  case Match_InvalidMemoryXExtend64:
3672  return Error(Loc,
3673  "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3674  case Match_InvalidMemoryXExtend128:
3675  return Error(Loc,
3676  "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3677  case Match_InvalidMemoryIndexed1:
3678  return Error(Loc, "index must be an integer in range [0, 4095].");
3679  case Match_InvalidMemoryIndexed2:
3680  return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3681  case Match_InvalidMemoryIndexed4:
3682  return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3683  case Match_InvalidMemoryIndexed8:
3684  return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3685  case Match_InvalidMemoryIndexed16:
3686  return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3687  case Match_InvalidImm0_1:
3688  return Error(Loc, "immediate must be an integer in range [0, 1].");
3689  case Match_InvalidImm0_7:
3690  return Error(Loc, "immediate must be an integer in range [0, 7].");
3691  case Match_InvalidImm0_15:
3692  return Error(Loc, "immediate must be an integer in range [0, 15].");
3693  case Match_InvalidImm0_31:
3694  return Error(Loc, "immediate must be an integer in range [0, 31].");
3695  case Match_InvalidImm0_63:
3696  return Error(Loc, "immediate must be an integer in range [0, 63].");
3697  case Match_InvalidImm0_127:
3698  return Error(Loc, "immediate must be an integer in range [0, 127].");
3699  case Match_InvalidImm0_65535:
3700  return Error(Loc, "immediate must be an integer in range [0, 65535].");
3701  case Match_InvalidImm1_8:
3702  return Error(Loc, "immediate must be an integer in range [1, 8].");
3703  case Match_InvalidImm1_16:
3704  return Error(Loc, "immediate must be an integer in range [1, 16].");
3705  case Match_InvalidImm1_32:
3706  return Error(Loc, "immediate must be an integer in range [1, 32].");
3707  case Match_InvalidImm1_64:
3708  return Error(Loc, "immediate must be an integer in range [1, 64].");
3709  case Match_InvalidIndex1:
3710  return Error(Loc, "expected lane specifier '[1]'");
3711  case Match_InvalidIndexB:
3712  return Error(Loc, "vector lane must be an integer in range [0, 15].");
3713  case Match_InvalidIndexH:
3714  return Error(Loc, "vector lane must be an integer in range [0, 7].");
3715  case Match_InvalidIndexS:
3716  return Error(Loc, "vector lane must be an integer in range [0, 3].");
3717  case Match_InvalidIndexD:
3718  return Error(Loc, "vector lane must be an integer in range [0, 1].");
3719  case Match_InvalidLabel:
3720  return Error(Loc, "expected label or encodable integer pc offset");
3721  case Match_MRS:
3722  return Error(Loc, "expected readable system register");
3723  case Match_MSR:
3724  return Error(Loc, "expected writable system register or pstate");
3725  case Match_MnemonicFail:
3726  return Error(Loc, "unrecognized instruction mnemonic");
3727  default:
3728  llvm_unreachable("unexpected error code!");
3729  }
3730 }
3731 
3732 static const char *getSubtargetFeatureName(uint64_t Val);
3733 
3734 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3735  OperandVector &Operands,
3736  MCStreamer &Out,
3737  uint64_t &ErrorInfo,
3738  bool MatchingInlineAsm) {
3739  assert(!Operands.empty() && "Unexpected empty operand list!");
3740  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3741  assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3742 
3743  StringRef Tok = Op.getToken();
3744  unsigned NumOperands = Operands.size();
3745 
3746  if (NumOperands == 4 && Tok == "lsl") {
3747  AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3748  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3749  if (Op2.isReg() && Op3.isImm()) {
3750  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3751  if (Op3CE) {
3752  uint64_t Op3Val = Op3CE->getValue();
3753  uint64_t NewOp3Val = 0;
3754  uint64_t NewOp4Val = 0;
3755  if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3756  Op2.getReg())) {
3757  NewOp3Val = (32 - Op3Val) & 0x1f;
3758  NewOp4Val = 31 - Op3Val;
3759  } else {
3760  NewOp3Val = (64 - Op3Val) & 0x3f;
3761  NewOp4Val = 63 - Op3Val;
3762  }
3763 
3764  const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3765  const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3766 
3767  Operands[0] = AArch64Operand::CreateToken(
3768  "ubfm", false, Op.getStartLoc(), getContext());
3769  Operands.push_back(AArch64Operand::CreateImm(
3770  NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3771  Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3772  Op3.getEndLoc(), getContext());
3773  }
3774  }
3775  } else if (NumOperands == 4 && Tok == "bfc") {
3776  // FIXME: Horrible hack to handle BFC->BFM alias.
3777  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3778  AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3779  AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3780 
3781  if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
3782  const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3783  const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3784 
3785  if (LSBCE && WidthCE) {
3786  uint64_t LSB = LSBCE->getValue();
3787  uint64_t Width = WidthCE->getValue();
3788 
3789  uint64_t RegWidth = 0;
3790  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3791  Op1.getReg()))
3792  RegWidth = 64;
3793  else
3794  RegWidth = 32;
3795 
3796  if (LSB >= RegWidth)
3797  return Error(LSBOp.getStartLoc(),
3798  "expected integer in range [0, 31]");
3799  if (Width < 1 || Width > RegWidth)
3800  return Error(WidthOp.getStartLoc(),
3801  "expected integer in range [1, 32]");
3802 
3803  uint64_t ImmR = 0;
3804  if (RegWidth == 32)
3805  ImmR = (32 - LSB) & 0x1f;
3806  else
3807  ImmR = (64 - LSB) & 0x3f;
3808 
3809  uint64_t ImmS = Width - 1;
3810 
3811  if (ImmR != 0 && ImmS >= ImmR)
3812  return Error(WidthOp.getStartLoc(),
3813  "requested insert overflows register");
3814 
3815  const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3816  const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3817  Operands[0] = AArch64Operand::CreateToken(
3818  "bfm", false, Op.getStartLoc(), getContext());
3819  Operands[2] = AArch64Operand::CreateReg(
3820  RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(),
3821  SMLoc(), getContext());
3822  Operands[3] = AArch64Operand::CreateImm(
3823  ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3824  Operands.emplace_back(
3825  AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3826  WidthOp.getEndLoc(), getContext()));
3827  }
3828  }
3829  } else if (NumOperands == 5) {
3830  // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3831  // UBFIZ -> UBFM aliases.
3832  if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3833  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3834  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3835  AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3836 
3837  if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3838  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3839  const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3840 
3841  if (Op3CE && Op4CE) {
3842  uint64_t Op3Val = Op3CE->getValue();
3843  uint64_t Op4Val = Op4CE->getValue();
3844 
3845  uint64_t RegWidth = 0;
3846  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3847  Op1.getReg()))
3848  RegWidth = 64;
3849  else
3850  RegWidth = 32;
3851 
3852  if (Op3Val >= RegWidth)
3853  return Error(Op3.getStartLoc(),
3854  "expected integer in range [0, 31]");
3855  if (Op4Val < 1 || Op4Val > RegWidth)
3856  return Error(Op4.getStartLoc(),
3857  "expected integer in range [1, 32]");
3858 
3859  uint64_t NewOp3Val = 0;
3860  if (RegWidth == 32)
3861  NewOp3Val = (32 - Op3Val) & 0x1f;
3862  else
3863  NewOp3Val = (64 - Op3Val) & 0x3f;
3864 
3865  uint64_t NewOp4Val = Op4Val - 1;
3866 
3867  if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3868  return Error(Op4.getStartLoc(),
3869  "requested insert overflows register");
3870 
3871  const MCExpr *NewOp3 =
3872  MCConstantExpr::create(NewOp3Val, getContext());
3873  const MCExpr *NewOp4 =
3874  MCConstantExpr::create(NewOp4Val, getContext());
3875  Operands[3] = AArch64Operand::CreateImm(
3876  NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3877  Operands[4] = AArch64Operand::CreateImm(
3878  NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3879  if (Tok == "bfi")
3880  Operands[0] = AArch64Operand::CreateToken(
3881  "bfm", false, Op.getStartLoc(), getContext());
3882  else if (Tok == "sbfiz")
3883  Operands[0] = AArch64Operand::CreateToken(
3884  "sbfm", false, Op.getStartLoc(), getContext());
3885  else if (Tok == "ubfiz")
3886  Operands[0] = AArch64Operand::CreateToken(
3887  "ubfm", false, Op.getStartLoc(), getContext());
3888  else
3889  llvm_unreachable("No valid mnemonic for alias?");
3890  }
3891  }
3892 
3893  // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3894  // UBFX -> UBFM aliases.
3895  } else if (NumOperands == 5 &&
3896  (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3897  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3898  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3899  AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3900 
3901  if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3902  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3903  const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3904 
3905  if (Op3CE && Op4CE) {
3906  uint64_t Op3Val = Op3CE->getValue();
3907  uint64_t Op4Val = Op4CE->getValue();
3908 
3909  uint64_t RegWidth = 0;
3910  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3911  Op1.getReg()))
3912  RegWidth = 64;
3913  else
3914  RegWidth = 32;
3915 
3916  if (Op3Val >= RegWidth)
3917  return Error(Op3.getStartLoc(),
3918  "expected integer in range [0, 31]");
3919  if (Op4Val < 1 || Op4Val > RegWidth)
3920  return Error(Op4.getStartLoc(),
3921  "expected integer in range [1, 32]");
3922 
3923  uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3924 
3925  if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3926  return Error(Op4.getStartLoc(),
3927  "requested extract overflows register");
3928 
3929  const MCExpr *NewOp4 =
3930  MCConstantExpr::create(NewOp4Val, getContext());
3931  Operands[4] = AArch64Operand::CreateImm(
3932  NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3933  if (Tok == "bfxil")
3934  Operands[0] = AArch64Operand::CreateToken(
3935  "bfm", false, Op.getStartLoc(), getContext());
3936  else if (Tok == "sbfx")
3937  Operands[0] = AArch64Operand::CreateToken(
3938  "sbfm", false, Op.getStartLoc(), getContext());
3939  else if (Tok == "ubfx")
3940  Operands[0] = AArch64Operand::CreateToken(
3941  "ubfm", false, Op.getStartLoc(), getContext());
3942  else
3943  llvm_unreachable("No valid mnemonic for alias?");
3944  }
3945  }
3946  }
3947  }
3948  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3949  // InstAlias can't quite handle this since the reg classes aren't
3950  // subclasses.
3951  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3952  // The source register can be Wn here, but the matcher expects a
3953  // GPR64. Twiddle it here if necessary.
3954  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3955  if (Op.isReg()) {
3956  unsigned Reg = getXRegFromWReg(Op.getReg());
3957  Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3958  Op.getEndLoc(), getContext());
3959  }
3960  }
3961  // FIXME: Likewise for sxt[bh] with an Xd dst operand
3962  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3963  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3964  if (Op.isReg() &&
3965  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3966  Op.getReg())) {
3967  // The source register can be Wn here, but the matcher expects a
3968  // GPR64. Twiddle it here if necessary.
3969  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3970  if (Op.isReg()) {
3971  unsigned Reg = getXRegFromWReg(Op.getReg());
3972  Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3973  Op.getEndLoc(), getContext());
3974  }
3975  }
3976  }
3977  // FIXME: Likewise for uxt[bh] with an Xd dst operand
3978  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3979  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3980  if (Op.isReg() &&
3981  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3982  Op.getReg())) {
3983  // The source register can be Wn here, but the matcher expects a
3984  // GPR32. Twiddle it here if necessary.
3985  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3986  if (Op.isReg()) {
3987  unsigned Reg = getWRegFromXReg(Op.getReg());
3988  Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3989  Op.getEndLoc(), getContext());
3990  }
3991  }
3992  }
3993 
3994  // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3995  if (NumOperands == 3 && Tok == "fmov") {
3996  AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3997  AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
3998  if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3999  unsigned zreg =
4000  !AArch64MCRegisterClasses[AArch64::FPR64RegClassID].contains(
4001  RegOp.getReg())
4002  ? AArch64::WZR
4003  : AArch64::XZR;
4004  Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
4005  Op.getEndLoc(), getContext());
4006  }
4007  }
4008 
4009  MCInst Inst;
4010  // First try to match against the secondary set of tables containing the
4011  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4012  unsigned MatchResult =
4013  MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4014 
4015  // If that fails, try against the alternate table containing long-form NEON:
4016  // "fadd v0.2s, v1.2s, v2.2s"
4017  if (MatchResult != Match_Success) {
4018  // But first, save the short-form match result: we can use it in case the
4019  // long-form match also fails.
4020  auto ShortFormNEONErrorInfo = ErrorInfo;
4021  auto ShortFormNEONMatchResult = MatchResult;
4022 
4023  MatchResult =
4024  MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4025 
4026  // Now, both matches failed, and the long-form match failed on the mnemonic
4027  // suffix token operand. The short-form match failure is probably more
4028  // relevant: use it instead.
4029  if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
4030  Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
4031  ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
4032  MatchResult = ShortFormNEONMatchResult;
4033  ErrorInfo = ShortFormNEONErrorInfo;
4034  }
4035  }
4036 
4037  switch (MatchResult) {
4038  case Match_Success: {
4039  // Perform range checking and other semantic validations
4040  SmallVector<SMLoc, 8> OperandLocs;
4041  NumOperands = Operands.size();
4042  for (unsigned i = 1; i < NumOperands; ++i)
4043  OperandLocs.push_back(Operands[i]->getStartLoc());
4044  if (validateInstruction(Inst, OperandLocs))
4045  return true;
4046 
4047  Inst.setLoc(IDLoc);
4048  Out.EmitInstruction(Inst, getSTI());
4049  return false;
4050  }
4051  case Match_MissingFeature: {
4052  assert(ErrorInfo && "Unknown missing feature!");
4053  // Special case the error message for the very common case where only
4054  // a single subtarget feature is missing (e.g. NEON).
4055  std::string Msg = "instruction requires:";
4056  uint64_t Mask = 1;
4057  for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4058  if (ErrorInfo & Mask) {
4059  Msg += " ";
4060  Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4061  }
4062  Mask <<= 1;
4063  }
4064  return Error(IDLoc, Msg);
4065  }
4066  case Match_MnemonicFail:
4067  return showMatchError(IDLoc, MatchResult);
4068  case Match_InvalidOperand: {
4069  SMLoc ErrorLoc = IDLoc;
4070 
4071  if (ErrorInfo != ~0ULL) {
4072  if (ErrorInfo >= Operands.size())
4073  return Error(IDLoc, "too few operands for instruction",
4074  SMRange(IDLoc, getTok().getLoc()));
4075 
4076  ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4077  if (ErrorLoc == SMLoc())
4078  ErrorLoc = IDLoc;
4079  }
4080  // If the match failed on a suffix token operand, tweak the diagnostic
4081  // accordingly.
4082  if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4083  ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4084  MatchResult = Match_InvalidSuffix;
4085 
4086  return showMatchError(ErrorLoc, MatchResult);
4087  }
4088  case Match_InvalidMemoryIndexed1:
4089  case Match_InvalidMemoryIndexed2:
4090  case Match_InvalidMemoryIndexed4:
4091  case Match_InvalidMemoryIndexed8:
4092  case Match_InvalidMemoryIndexed16:
4093  case Match_InvalidCondCode:
4094  case Match_AddSubRegExtendSmall:
4095  case Match_AddSubRegExtendLarge:
4096  case Match_AddSubSecondSource:
4097  case Match_LogicalSecondSource:
4098  case Match_AddSubRegShift32:
4099  case Match_AddSubRegShift64:
4100  case Match_InvalidMovImm32Shift:
4101  case Match_InvalidMovImm64Shift:
4102  case Match_InvalidFPImm:
4103  case Match_InvalidMemoryWExtend8:
4104  case Match_InvalidMemoryWExtend16:
4105  case Match_InvalidMemoryWExtend32:
4106  case Match_InvalidMemoryWExtend64:
4107  case Match_InvalidMemoryWExtend128:
4108  case Match_InvalidMemoryXExtend8:
4109  case Match_InvalidMemoryXExtend16:
4110  case Match_InvalidMemoryXExtend32:
4111  case Match_InvalidMemoryXExtend64:
4112  case Match_InvalidMemoryXExtend128:
4113  case Match_InvalidMemoryIndexed4SImm7:
4114  case Match_InvalidMemoryIndexed8SImm7:
4115  case Match_InvalidMemoryIndexed16SImm7:
4116  case Match_InvalidMemoryIndexedSImm9:
4117  case Match_InvalidImm0_1:
4118  case Match_InvalidImm0_7:
4119  case Match_InvalidImm0_15:
4120  case Match_InvalidImm0_31:
4121  case Match_InvalidImm0_63:
4122  case Match_InvalidImm0_127:
4123  case Match_InvalidImm0_65535:
4124  case Match_InvalidImm1_8:
4125  case Match_InvalidImm1_16:
4126  case Match_InvalidImm1_32:
4127  case Match_InvalidImm1_64:
4128  case Match_InvalidIndex1:
4129  case Match_InvalidIndexB:
4130  case Match_InvalidIndexH:
4131  case Match_InvalidIndexS:
4132  case Match_InvalidIndexD:
4133  case Match_InvalidLabel:
4134  case Match_MSR:
4135  case Match_MRS: {
4136  if (ErrorInfo >= Operands.size())
4137  return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
4138  // Any time we get here, there's nothing fancy to do. Just get the
4139  // operand SMLoc and display the diagnostic.
4140  SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4141  if (ErrorLoc == SMLoc())
4142  ErrorLoc = IDLoc;
4143  return showMatchError(ErrorLoc, MatchResult);
4144  }
4145  }
4146 
4147  llvm_unreachable("Implement any new match types added!");
4148 }
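
// Illustrative sketch of the immr/imms arithmetic used by the alias
// rewrites above (lsl -> UBFM; bfc/bfi/sbfiz/ubfiz -> *BFM insert form;
// bfxil/sbfx/ubfx -> *BFM extract form). Standalone model assuming only the
// 32/64-bit register widths; it performs no operand validation.
#include <cassert>
#include <cstdint>

struct BfmImms { uint64_t ImmR, ImmS; };

// lsl Rd, Rn, #shift  ==>  ubfm Rd, Rn, #((RegWidth - shift) % RegWidth),
//                                        #(RegWidth - 1 - shift)
static BfmImms lslToUbfm(uint64_t Shift, unsigned RegWidth) {
  uint64_t Mask = RegWidth - 1; // 0x1f or 0x3f
  return {(RegWidth - Shift) & Mask, RegWidth - 1 - Shift};
}

// bfi/bfc/sbfiz/ubfiz Rd[, Rn], #lsb, #width  ==>
//   *bfm Rd, Rn, #((RegWidth - lsb) % RegWidth), #(width - 1)
static BfmImms bfiToBfm(uint64_t Lsb, uint64_t Width, unsigned RegWidth) {
  uint64_t Mask = RegWidth - 1;
  return {(RegWidth - Lsb) & Mask, Width - 1};
}

// bfxil/sbfx/ubfx Rd, Rn, #lsb, #width  ==>  *bfm Rd, Rn, #lsb, #(lsb+width-1)
static BfmImms bfxToBfm(uint64_t Lsb, uint64_t Width) {
  return {Lsb, Lsb + Width - 1};
}

static void demoBitfieldAliasImms() {
  BfmImms A = lslToUbfm(3, 64);   // lsl x0, x1, #3
  assert(A.ImmR == 61 && A.ImmS == 60);
  BfmImms B = bfiToBfm(8, 4, 32); // bfi w0, w1, #8, #4
  assert(B.ImmR == 24 && B.ImmS == 3);
  BfmImms C = bfxToBfm(8, 4);     // ubfx x0, x1, #8, #4
  assert(C.ImmR == 8 && C.ImmS == 11);
}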
4149 
4150 /// ParseDirective parses the AArch64-specific directives
4151 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4152  const MCObjectFileInfo::Environment Format =
4153  getContext().getObjectFileInfo()->getObjectFileType();
4154  bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4155  bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4156 
4157  StringRef IDVal = DirectiveID.getIdentifier();
4158  SMLoc Loc = DirectiveID.getLoc();
4159  if (IDVal == ".arch")
4160  parseDirectiveArch(Loc);
4161  else if (IDVal == ".cpu")
4162  parseDirectiveCPU(Loc);
4163  else if (IDVal == ".hword")
4164  parseDirectiveWord(2, Loc);
4165  else if (IDVal == ".word")
4166  parseDirectiveWord(4, Loc);
4167  else if (IDVal == ".xword")
4168  parseDirectiveWord(8, Loc);
4169  else if (IDVal == ".tlsdesccall")
4170  parseDirectiveTLSDescCall(Loc);
4171  else if (IDVal == ".ltorg" || IDVal == ".pool")
4172  parseDirectiveLtorg(Loc);
4173  else if (IDVal == ".unreq")
4174  parseDirectiveUnreq(Loc);
4175  else if (!IsMachO && !IsCOFF) {
4176  if (IDVal == ".inst")
4177  parseDirectiveInst(Loc);
4178  else
4179  return true;
4180  } else if (IDVal == MCLOHDirectiveName())
4181  parseDirectiveLOH(IDVal, Loc);
4182  else
4183  return true;
4184  return false;
4185 }
4186 
4187 static const struct {
4188  const char *Name;
4189  const FeatureBitset Features;
4190 } ExtensionMap[] = {
4191  { "crc", {AArch64::FeatureCRC} },
4192  { "crypto", {AArch64::FeatureCrypto} },
4193  { "fp", {AArch64::FeatureFPARMv8} },
4194  { "simd", {AArch64::FeatureNEON} },
4195  { "ras", {AArch64::FeatureRAS} },
4196  { "lse", {AArch64::FeatureLSE} },
4197 
4198  // FIXME: Unsupported extensions
4199  { "pan", {} },
4200  { "lor", {} },
4201  { "rdma", {} },
4202  { "profile", {} },
4203 };
4204 
4205 /// parseDirectiveArch
4206 /// ::= .arch token
4207 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
4208  SMLoc ArchLoc = getLoc();
4209 
4210  StringRef Arch, ExtensionString;
4211  std::tie(Arch, ExtensionString) =
4212  getParser().parseStringToEndOfStatement().trim().split('+');
4213 
4214  unsigned ID = AArch64::parseArch(Arch);
4215  if (ID == static_cast<unsigned>(AArch64::ArchKind::AK_INVALID))
4216  return Error(ArchLoc, "unknown arch name");
4217 
4218  if (parseToken(AsmToken::EndOfStatement))
4219  return true;
4220 
4221  // Get the architecture and extension features.
4222  std::vector<StringRef> AArch64Features;
4223  AArch64::getArchFeatures(ID, AArch64Features);
4225  AArch64Features);
4226 
4227  MCSubtargetInfo &STI = copySTI();
4228  std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
4229  STI.setDefaultFeatures("generic", join(ArchFeatures.begin(), ArchFeatures.end(), ","));
4230 
4231  SmallVector<StringRef, 4> RequestedExtensions;
4232  if (!ExtensionString.empty())
4233  ExtensionString.split(RequestedExtensions, '+');
4234 
4235  FeatureBitset Features = STI.getFeatureBits();
4236  for (auto Name : RequestedExtensions) {
4237  bool EnableFeature = true;
4238 
4239  if (Name.startswith_lower("no")) {
4240  EnableFeature = false;
4241  Name = Name.substr(2);
4242  }
4243 
4244  for (const auto &Extension : ExtensionMap) {
4245  if (Extension.Name != Name)
4246  continue;
4247 
4248  if (Extension.Features.none())
4249  report_fatal_error("unsupported architectural extension: " + Name);
4250 
4251  FeatureBitset ToggleFeatures = EnableFeature
4252  ? (~Features & Extension.Features)
4253  : ( Features & Extension.Features);
4254  uint64_t Features =
4255  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
4256  setAvailableFeatures(Features);
4257  break;
4258  }
4259  }
4260  return false;
4261 }
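
// Illustrative sketch of how the "+ext"/"+noext" suffixes of a .arch (or
// .cpu) string are interpreted above: split on '+', and a leading "no"
// turns the request into a disable. Standalone model using std::string;
// for brevity the "no" prefix is matched case-sensitively here, whereas the
// parser matches it case-insensitively.
#include <string>
#include <utility>
#include <vector>

// Returns (extension-name, enable?) pairs for a string such as
// "crc+nocrypto" after the architecture/CPU name has been removed.
static std::vector<std::pair<std::string, bool>>
parseExtensionList(const std::string &ExtensionString) {
  std::vector<std::pair<std::string, bool>> Result;
  std::size_t Pos = 0;
  while (Pos < ExtensionString.size()) {
    std::size_t Next = ExtensionString.find('+', Pos);
    std::string Name = ExtensionString.substr(
        Pos, Next == std::string::npos ? std::string::npos : Next - Pos);
    bool Enable = true;
    if (Name.size() > 2 && Name.compare(0, 2, "no") == 0) {
      Enable = false;
      Name = Name.substr(2);
    }
    if (!Name.empty())
      Result.emplace_back(Name, Enable);
    Pos = (Next == std::string::npos) ? Next : Next + 1;
    if (Next == std::string::npos)
      break;
  }
  return Result;
}
// parseExtensionList("crc+nocrypto") -> {{"crc", true}, {"crypto", false}}.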
4262 
4263 /// parseDirectiveCPU
4264 /// ::= .cpu id
4265 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
4266  SMLoc CPULoc = getLoc();
4267 
4268  StringRef CPU, ExtensionString;
4269  std::tie(CPU, ExtensionString) =
4270  getParser().parseStringToEndOfStatement().trim().split('+');
4271 
4272  if (parseToken(AsmToken::EndOfStatement))
4273  return true;
4274 
4275  SmallVector<StringRef, 4> RequestedExtensions;
4276  if (!ExtensionString.empty())
4277  ExtensionString.split(RequestedExtensions, '+');
4278 
4279  // FIXME This is using tablegen data, but should be moved to ARMTargetParser
4280  // once that is tablegen'ed
4281  if (!getSTI().isCPUStringValid(CPU)) {
4282  Error(CPULoc, "unknown CPU name");
4283  return false;
4284  }
4285 
4286  MCSubtargetInfo &STI = copySTI();
4287  STI.setDefaultFeatures(CPU, "");
4288 
4289  FeatureBitset Features = STI.getFeatureBits();
4290  for (auto Name : RequestedExtensions) {
4291  bool EnableFeature = true;
4292 
4293  if (Name.startswith_lower("no")) {
4294  EnableFeature = false;
4295  Name = Name.substr(2);
4296  }
4297 
4298  for (const auto &Extension : ExtensionMap) {
4299  if (Extension.Name != Name)
4300  continue;
4301 
4302  if (Extension.Features.none())
4303  report_fatal_error("unsupported architectural extension: " + Name);
4304 
4305  FeatureBitset ToggleFeatures = EnableFeature
4306  ? (~Features & Extension.Features)
4307  : ( Features & Extension.Features);
4308  uint64_t Features =
4309  ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
4310  setAvailableFeatures(Features);
4311 
4312  break;
4313  }
4314  }
4315  return false;
4316 }
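
// Illustrative sketch of the ToggleFeatures computation shared by .arch and
// .cpu above, modelling FeatureBitset as a plain uint64_t mask. Only the
// bits that actually need to change are toggled, so already-enabled (or
// already-disabled) features are left untouched. Feature values below are
// illustrative, not the real feature bit positions.
#include <cassert>
#include <cstdint>

static uint64_t computeToggleMask(uint64_t Current, uint64_t Extension,
                                  bool Enable) {
  return Enable ? (~Current & Extension) : (Current & Extension);
}

static void demoToggleMask() {
  const uint64_t FeatureCRC = 1, FeatureCrypto = 2;
  uint64_t Current = FeatureCRC; // CRC already on, Crypto off
  assert(computeToggleMask(Current, FeatureCRC, true) == 0);           // no-op
  assert(computeToggleMask(Current, FeatureCrypto, true) == FeatureCrypto);
  assert(computeToggleMask(Current, FeatureCRC, false) == FeatureCRC); // turn off
}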
4317 
4318 /// parseDirectiveWord
4319 /// ::= .word [ expression (, expression)* ]
4320 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4321  auto parseOp = [&]() -> bool {
4322  const MCExpr *Value;
4323  if (getParser().parseExpression(Value))
4324  return true;
4325  getParser().getStreamer().EmitValue(Value, Size, L);
4326  return false;
4327  };
4328 
4329  if (parseMany(parseOp))
4330  return true;
4331  return false;
4332 }
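
// Illustrative sketch of what .hword/.word/.xword do with each parsed
// expression: emit it as a Size-byte chunk (2, 4 or 8) into the current
// section, modelled here as a byte vector. Assumes a little-endian target
// (aarch64); aarch64_be would reverse the byte order. Expression parsing
// itself is omitted.
#include <cstdint>
#include <vector>

static void emitValue(std::vector<uint8_t> &Section, uint64_t Value,
                      unsigned Size) {
  for (unsigned i = 0; i != Size; ++i)
    Section.push_back(uint8_t(Value >> (8 * i)));
}
// ".hword 0x1234" -> emitValue(Section, 0x1234, 2) appends {0x34, 0x12}.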
4333 
4334 /// parseDirectiveInst
4335 /// ::= .inst opcode [, ...]
4336 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4337  if (getLexer().is(AsmToken::EndOfStatement))
4338  return Error(Loc, "expected expression following '.inst' directive");
4339 
4340  auto parseOp = [&]() -> bool {
4341  SMLoc L = getLoc();
4342  const MCExpr *Expr;
4343  if (check(getParser().parseExpression(Expr), L, "expected expression"))
4344  return true;
4345  const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4346  if (check(!Value, L, "expected constant expression"))
4347  return true;
4348  getTargetStreamer().emitInst(Value->getValue());
4349  return false;
4350  };
4351 
4352  if (parseMany(parseOp))
4353  return addErrorSuffix(" in '.inst' directive");
4354  return false;
4355 }
4356 
4357 // parseDirectiveTLSDescCall:
4358 // ::= .tlsdesccall symbol
4359 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4360  StringRef Name;
4361  if (check(getParser().parseIdentifier(Name), L,
4362  "expected symbol after directive") ||
4363  parseToken(AsmToken::EndOfStatement))
4364  return true;
4365 
4366  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4367  const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4368  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4369 
4370  MCInst Inst;
4371  Inst.setOpcode(AArch64::TLSDESCCALL);
4372  Inst.addOperand(MCOperand::createExpr(Expr));
4373 
4374  getParser().getStreamer().EmitInstruction(Inst, getSTI());
4375  return false;
4376 }
4377 
4378 /// ::= .loh <lohName | lohId> label1, ..., labelN
4379 /// The number of arguments depends on the loh identifier.
4380 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4381  MCLOHType Kind;
4382  if (getParser().getTok().isNot(AsmToken::Identifier)) {
4383  if (getParser().getTok().isNot(AsmToken::Integer))
4384  return TokError("expected an identifier or a number in directive");
4385  // We got a numeric value for the identifier.
4386  // Check that it is valid.
4387  int64_t Id = getParser().getTok().getIntVal();
4388  if (Id <= -1U && !isValidMCLOHType(Id))
4389  return TokError("invalid numeric identifier in directive");
4390  Kind = (MCLOHType)Id;
4391  } else {
4392  StringRef Name = getTok().getIdentifier();
4393  // We parsed an identifier.
4394  // Check that it is a recognized one.
4395  int Id = MCLOHNameToId(Name);
4396 
4397  if (Id == -1)
4398  return TokError("invalid identifier in directive");
4399  Kind = (MCLOHType)Id;
4400  }
4401  // Consume the identifier.
4402  Lex();
4403  // Get the number of arguments of this LOH.
4404  int NbArgs = MCLOHIdToNbArgs(Kind);
4405 
4406  assert(NbArgs != -1 && "Invalid number of arguments");
4407 
4408  SmallVector<MCSymbol *, 3> Args;
4409  for (int Idx = 0; Idx < NbArgs; ++Idx) {
4410  StringRef Name;
4411  if (getParser().parseIdentifier(Name))
4412  return TokError("expected identifier in directive");
4413  Args.push_back(getContext().getOrCreateSymbol(Name));
4414 
4415  if (Idx + 1 == NbArgs)
4416  break;
4417  if (parseToken(AsmToken::Comma,
4418  "unexpected token in '" + Twine(IDVal) + "' directive"))
4419  return true;
4420  }
4421  if (parseToken(AsmToken::EndOfStatement,
4422  "unexpected token in '" + Twine(IDVal) + "' directive"))
4423  return true;
4424 
4425  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4426  return false;
4427 }
4428 
4429 /// parseDirectiveLtorg
4430 /// ::= .ltorg | .pool
4431 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4432  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
4433  return true;
4434  getTargetStreamer().emitCurrentConstantPool();
4435  return false;
4436 }
4437 
4438 /// parseDirectiveReq
4439 /// ::= name .req registername
4440 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4441  MCAsmParser &Parser = getParser();
4442  Parser.Lex(); // Eat the '.req' token.
4443  SMLoc SRegLoc = getLoc();
4444  unsigned RegNum = tryParseRegister();
4445  bool IsVector = false;
4446 
4447  if (RegNum == static_cast<unsigned>(-1)) {
4448  StringRef Kind;
4449  RegNum = tryMatchVectorRegister(Kind, false);
4450  if (!Kind.empty())
4451  return Error(SRegLoc, "vector register without type specifier expected");
4452  IsVector = true;
4453  }
4454 
4455  if (RegNum == static_cast<unsigned>(-1))
4456  return Error(SRegLoc, "register name or alias expected");
4457 
4458  // Shouldn't be anything else.
4459  if (parseToken(AsmToken::EndOfStatement,
4460  "unexpected input in .req directive"))
4461  return true;
4462 
4463  auto pair = std::make_pair(IsVector, RegNum);
4464  if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4465  Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4466 
4467  return false;
4468 }
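
// Illustrative sketch of the RegisterReqs bookkeeping above: record the
// alias only if it is new, and warn when an existing alias would be rebound
// to a different register. Standalone model with std::map and toy register
// numbering; the real map is keyed by lower-cased StringRef.
#include <iostream>
#include <map>
#include <string>
#include <utility>

using RegAlias = std::pair<bool /*IsVector*/, unsigned /*RegNum*/>;

static void defineAlias(std::map<std::string, RegAlias> &Aliases,
                        const std::string &Name, RegAlias Alias) {
  // insert() keeps the first binding; a differing second definition only
  // produces a warning, matching the behaviour of the directive.
  auto It = Aliases.insert({Name, Alias}).first;
  if (It->second != Alias)
    std::cerr << "warning: ignoring redefinition of register alias '" << Name
              << "'\n";
}
// After "foo .req x1": defineAlias(Aliases, "foo", {false, 1}); a later
// "foo .req x2" warns and leaves the alias bound to the first register.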
4469 
4470 /// parseDirectiveUnreq
4471 /// ::= .unreq registername
4472 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4473  MCAsmParser &Parser = getParser();
4474  if (getTok().isNot(AsmToken::Identifier))
4475  return TokError("unexpected input in .unreq directive.");
4476  RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4477  Parser.Lex(); // Eat the identifier.
4478  if (parseToken(AsmToken::EndOfStatement))
4479  return addErrorSuffix("in '.unreq' directive");
4480  return false;
4481 }
4482 
4483 bool
4484 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4485  AArch64MCExpr::VariantKind &ELFRefKind,
4486  MCSymbolRefExpr::VariantKind &DarwinRefKind,
4487  int64_t &Addend) {
4488  ELFRefKind = AArch64MCExpr::VK_INVALID;
4489  DarwinRefKind = MCSymbolRefExpr::VK_None;
4490  Addend = 0;
4491 
4492  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4493  ELFRefKind = AE->getKind();
4494  Expr = AE->getSubExpr();
4495  }
4496 
4497  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4498  if (SE) {
4499  // It's a simple symbol reference with no addend.
4500  DarwinRefKind = SE->getKind();
4501  return true;
4502  }
4503 
4504  const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4505  if (!BE)
4506  return false;
4507 
4508  SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4509  if (!SE)
4510  return false;
4511  DarwinRefKind = SE->getKind();
4512 
4513  if (BE->getOpcode() != MCBinaryExpr::Add &&
4514  BE->getOpcode() != MCBinaryExpr::Sub)
4515  return false;
4516 
4517  // See if the addend is a constant, otherwise there's more going
4518  // on here than we can deal with.
4519  auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4520  if (!AddendExpr)
4521  return false;
4522 
4523  Addend = AddendExpr->getValue();
4524  if (BE->getOpcode() == MCBinaryExpr::Sub)
4525  Addend = -Addend;
4526 
4527  // It's some symbol reference + a constant addend, but really
4528  // shouldn't use both Darwin and ELF syntax.
4529  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4530  DarwinRefKind == MCSymbolRefExpr::VK_None;
4531 }
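
// Illustrative sketch of the walk performed above, over a toy expression
// tree: peel an optional target-specific wrapper (the ELF :lo12:-style
// modifier), then accept either a bare symbol or (symbol +/- constant).
// The real code operates on MCExpr/AArch64MCExpr nodes; this model assumes
// the children of Add/Sub/TargetWrapper nodes are present.
#include <cstdint>
#include <memory>
#include <string>

struct ToyExpr {
  enum Kind { Symbol, Constant, Add, Sub, TargetWrapper } K;
  std::string Name;                  // symbol name, or wrapper modifier ("lo12")
  int64_t Value = 0;                 // constant value
  std::shared_ptr<ToyExpr> LHS, RHS; // children (TargetWrapper uses LHS only)
};

static bool classifyToySymbolRef(const ToyExpr *E, std::string &Modifier,
                                 std::string &Sym, int64_t &Addend) {
  Modifier.clear();
  Sym.clear();
  Addend = 0;
  if (E->K == ToyExpr::TargetWrapper) { // e.g. :lo12:foo
    Modifier = E->Name;
    E = E->LHS.get();
  }
  if (E->K == ToyExpr::Symbol) {        // plain "foo"
    Sym = E->Name;
    return true;
  }
  if (E->K != ToyExpr::Add && E->K != ToyExpr::Sub)
    return false;
  if (E->LHS->K != ToyExpr::Symbol || E->RHS->K != ToyExpr::Constant)
    return false;                       // more complex than symbol +/- constant
  Sym = E->LHS->Name;
  Addend = (E->K == ToyExpr::Sub) ? -E->RHS->Value : E->RHS->Value;
  return true;
}
// For ":lo12:foo + 4" the walk yields Modifier="lo12", Sym="foo", Addend=4.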
4532 
4533 /// Force static initialization.
4534 extern "C" void LLVMInitializeAArch64AsmParser() {
4535  RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
4536  RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
4537  RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
4538 }
4539 
4540 #define GET_REGISTER_MATCHER
4541 #define GET_SUBTARGET_FEATURE_NAME
4542 #define GET_MATCHER_IMPLEMENTATION
4543 #include "AArch64GenAsmMatcher.inc"
4544 
4545 // Define this matcher function after the auto-generated include so we
4546 // have the match class enum definitions.
4547 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4548  unsigned Kind) {
4549  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4550  // If the kind is a token for a literal immediate, check if our asm
4551  // operand matches. This is for InstAliases which have a fixed-value
4552  // immediate in the syntax.
4553  int64_t ExpectedVal;
4554  switch (Kind) {
4555  default:
4556  return Match_InvalidOperand;
4557  case MCK__35_0:
4558  ExpectedVal = 0;
4559  break;
4560  case MCK__35_1:
4561  ExpectedVal = 1;
4562  break;
4563  case MCK__35_12:
4564  ExpectedVal = 12;
4565  break;
4566  case MCK__35_16:
4567  ExpectedVal = 16;
4568  break;
4569  case MCK__35_2:
4570  ExpectedVal = 2;
4571  break;
4572  case MCK__35_24:
4573  ExpectedVal = 24;
4574  break;
4575  case MCK__35_3:
4576  ExpectedVal = 3;
4577  break;
4578  case MCK__35_32:
4579  ExpectedVal = 32;
4580  break;
4581  case MCK__35_4:
4582  ExpectedVal = 4;
4583  break;
4584  case MCK__35_48:
4585  ExpectedVal = 48;
4586  break;
4587  case MCK__35_6:
4588  ExpectedVal = 6;
4589  break;
4590  case MCK__35_64:
4591  ExpectedVal = 64;
4592  break;
4593  case MCK__35_8:
4594  ExpectedVal = 8;
4595  break;
4596  }
4597  if (!Op.isImm())
4598  return Match_InvalidOperand;
4599  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4600  if (!CE)
4601  return Match_InvalidOperand;
4602  if (CE->getValue() == ExpectedVal)
4603  return Match_Success;
4604  return Match_InvalidOperand;
4605 }
4606 
4607 OperandMatchResultTy
4608 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
4609 
4610  SMLoc S = getLoc();
4611 
4612  if (getParser().getTok().isNot(AsmToken::Identifier)) {
4613  Error(S, "expected register");
4614  return MatchOperand_ParseFail;
4615  }
4616 
4617  int FirstReg = tryParseRegister();
4618  if (FirstReg == -1) {
4619  return MatchOperand_ParseFail;
4620  }
4621  const MCRegisterClass &WRegClass =
4622  AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
4623  const MCRegisterClass &XRegClass =
4624  AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
4625 
4626  bool isXReg = XRegClass.contains(FirstReg),
4627  isWReg = WRegClass.contains(FirstReg);
4628  if (!isXReg && !isWReg) {
4629  Error(S, "expected first even register of a "
4630  "consecutive same-size even/odd register pair");
4631  return MatchOperand_ParseFail;
4632  }
4633 
4634  const MCRegisterInfo *RI = getContext().getRegisterInfo();
4635  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
4636 
4637  if (FirstEncoding & 0x1) {
4638  Error(S, "expected first even register of a "
4639  "consecutive same-size even/odd register pair");
4640  return MatchOperand_ParseFail;
4641  }
4642 
4643  SMLoc M = getLoc();
4644  if (getParser().getTok().isNot(AsmToken::Comma)) {
4645  Error(M, "expected comma");
4646  return MatchOperand_ParseFail;
4647  }
4648  // Eat the comma
4649  getParser().Lex();
4650 
4651  SMLoc E = getLoc();
4652  int SecondReg = tryParseRegister();
4653  if (SecondReg == -1) {
4654  return MatchOperand_ParseFail;
4655  }
4656 
4657  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
4658  (isXReg && !XRegClass.contains(SecondReg)) ||
4659  (isWReg && !WRegClass.contains(SecondReg))) {
4660  Error(E, "expected second odd register of a "
4661  "consecutive same-size even/odd register pair");
4662  return MatchOperand_ParseFail;
4663  }
4664 
4665  unsigned Pair = 0;
4666  if (isXReg) {
4667  Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
4668  &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
4669  } else {
4670  Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
4671  &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
4672  }
4673 
4674  Operands.push_back(AArch64Operand::CreateReg(Pair, false, S, getLoc(),
4675  getContext()));
4676 
4677  return MatchOperand_Success;
4678 }
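
// Illustrative sketch of the sequential-pair constraint enforced above: the
// first register's encoding must be even, the second must be exactly the
// next encoding, and both must come from the same (W or X) class, as
// required by instructions such as CASP. Standalone model; the real code
// queries MCRegisterInfo for encodings and register classes.
#include <cassert>

struct SeqReg {
  unsigned Encoding; // 0..31
  bool IsX;          // true for Xn, false for Wn
};

static bool isValidSeqPair(SeqReg First, SeqReg Second) {
  return (First.Encoding & 0x1) == 0 &&
         Second.Encoding == First.Encoding + 1 && First.IsX == Second.IsX;
}

static void demoSeqPairChecks() {
  assert(isValidSeqPair({4, true}, {5, true}));   // x4, x5 -> valid pair
  assert(!isValidSeqPair({5, true}, {6, true}));  // first register is odd
  assert(!isValidSeqPair({4, true}, {5, false})); // mixed W/X classes
}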
MachineLoop * L
static bool isValidVectorKind(StringRef Name)
static bool isReg(const MCInst &MI, unsigned OpNo)
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE StringRef drop_front(size_t N=1) const
Return a StringRef equal to 'this' but with the first N elements dropped.
Definition: StringRef.h:634
std::enable_if< std::numeric_limits< T >::is_signed, bool >::type getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition: StringRef.h:494
Represents a range in source code.
Definition: SMLoc.h:49
void push_back(const T &Elt)
Definition: SmallVector.h:211
Target & getTheAArch64beTarget()
LLVM_NODISCARD int compare_lower(StringRef RHS) const
compare_lower - Compare two strings, ignoring case.
Definition: StringRef.cpp:52
static float getFPImmFloat(unsigned Imm)
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1309
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
Definition: MCExpr.h:298
const char * getPointer() const
Definition: SMLoc.h:35
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
size_t i
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:39
static const AArch64MCExpr * create(const MCExpr *Expr, VariantKind Kind, MCContext &Ctx)
Generic assembler parser interface, for use by target specific assembly parsers.
Definition: MCAsmParser.h:66
virtual void Initialize(MCAsmParser &Parser)
Initialize the extension for parsing using the given Parser.
Target & getTheAArch64leTarget()
static MCOperand createExpr(const MCExpr *Val)
Definition: MCInst.h:129
MCTargetAsmParser - Generic interface to target specific assembly parsers.
static CondCode getInvertedCondCode(CondCode Code)
Target specific streamer interface.
Definition: MCStreamer.h:73
LLVM_NODISCARD bool equals_lower(StringRef RHS) const
equals_lower - Check for string equality, ignoring case.
Definition: StringRef.h:173
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on string...
Definition: MCAsmLexer.h:114
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
bool isNot(TokenKind K) const
Definition: MCAsmLexer.h:87
OperandMatchResultTy
static unsigned getXRegFromWReg(unsigned Reg)
static bool isMOVZMovAlias(uint64_t Value, int Shift, int RegWidth)
#define SYS_ALIAS(op1, Cn, Cm, op2)
void changeSign()
Definition: APFloat.h:975
virtual void EmitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
Definition: MCStreamer.cpp:765
return AArch64::GPR64RegClass contains(Reg)
bool isSubRegisterEq(unsigned RegA, unsigned RegB) const
Returns true if RegB is a sub-register of RegA or if RegB == RegA.
static MCOperand createReg(unsigned Reg)
Definition: MCInst.h:111
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
std::pair< StringRef, StringRef > getToken(StringRef Source, StringRef Delimiters=" \t\n\v\f\r")
getToken - This function extracts one token from source, ignoring any leading characters that appear ...
static F t[256]
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:32
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:34
LLVM_ATTRIBUTE_ALWAYS_INLINE R Default(const T &Value) const
Definition: StringSwitch.h:244
Reg
All possible values of the reg field in the ModR/M byte.
Target independent representation for an assembler token.
Definition: MCAsmLexer.h:25
APInt bitcastToAPInt() const
Definition: APFloat.h:1012
Represent a reference to a symbol from inside an expression.
Definition: MCExpr.h:161
bool getExtensionFeatures(unsigned Extensions, std::vector< StringRef > &Features)
static bool isLogicalImmediate(uint64_t imm, unsigned regSize)
isLogicalImmediate - Return true if the immediate is valid for a logical immediate instruction of the...
std::string join(IteratorT Begin, IteratorT End, StringRef Separator)
Joins the strings in the range [Begin, End), adding Separator between the elements.
Definition: StringExtras.h:232
Windows NT (Windows on ARM)
Target & getTheARM64Target()
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
Definition: APInt.h:377
static bool isMem(const MachineInstr &MI, unsigned Op)
Definition: X86InstrInfo.h:135
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand...
This file implements a class to represent arbitrary precision integral constant values and operations...
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:60
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
static bool isMOVNMovAlias(uint64_t Value, int Shift, int RegWidth)
Context object for machine code objects.
Definition: MCContext.h:51
LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch & Case(const char(&S)[N], const T &Value)
Definition: StringSwitch.h:74
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE bool startswith(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:264
unsigned getReg() const
Returns the register number.
Definition: MCInst.h:63
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: APInt.h:33
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
Definition: StringRef.h:699
bool getArchFeatures(unsigned ArchKind, std::vector< StringRef > &Features)
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:842
int64_t getIntVal() const
Definition: MCAsmLexer.h:119
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
Definition: StringRef.h:135
MCRegisterClass - Base class of TargetRegisterClass.
const AsmToken & getTok() const
Get the current AsmToken from the stream.
Definition: MCAsmParser.cpp:33
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:141
const char * Name
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:150
static GCRegistry::Add< CoreCLRGC > E("coreclr","CoreCLR-compatible GC")
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
static unsigned getWRegFromXReg(unsigned Reg)
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
FeatureBitset ToggleFeature(uint64_t FB)
ToggleFeature - Toggle a feature and returns the re-computed feature bits.
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
const MCExpr * getExpr() const
Definition: MCInst.h:93
const MCExpr * getLHS() const
Get the left-hand side expression of the binary operator.
Definition: MCExpr.h:514
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:43
Streaming machine code generation interface.
Definition: MCStreamer.h:161
MCTargetStreamer * getTargetStreamer()
Definition: MCStreamer.h:223
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1...
Definition: MathExtras.h:111
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t find(char C, size_t From=0) const
Search for the first character C in the string.
Definition: StringRef.h:295
static const struct @283 ExtensionMap[]
static const char * getShiftExtendName(AArch64_AM::ShiftExtendType ST)
getShiftName - Get the string encoding for the shift type.
int64_t getValue() const
Definition: MCExpr.h:147
const MCRegisterClass & getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
SMLoc getLoc() const
Definition: MCAsmLexer.cpp:28
static int MCLOHNameToId(StringRef Name)
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Definition: StringRef.h:587
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:24
unsigned getRegister(unsigned i) const
getRegister - Return the specified register in the class.
virtual MCAsmLexer & getLexer()=0
This file declares a class to represent arbitrary precision floating point values and provide a varie...
LLVM_NODISCARD StringRef trim(char Char) const
Return string with consecutive Char characters starting from the left and right removed.
Definition: StringRef.h:825
MCLOHType
Linker Optimization Hint Type.
bool isExpr() const
Definition: MCInst.h:59
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang","erlang-compatible garbage collector")
const AsmToken peekTok(bool ShouldSkipSpace=true)
Look ahead at the next token to be lexed.
Definition: MCAsmLexer.h:212
Binary assembler expressions.
Definition: MCExpr.h:388
void setLoc(SMLoc loc)
Definition: MCInst.h:161
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx, const MCRegisterClass *RC) const
Return a super-register of the specified register Reg so its sub-register of index SubIdx is Reg...
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
unsigned parseArch(StringRef Arch)
void setOpcode(unsigned Op)
Definition: MCInst.h:158
bool contains(unsigned Reg) const
contains - Return true if the specified register is included in this register class.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:843
const FeatureBitset & getFeatureBits() const
getFeatureBits - Return the feature bits.
bool is(TokenKind K) const
Definition: MCAsmLexer.h:86
static StringRef MCLOHDirectiveName()
unsigned Log2_32(uint32_t Value)
Log2_32 - This function returns the floor log base 2 of the specified value, or -1 if the value is zero (32-bit edition).
Definition: MathExtras.h:513
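A small usage sketch showing the floor behaviour; the helper name is hypothetical:

#include "llvm/Support/MathExtras.h"

// Floor semantics: Log2_32(32) == 5 and Log2_32(48) == 5 as well.
static unsigned shiftAmountForStride(unsigned Bytes) {
  return llvm::Log2_32(Bytes);
}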
StringMap - This is an unconventional map that is specialized for handling keys that are "strings", which are basically ranges of memory.
Definition: StringMap.h:223
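A sketch of the kind of alias table a ".req" directive implies; the names below are hypothetical, and the real parser maps each alias to a (is-vector, register-number) pair rather than a bare unsigned:

#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"

static llvm::StringMap<unsigned> Aliases; // alias name -> register number

static void defineAlias(llvm::StringRef Name, unsigned Reg) {
  Aliases[Name] = Reg;                    // e.g. "fp .req x29"
}

static unsigned lookupAlias(llvm::StringRef Name) {
  auto It = Aliases.find(Name);
  return It == Aliases.end() ? 0 : It->second;
}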
unsigned getOpcode() const
Definition: MCInst.h:159
Class for arbitrary precision integers.
Definition: APInt.h:77
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bit signed integer value.
Definition: APInt.h:383
LLVM_NODISCARD std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:716
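A sketch of splitting a vector-register token into its register and arrangement pieces, in the spirit of what the operand parsing does; the function name is hypothetical:

#include "llvm/ADT/StringRef.h"
#include <utility>

// "v0.8b" -> {"v0", "8b"}; when no '.' is present the second piece is empty.
static std::pair<llvm::StringRef, llvm::StringRef>
splitRegAndKind(llvm::StringRef Tok) {
  return Tok.split('.');
}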
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
static uint8_t encodeAdvSIMDModImmType10(uint64_t Imm)
const SysReg * lookupSysRegByName(StringRef)
Base class for user error types.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:130
uint32_t parseGenericRegister(StringRef Name)
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm is the 3-bit extend amount, and the extend type (uxtb, uxth, uxtw, uxtx, sxtb, sxth, sxtw, sxtx) is encoded in the remaining bits.
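A minimal sketch (hypothetical helper name) of encoding the extend operand of an instruction such as "add x0, x1, w2, uxtw #2":

#include "MCTargetDesc/AArch64AddressingModes.h"

// Pack {UXTW, shift 2} into the single arithmetic-extend immediate.
static unsigned buildUxtwByTwo() {
  return AArch64_AM::getArithExtendImm(AArch64_AM::UXTW, 2);
}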
static const fltSemantics & IEEEdouble()
Definition: APFloat.cpp:103
static SMLoc getFromPointer(const char *Ptr)
Definition: SMLoc.h:37
const MCExpr * getRHS() const
Get the right-hand side expression of the binary operator.
Definition: MCExpr.h:517
static bool isAdvSIMDModImmType10(uint64_t Imm)
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string.
Definition: MCAsmLexer.h:103
RegisterMCAsmParser - Helper template for registering a target specific assembly parser, for use in the target machine initialization function.
static const size_t npos
Definition: StringRef.h:51
StringRef getABIName() const
getABIName - If this returns a non-empty string this represents the textual name of the ABI that we want the backend to use.
Opcode getOpcode() const
Get the kind of this binary expression.
Definition: MCExpr.h:511
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:135
static bool isValidMCLOHType(unsigned Kind)
MCSubtargetInfo - Generic base class for all target subtargets.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:287
VariantKind getKind() const
Definition: MCExpr.h:313
static int MCLOHIdToNbArgs(MCLOHType Kind)
uint16_t getEncodingValue(unsigned RegNo) const
Returns the encoding for RegNo.
LLVM Value Representation.
Definition: Value.h:71
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
Definition: Compiler.h:239
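A small sketch of the macro in use; the switch and its cases are hypothetical:

#include "llvm/Support/Compiler.h"

// 'q' registers are flagged as quad-width and then handled like 'd' below.
static unsigned elementBytes(char Kind, bool &IsQuad) {
  IsQuad = false;
  switch (Kind) {
  case 'q':
    IsQuad = true;
    LLVM_FALLTHROUGH; // deliberate: fall into the 'd' case
  case 'd':
    return 8;
  case 's':
    return 4;
  default:
    return 0;
  }
}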
const FeatureBitset Features
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:81
This class implements an extremely fast bulk output stream that can only output to a stream. It does not support seeking, reopening, rewinding, line buffered disk output, etc.
Definition: raw_ostream.h:44
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:125
Subtraction.
Definition: MCExpr.h:412
void addOperand(const MCOperand &Op)
Definition: MCInst.h:168
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:47
unsigned getDefaultExtensions(StringRef CPU, unsigned ArchKind)
Represents a location in source code.
Definition: SMLoc.h:24
static const char * getSubtargetFeatureName(uint64_t Val)
LLVM_NODISCARD bool startswith_lower(StringRef Prefix) const
Check if this string starts with the given Prefix, ignoring case.
Definition: StringRef.cpp:61
static MCOperand createImm(int64_t Val)
Definition: MCInst.h:117
LLVM_NODISCARD std::string lower() const
Definition: StringRef.cpp:122
static const MCConstantExpr * create(int64_t Value, MCContext &Ctx)
Definition: MCExpr.cpp:149
const MCOperand & getOperand(unsigned i) const
Definition: MCInst.h:164
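A sketch of assembling an MCInst by hand with these accessors; the opcode value and operand order are illustrative, not taken from the generated matcher tables:

#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"

// Build a register/register/immediate form and return it by value.
static llvm::MCInst buildImmediateForm(unsigned Opcode, unsigned DstReg,
                                       unsigned SrcReg, int64_t Imm) {
  llvm::MCInst Inst;
  Inst.setOpcode(Opcode);
  Inst.addOperand(llvm::MCOperand::createReg(DstReg));
  Inst.addOperand(llvm::MCOperand::createReg(SrcReg));
  Inst.addOperand(llvm::MCOperand::createImm(Imm));
  return Inst;
}

// The same immediate expressed as an MCExpr operand instead.
static llvm::MCOperand symbolicImm(int64_t Imm, llvm::MCContext &Ctx) {
  return llvm::MCOperand::createExpr(llvm::MCConstantExpr::create(Imm, Ctx));
}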
static void parseValidVectorKind(StringRef Name, unsigned &NumElements, char &ElementKind)
void LLVMInitializeAArch64AsmParser()
Force static initialization.
void setDefaultFeatures(StringRef CPU, StringRef FS)
Set the features to the default for the given CPU with an appended feature string.
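To close the loop on the subtarget-feature entries above (setDefaultFeatures, ToggleFeature, getFeatureBits), a hedged sketch of how a ".cpu"-style directive handler might rebuild and then adjust the feature set; STI is assumed to come from the usual Target::createMCSubtargetInfo path and FeatureBit is an illustrative bit index:

#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include <cstdint>

// Reset to the CPU's defaults, then force one extension bit on if needed.
static void switchCPU(llvm::MCSubtargetInfo &STI, llvm::StringRef CPU,
                      uint64_t FeatureBit) {
  STI.setDefaultFeatures(CPU, /*FS=*/"");
  if (!STI.getFeatureBits()[FeatureBit])
    STI.ToggleFeature(FeatureBit); // returns the recomputed FeatureBitset
}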