// Scraped from the LLVM 3.7.0 doxygen listing of AArch64AsmParser.cpp.
1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/SourceMgr.h"
#include <cstdio>
37 using namespace llvm;
38 
39 namespace {
40 
41 class AArch64Operand;
42 
43 class AArch64AsmParser : public MCTargetAsmParser {
44 private:
45  StringRef Mnemonic; ///< Instruction mnemonic.
46  MCSubtargetInfo &STI;
47 
48  // Map of register aliases registers via the .req directive.
50 
51  AArch64TargetStreamer &getTargetStreamer() {
52  MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
53  return static_cast<AArch64TargetStreamer &>(TS);
54  }
55 
56  SMLoc getLoc() const { return getParser().getTok().getLoc(); }
57 
58  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
59  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
60  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
61  unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
62  int tryParseRegister();
63  int tryMatchVectorRegister(StringRef &Kind, bool expected);
64  bool parseRegister(OperandVector &Operands);
65  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
66  bool parseVectorList(OperandVector &Operands);
67  bool parseOperand(OperandVector &Operands, bool isCondCode,
68  bool invertCondCode);
69 
70  void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
71  bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
72  bool showMatchError(SMLoc Loc, unsigned ErrCode);
73 
74  bool parseDirectiveWord(unsigned Size, SMLoc L);
75  bool parseDirectiveInst(SMLoc L);
76 
77  bool parseDirectiveTLSDescCall(SMLoc L);
78 
79  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
80  bool parseDirectiveLtorg(SMLoc L);
81 
82  bool parseDirectiveReq(StringRef Name, SMLoc L);
83  bool parseDirectiveUnreq(SMLoc L);
84 
85  bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
86  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
88  uint64_t &ErrorInfo,
89  bool MatchingInlineAsm) override;
90 /// @name Auto-generated Match Functions
91 /// {
92 
93 #define GET_ASSEMBLER_HEADER
94 #include "AArch64GenAsmMatcher.inc"
95 
96  /// }
97 
98  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
99  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
100  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
101  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
102  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
103  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
104  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
105  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
106  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
107  OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
108  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
109  bool tryParseVectorRegister(OperandVector &Operands);
110  OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
111 
112 public:
113  enum AArch64MatchResultTy {
114  Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
115 #define GET_OPERAND_DIAGNOSTIC_TYPES
116 #include "AArch64GenAsmMatcher.inc"
117  };
118  AArch64AsmParser(MCSubtargetInfo &STI, MCAsmParser &Parser,
119  const MCInstrInfo &MII, const MCTargetOptions &Options)
120  : MCTargetAsmParser(), STI(STI) {
122  MCStreamer &S = getParser().getStreamer();
123  if (S.getTargetStreamer() == nullptr)
124  new AArch64TargetStreamer(S);
125 
126  // Initialize the set of available features.
127  setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
128  }
129 
130  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
131  SMLoc NameLoc, OperandVector &Operands) override;
132  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
133  bool ParseDirective(AsmToken DirectiveID) override;
134  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
135  unsigned Kind) override;
136 
137  static bool classifySymbolRef(const MCExpr *Expr,
138  AArch64MCExpr::VariantKind &ELFRefKind,
139  MCSymbolRefExpr::VariantKind &DarwinRefKind,
140  int64_t &Addend);
141 };
142 } // end anonymous namespace
143 
144 namespace {
145 
146 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
147 /// instruction.
148 class AArch64Operand : public MCParsedAsmOperand {
149 private:
150  enum KindTy {
151  k_Immediate,
152  k_ShiftedImm,
153  k_CondCode,
154  k_Register,
155  k_VectorList,
156  k_VectorIndex,
157  k_Token,
158  k_SysReg,
159  k_SysCR,
160  k_Prefetch,
161  k_ShiftExtend,
162  k_FPImm,
163  k_Barrier
164  } Kind;
165 
166  SMLoc StartLoc, EndLoc;
167 
168  struct TokOp {
169  const char *Data;
170  unsigned Length;
171  bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
172  };
173 
174  struct RegOp {
175  unsigned RegNum;
176  bool isVector;
177  };
178 
179  struct VectorListOp {
180  unsigned RegNum;
181  unsigned Count;
182  unsigned NumElements;
183  unsigned ElementKind;
184  };
185 
186  struct VectorIndexOp {
187  unsigned Val;
188  };
189 
190  struct ImmOp {
191  const MCExpr *Val;
192  };
193 
194  struct ShiftedImmOp {
195  const MCExpr *Val;
196  unsigned ShiftAmount;
197  };
198 
199  struct CondCodeOp {
201  };
202 
203  struct FPImmOp {
204  unsigned Val; // Encoded 8-bit representation.
205  };
206 
207  struct BarrierOp {
208  unsigned Val; // Not the enum since not all values have names.
209  const char *Data;
210  unsigned Length;
211  };
212 
213  struct SysRegOp {
214  const char *Data;
215  unsigned Length;
216  uint32_t MRSReg;
217  uint32_t MSRReg;
218  uint32_t PStateField;
219  };
220 
221  struct SysCRImmOp {
222  unsigned Val;
223  };
224 
225  struct PrefetchOp {
226  unsigned Val;
227  const char *Data;
228  unsigned Length;
229  };
230 
231  struct ShiftExtendOp {
233  unsigned Amount;
234  bool HasExplicitAmount;
235  };
236 
237  struct ExtendOp {
238  unsigned Val;
239  };
240 
241  union {
242  struct TokOp Tok;
243  struct RegOp Reg;
244  struct VectorListOp VectorList;
245  struct VectorIndexOp VectorIndex;
246  struct ImmOp Imm;
247  struct ShiftedImmOp ShiftedImm;
248  struct CondCodeOp CondCode;
249  struct FPImmOp FPImm;
250  struct BarrierOp Barrier;
251  struct SysRegOp SysReg;
252  struct SysCRImmOp SysCRImm;
253  struct PrefetchOp Prefetch;
254  struct ShiftExtendOp ShiftExtend;
255  };
256 
257  // Keep the MCContext around as the MCExprs may need manipulated during
258  // the add<>Operands() calls.
259  MCContext &Ctx;
260 
261 public:
262  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
263 
264  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
265  Kind = o.Kind;
266  StartLoc = o.StartLoc;
267  EndLoc = o.EndLoc;
268  switch (Kind) {
269  case k_Token:
270  Tok = o.Tok;
271  break;
272  case k_Immediate:
273  Imm = o.Imm;
274  break;
275  case k_ShiftedImm:
276  ShiftedImm = o.ShiftedImm;
277  break;
278  case k_CondCode:
279  CondCode = o.CondCode;
280  break;
281  case k_FPImm:
282  FPImm = o.FPImm;
283  break;
284  case k_Barrier:
285  Barrier = o.Barrier;
286  break;
287  case k_Register:
288  Reg = o.Reg;
289  break;
290  case k_VectorList:
291  VectorList = o.VectorList;
292  break;
293  case k_VectorIndex:
294  VectorIndex = o.VectorIndex;
295  break;
296  case k_SysReg:
297  SysReg = o.SysReg;
298  break;
299  case k_SysCR:
300  SysCRImm = o.SysCRImm;
301  break;
302  case k_Prefetch:
303  Prefetch = o.Prefetch;
304  break;
305  case k_ShiftExtend:
306  ShiftExtend = o.ShiftExtend;
307  break;
308  }
309  }
310 
311  /// getStartLoc - Get the location of the first token of this operand.
312  SMLoc getStartLoc() const override { return StartLoc; }
313  /// getEndLoc - Get the location of the last token of this operand.
314  SMLoc getEndLoc() const override { return EndLoc; }
315 
316  StringRef getToken() const {
317  assert(Kind == k_Token && "Invalid access!");
318  return StringRef(Tok.Data, Tok.Length);
319  }
320 
321  bool isTokenSuffix() const {
322  assert(Kind == k_Token && "Invalid access!");
323  return Tok.IsSuffix;
324  }
325 
326  const MCExpr *getImm() const {
327  assert(Kind == k_Immediate && "Invalid access!");
328  return Imm.Val;
329  }
330 
331  const MCExpr *getShiftedImmVal() const {
332  assert(Kind == k_ShiftedImm && "Invalid access!");
333  return ShiftedImm.Val;
334  }
335 
336  unsigned getShiftedImmShift() const {
337  assert(Kind == k_ShiftedImm && "Invalid access!");
338  return ShiftedImm.ShiftAmount;
339  }
340 
341  AArch64CC::CondCode getCondCode() const {
342  assert(Kind == k_CondCode && "Invalid access!");
343  return CondCode.Code;
344  }
345 
346  unsigned getFPImm() const {
347  assert(Kind == k_FPImm && "Invalid access!");
348  return FPImm.Val;
349  }
350 
351  unsigned getBarrier() const {
352  assert(Kind == k_Barrier && "Invalid access!");
353  return Barrier.Val;
354  }
355 
356  StringRef getBarrierName() const {
357  assert(Kind == k_Barrier && "Invalid access!");
358  return StringRef(Barrier.Data, Barrier.Length);
359  }
360 
361  unsigned getReg() const override {
362  assert(Kind == k_Register && "Invalid access!");
363  return Reg.RegNum;
364  }
365 
366  unsigned getVectorListStart() const {
367  assert(Kind == k_VectorList && "Invalid access!");
368  return VectorList.RegNum;
369  }
370 
371  unsigned getVectorListCount() const {
372  assert(Kind == k_VectorList && "Invalid access!");
373  return VectorList.Count;
374  }
375 
376  unsigned getVectorIndex() const {
377  assert(Kind == k_VectorIndex && "Invalid access!");
378  return VectorIndex.Val;
379  }
380 
381  StringRef getSysReg() const {
382  assert(Kind == k_SysReg && "Invalid access!");
383  return StringRef(SysReg.Data, SysReg.Length);
384  }
385 
386  unsigned getSysCR() const {
387  assert(Kind == k_SysCR && "Invalid access!");
388  return SysCRImm.Val;
389  }
390 
391  unsigned getPrefetch() const {
392  assert(Kind == k_Prefetch && "Invalid access!");
393  return Prefetch.Val;
394  }
395 
396  StringRef getPrefetchName() const {
397  assert(Kind == k_Prefetch && "Invalid access!");
398  return StringRef(Prefetch.Data, Prefetch.Length);
399  }
400 
401  AArch64_AM::ShiftExtendType getShiftExtendType() const {
402  assert(Kind == k_ShiftExtend && "Invalid access!");
403  return ShiftExtend.Type;
404  }
405 
406  unsigned getShiftExtendAmount() const {
407  assert(Kind == k_ShiftExtend && "Invalid access!");
408  return ShiftExtend.Amount;
409  }
410 
411  bool hasShiftExtendAmount() const {
412  assert(Kind == k_ShiftExtend && "Invalid access!");
413  return ShiftExtend.HasExplicitAmount;
414  }
415 
416  bool isImm() const override { return Kind == k_Immediate; }
417  bool isMem() const override { return false; }
418  bool isSImm9() const {
419  if (!isImm())
420  return false;
421  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
422  if (!MCE)
423  return false;
424  int64_t Val = MCE->getValue();
425  return (Val >= -256 && Val < 256);
426  }
427  bool isSImm7s4() const {
428  if (!isImm())
429  return false;
430  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
431  if (!MCE)
432  return false;
433  int64_t Val = MCE->getValue();
434  return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
435  }
436  bool isSImm7s8() const {
437  if (!isImm())
438  return false;
439  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
440  if (!MCE)
441  return false;
442  int64_t Val = MCE->getValue();
443  return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
444  }
445  bool isSImm7s16() const {
446  if (!isImm())
447  return false;
448  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
449  if (!MCE)
450  return false;
451  int64_t Val = MCE->getValue();
452  return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
453  }
454 
455  bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
456  AArch64MCExpr::VariantKind ELFRefKind;
457  MCSymbolRefExpr::VariantKind DarwinRefKind;
458  int64_t Addend;
459  if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
460  Addend)) {
461  // If we don't understand the expression, assume the best and
462  // let the fixup and relocation code deal with it.
463  return true;
464  }
465 
466  if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
467  ELFRefKind == AArch64MCExpr::VK_LO12 ||
468  ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
469  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
470  ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
471  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
472  ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
473  ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
474  ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
475  // Note that we don't range-check the addend. It's adjusted modulo page
476  // size when converted, so there is no "out of range" condition when using
477  // @pageoff.
478  return Addend >= 0 && (Addend % Scale) == 0;
479  } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
480  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
481  // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
482  return Addend == 0;
483  }
484 
485  return false;
486  }
487 
488  template <int Scale> bool isUImm12Offset() const {
489  if (!isImm())
490  return false;
491 
492  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
493  if (!MCE)
494  return isSymbolicUImm12Offset(getImm(), Scale);
495 
496  int64_t Val = MCE->getValue();
497  return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
498  }
499 
500  bool isImm0_7() const {
501  if (!isImm())
502  return false;
503  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
504  if (!MCE)
505  return false;
506  int64_t Val = MCE->getValue();
507  return (Val >= 0 && Val < 8);
508  }
509  bool isImm1_8() const {
510  if (!isImm())
511  return false;
512  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
513  if (!MCE)
514  return false;
515  int64_t Val = MCE->getValue();
516  return (Val > 0 && Val < 9);
517  }
518  bool isImm0_15() const {
519  if (!isImm())
520  return false;
521  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
522  if (!MCE)
523  return false;
524  int64_t Val = MCE->getValue();
525  return (Val >= 0 && Val < 16);
526  }
527  bool isImm1_16() const {
528  if (!isImm())
529  return false;
530  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
531  if (!MCE)
532  return false;
533  int64_t Val = MCE->getValue();
534  return (Val > 0 && Val < 17);
535  }
536  bool isImm0_31() const {
537  if (!isImm())
538  return false;
539  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
540  if (!MCE)
541  return false;
542  int64_t Val = MCE->getValue();
543  return (Val >= 0 && Val < 32);
544  }
545  bool isImm1_31() const {
546  if (!isImm())
547  return false;
548  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
549  if (!MCE)
550  return false;
551  int64_t Val = MCE->getValue();
552  return (Val >= 1 && Val < 32);
553  }
554  bool isImm1_32() const {
555  if (!isImm())
556  return false;
557  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
558  if (!MCE)
559  return false;
560  int64_t Val = MCE->getValue();
561  return (Val >= 1 && Val < 33);
562  }
563  bool isImm0_63() const {
564  if (!isImm())
565  return false;
566  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
567  if (!MCE)
568  return false;
569  int64_t Val = MCE->getValue();
570  return (Val >= 0 && Val < 64);
571  }
572  bool isImm1_63() const {
573  if (!isImm())
574  return false;
575  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
576  if (!MCE)
577  return false;
578  int64_t Val = MCE->getValue();
579  return (Val >= 1 && Val < 64);
580  }
581  bool isImm1_64() const {
582  if (!isImm())
583  return false;
584  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
585  if (!MCE)
586  return false;
587  int64_t Val = MCE->getValue();
588  return (Val >= 1 && Val < 65);
589  }
590  bool isImm0_127() const {
591  if (!isImm())
592  return false;
593  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
594  if (!MCE)
595  return false;
596  int64_t Val = MCE->getValue();
597  return (Val >= 0 && Val < 128);
598  }
599  bool isImm0_255() const {
600  if (!isImm())
601  return false;
602  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
603  if (!MCE)
604  return false;
605  int64_t Val = MCE->getValue();
606  return (Val >= 0 && Val < 256);
607  }
608  bool isImm0_65535() const {
609  if (!isImm())
610  return false;
611  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
612  if (!MCE)
613  return false;
614  int64_t Val = MCE->getValue();
615  return (Val >= 0 && Val < 65536);
616  }
617  bool isImm32_63() const {
618  if (!isImm())
619  return false;
620  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
621  if (!MCE)
622  return false;
623  int64_t Val = MCE->getValue();
624  return (Val >= 32 && Val < 64);
625  }
626  bool isLogicalImm32() const {
627  if (!isImm())
628  return false;
629  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
630  if (!MCE)
631  return false;
632  int64_t Val = MCE->getValue();
633  if (Val >> 32 != 0 && Val >> 32 != ~0LL)
634  return false;
635  Val &= 0xFFFFFFFF;
636  return AArch64_AM::isLogicalImmediate(Val, 32);
637  }
638  bool isLogicalImm64() const {
639  if (!isImm())
640  return false;
641  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
642  if (!MCE)
643  return false;
644  return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
645  }
646  bool isLogicalImm32Not() const {
647  if (!isImm())
648  return false;
649  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
650  if (!MCE)
651  return false;
652  int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
653  return AArch64_AM::isLogicalImmediate(Val, 32);
654  }
655  bool isLogicalImm64Not() const {
656  if (!isImm())
657  return false;
658  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
659  if (!MCE)
660  return false;
661  return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
662  }
663  bool isShiftedImm() const { return Kind == k_ShiftedImm; }
664  bool isAddSubImm() const {
665  if (!isShiftedImm() && !isImm())
666  return false;
667 
668  const MCExpr *Expr;
669 
670  // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
671  if (isShiftedImm()) {
672  unsigned Shift = ShiftedImm.ShiftAmount;
673  Expr = ShiftedImm.Val;
674  if (Shift != 0 && Shift != 12)
675  return false;
676  } else {
677  Expr = getImm();
678  }
679 
680  AArch64MCExpr::VariantKind ELFRefKind;
681  MCSymbolRefExpr::VariantKind DarwinRefKind;
682  int64_t Addend;
683  if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
684  DarwinRefKind, Addend)) {
685  return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
686  || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
687  || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
688  || ELFRefKind == AArch64MCExpr::VK_LO12
689  || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
690  || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
691  || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
692  || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
693  || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
694  || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
695  || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
696  }
697 
698  // Otherwise it should be a real immediate in range:
699  const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
700  return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
701  }
702  bool isAddSubImmNeg() const {
703  if (!isShiftedImm() && !isImm())
704  return false;
705 
706  const MCExpr *Expr;
707 
708  // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
709  if (isShiftedImm()) {
710  unsigned Shift = ShiftedImm.ShiftAmount;
711  Expr = ShiftedImm.Val;
712  if (Shift != 0 && Shift != 12)
713  return false;
714  } else
715  Expr = getImm();
716 
717  // Otherwise it should be a real negative immediate in range:
718  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
719  return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
720  }
721  bool isCondCode() const { return Kind == k_CondCode; }
722  bool isSIMDImmType10() const {
723  if (!isImm())
724  return false;
725  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
726  if (!MCE)
727  return false;
729  }
730  bool isBranchTarget26() const {
731  if (!isImm())
732  return false;
733  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
734  if (!MCE)
735  return true;
736  int64_t Val = MCE->getValue();
737  if (Val & 0x3)
738  return false;
739  return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
740  }
741  bool isPCRelLabel19() const {
742  if (!isImm())
743  return false;
744  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
745  if (!MCE)
746  return true;
747  int64_t Val = MCE->getValue();
748  if (Val & 0x3)
749  return false;
750  return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
751  }
752  bool isBranchTarget14() const {
753  if (!isImm())
754  return false;
755  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
756  if (!MCE)
757  return true;
758  int64_t Val = MCE->getValue();
759  if (Val & 0x3)
760  return false;
761  return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
762  }
763 
764  bool
765  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
766  if (!isImm())
767  return false;
768 
769  AArch64MCExpr::VariantKind ELFRefKind;
770  MCSymbolRefExpr::VariantKind DarwinRefKind;
771  int64_t Addend;
772  if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
773  DarwinRefKind, Addend)) {
774  return false;
775  }
776  if (DarwinRefKind != MCSymbolRefExpr::VK_None)
777  return false;
778 
779  for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
780  if (ELFRefKind == AllowedModifiers[i])
781  return Addend == 0;
782  }
783 
784  return false;
785  }
786 
787  bool isMovZSymbolG3() const {
788  return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
789  }
790 
791  bool isMovZSymbolG2() const {
795  }
796 
797  bool isMovZSymbolG1() const {
798  return isMovWSymbol({
802  });
803  }
804 
805  bool isMovZSymbolG0() const {
809  }
810 
811  bool isMovKSymbolG3() const {
812  return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
813  }
814 
815  bool isMovKSymbolG2() const {
816  return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
817  }
818 
819  bool isMovKSymbolG1() const {
820  return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
823  }
824 
825  bool isMovKSymbolG0() const {
826  return isMovWSymbol(
829  }
830 
831  template<int RegWidth, int Shift>
832  bool isMOVZMovAlias() const {
833  if (!isImm()) return false;
834 
835  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
836  if (!CE) return false;
837  uint64_t Value = CE->getValue();
838 
839  if (RegWidth == 32)
840  Value &= 0xffffffffULL;
841 
842  // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
843  if (Value == 0 && Shift != 0)
844  return false;
845 
846  return (Value & ~(0xffffULL << Shift)) == 0;
847  }
848 
849  template<int RegWidth, int Shift>
850  bool isMOVNMovAlias() const {
851  if (!isImm()) return false;
852 
853  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
854  if (!CE) return false;
855  uint64_t Value = CE->getValue();
856 
857  // MOVZ takes precedence over MOVN.
858  for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
859  if ((Value & ~(0xffffULL << MOVZShift)) == 0)
860  return false;
861 
862  Value = ~Value;
863  if (RegWidth == 32)
864  Value &= 0xffffffffULL;
865 
866  return (Value & ~(0xffffULL << Shift)) == 0;
867  }
868 
869  bool isFPImm() const { return Kind == k_FPImm; }
870  bool isBarrier() const { return Kind == k_Barrier; }
871  bool isSysReg() const { return Kind == k_SysReg; }
872  bool isMRSSystemRegister() const {
873  if (!isSysReg()) return false;
874 
875  return SysReg.MRSReg != -1U;
876  }
877  bool isMSRSystemRegister() const {
878  if (!isSysReg()) return false;
879 
880  return SysReg.MSRReg != -1U;
881  }
882  bool isSystemPStateField() const {
883  if (!isSysReg()) return false;
884 
885  return SysReg.PStateField != -1U;
886  }
887  bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
888  bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
889  bool isVectorRegLo() const {
890  return Kind == k_Register && Reg.isVector &&
891  AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
892  Reg.RegNum);
893  }
894  bool isGPR32as64() const {
895  return Kind == k_Register && !Reg.isVector &&
896  AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
897  }
898  bool isWSeqPair() const {
899  return Kind == k_Register && !Reg.isVector &&
900  AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
901  Reg.RegNum);
902  }
903  bool isXSeqPair() const {
904  return Kind == k_Register && !Reg.isVector &&
905  AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
906  Reg.RegNum);
907  }
908 
909  bool isGPR64sp0() const {
910  return Kind == k_Register && !Reg.isVector &&
911  AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
912  }
913 
914  /// Is this a vector list with the type implicit (presumably attached to the
915  /// instruction itself)?
916  template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
917  return Kind == k_VectorList && VectorList.Count == NumRegs &&
918  !VectorList.ElementKind;
919  }
920 
921  template <unsigned NumRegs, unsigned NumElements, char ElementKind>
922  bool isTypedVectorList() const {
923  if (Kind != k_VectorList)
924  return false;
925  if (VectorList.Count != NumRegs)
926  return false;
927  if (VectorList.ElementKind != ElementKind)
928  return false;
929  return VectorList.NumElements == NumElements;
930  }
931 
932  bool isVectorIndex1() const {
933  return Kind == k_VectorIndex && VectorIndex.Val == 1;
934  }
935  bool isVectorIndexB() const {
936  return Kind == k_VectorIndex && VectorIndex.Val < 16;
937  }
938  bool isVectorIndexH() const {
939  return Kind == k_VectorIndex && VectorIndex.Val < 8;
940  }
941  bool isVectorIndexS() const {
942  return Kind == k_VectorIndex && VectorIndex.Val < 4;
943  }
944  bool isVectorIndexD() const {
945  return Kind == k_VectorIndex && VectorIndex.Val < 2;
946  }
947  bool isToken() const override { return Kind == k_Token; }
948  bool isTokenEqual(StringRef Str) const {
949  return Kind == k_Token && getToken() == Str;
950  }
951  bool isSysCR() const { return Kind == k_SysCR; }
952  bool isPrefetch() const { return Kind == k_Prefetch; }
953  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
954  bool isShifter() const {
955  if (!isShiftExtend())
956  return false;
957 
958  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
959  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
960  ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
961  ST == AArch64_AM::MSL);
962  }
963  bool isExtend() const {
964  if (!isShiftExtend())
965  return false;
966 
967  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
968  return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
969  ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
970  ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
971  ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
972  ET == AArch64_AM::LSL) &&
973  getShiftExtendAmount() <= 4;
974  }
975 
976  bool isExtend64() const {
977  if (!isExtend())
978  return false;
979  // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
980  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
981  return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
982  }
983  bool isExtendLSL64() const {
984  if (!isExtend())
985  return false;
986  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
987  return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
988  ET == AArch64_AM::LSL) &&
989  getShiftExtendAmount() <= 4;
990  }
991 
992  template<int Width> bool isMemXExtend() const {
993  if (!isExtend())
994  return false;
995  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
996  return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
997  (getShiftExtendAmount() == Log2_32(Width / 8) ||
998  getShiftExtendAmount() == 0);
999  }
1000 
1001  template<int Width> bool isMemWExtend() const {
1002  if (!isExtend())
1003  return false;
1004  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1005  return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1006  (getShiftExtendAmount() == Log2_32(Width / 8) ||
1007  getShiftExtendAmount() == 0);
1008  }
1009 
1010  template <unsigned width>
1011  bool isArithmeticShifter() const {
1012  if (!isShifter())
1013  return false;
1014 
1015  // An arithmetic shifter is LSL, LSR, or ASR.
1016  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1017  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1018  ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1019  }
1020 
1021  template <unsigned width>
1022  bool isLogicalShifter() const {
1023  if (!isShifter())
1024  return false;
1025 
1026  // A logical shifter is LSL, LSR, ASR or ROR.
1027  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1028  return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1029  ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1030  getShiftExtendAmount() < width;
1031  }
1032 
1033  bool isMovImm32Shifter() const {
1034  if (!isShifter())
1035  return false;
1036 
1037  // A MOVi shifter is LSL of 0, 16, 32, or 48.
1038  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1039  if (ST != AArch64_AM::LSL)
1040  return false;
1041  uint64_t Val = getShiftExtendAmount();
1042  return (Val == 0 || Val == 16);
1043  }
1044 
1045  bool isMovImm64Shifter() const {
1046  if (!isShifter())
1047  return false;
1048 
1049  // A MOVi shifter is LSL of 0 or 16.
1050  AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1051  if (ST != AArch64_AM::LSL)
1052  return false;
1053  uint64_t Val = getShiftExtendAmount();
1054  return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1055  }
1056 
1057  bool isLogicalVecShifter() const {
1058  if (!isShifter())
1059  return false;
1060 
1061  // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1062  unsigned Shift = getShiftExtendAmount();
1063  return getShiftExtendType() == AArch64_AM::LSL &&
1064  (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1065  }
1066 
1067  bool isLogicalVecHalfWordShifter() const {
1068  if (!isLogicalVecShifter())
1069  return false;
1070 
1071  // A logical vector shifter is a left shift by 0 or 8.
1072  unsigned Shift = getShiftExtendAmount();
1073  return getShiftExtendType() == AArch64_AM::LSL &&
1074  (Shift == 0 || Shift == 8);
1075  }
1076 
1077  bool isMoveVecShifter() const {
1078  if (!isShiftExtend())
1079  return false;
1080 
1081  // A logical vector shifter is a left shift by 8 or 16.
1082  unsigned Shift = getShiftExtendAmount();
1083  return getShiftExtendType() == AArch64_AM::MSL &&
1084  (Shift == 8 || Shift == 16);
1085  }
1086 
1087  // Fallback unscaled operands are for aliases of LDR/STR that fall back
1088  // to LDUR/STUR when the offset is not legal for the former but is for
1089  // the latter. As such, in addition to checking for being a legal unscaled
1090  // address, also check that it is not a legal scaled address. This avoids
1091  // ambiguity in the matcher.
1092  template<int Width>
1093  bool isSImm9OffsetFB() const {
1094  return isSImm9() && !isUImm12Offset<Width / 8>();
1095  }
1096 
1097  bool isAdrpLabel() const {
1098  // Validation was handled during parsing, so we just sanity check that
1099  // something didn't go haywire.
1100  if (!isImm())
1101  return false;
1102 
1103  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1104  int64_t Val = CE->getValue();
1105  int64_t Min = - (4096 * (1LL << (21 - 1)));
1106  int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1107  return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1108  }
1109 
1110  return true;
1111  }
1112 
1113  bool isAdrLabel() const {
1114  // Validation was handled during parsing, so we just sanity check that
1115  // something didn't go haywire.
1116  if (!isImm())
1117  return false;
1118 
1119  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1120  int64_t Val = CE->getValue();
1121  int64_t Min = - (1LL << (21 - 1));
1122  int64_t Max = ((1LL << (21 - 1)) - 1);
1123  return Val >= Min && Val <= Max;
1124  }
1125 
1126  return true;
1127  }
1128 
1129  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1130  // Add as immediates when possible. Null MCExpr = 0.
1131  if (!Expr)
1133  else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1135  else
1136  Inst.addOperand(MCOperand::createExpr(Expr));
1137  }
1138 
1139  void addRegOperands(MCInst &Inst, unsigned N) const {
1140  assert(N == 1 && "Invalid number of operands!");
1142  }
1143 
1144  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1145  assert(N == 1 && "Invalid number of operands!");
1146  assert(
1147  AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1148 
1149  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1150  uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1151  RI->getEncodingValue(getReg()));
1152 
1153  Inst.addOperand(MCOperand::createReg(Reg));
1154  }
1155 
1156  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1157  assert(N == 1 && "Invalid number of operands!");
1158  assert(
1159  AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1160  Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1161  }
1162 
1163  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1164  assert(N == 1 && "Invalid number of operands!");
1165  assert(
1166  AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1168  }
1169 
1170  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1171  assert(N == 1 && "Invalid number of operands!");
1173  }
1174 
1175  template <unsigned NumRegs>
1176  void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1177  assert(N == 1 && "Invalid number of operands!");
1178  static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1179  AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1180  unsigned FirstReg = FirstRegs[NumRegs - 1];
1181 
1182  Inst.addOperand(
1183  MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1184  }
1185 
1186  template <unsigned NumRegs>
1187  void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1188  assert(N == 1 && "Invalid number of operands!");
1189  static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1190  AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1191  unsigned FirstReg = FirstRegs[NumRegs - 1];
1192 
1193  Inst.addOperand(
1194  MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1195  }
1196 
1197  void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1198  assert(N == 1 && "Invalid number of operands!");
1199  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1200  }
1201 
1202  void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1203  assert(N == 1 && "Invalid number of operands!");
1204  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1205  }
1206 
1207  void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1208  assert(N == 1 && "Invalid number of operands!");
1209  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1210  }
1211 
1212  void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1213  assert(N == 1 && "Invalid number of operands!");
1214  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1215  }
1216 
1217  void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1218  assert(N == 1 && "Invalid number of operands!");
1219  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1220  }
1221 
1222  void addImmOperands(MCInst &Inst, unsigned N) const {
1223  assert(N == 1 && "Invalid number of operands!");
1224  // If this is a pageoff symrefexpr with an addend, adjust the addend
1225  // to be only the page-offset portion. Otherwise, just add the expr
1226  // as-is.
1227  addExpr(Inst, getImm());
1228  }
1229 
1230  void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1231  assert(N == 2 && "Invalid number of operands!");
1232  if (isShiftedImm()) {
1233  addExpr(Inst, getShiftedImmVal());
1234  Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1235  } else {
1236  addExpr(Inst, getImm());
1238  }
1239  }
1240 
1241  void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
1242  assert(N == 2 && "Invalid number of operands!");
1243 
1244  const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
1245  const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
1246  int64_t Val = -CE->getValue();
1247  unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
1248 
1249  Inst.addOperand(MCOperand::createImm(Val));
1250  Inst.addOperand(MCOperand::createImm(ShiftAmt));
1251  }
1252 
1253  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1254  assert(N == 1 && "Invalid number of operands!");
1255  Inst.addOperand(MCOperand::createImm(getCondCode()));
1256  }
1257 
1258  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1259  assert(N == 1 && "Invalid number of operands!");
1260  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1261  if (!MCE)
1262  addExpr(Inst, getImm());
1263  else
1264  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1265  }
1266 
1267  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1268  addImmOperands(Inst, N);
1269  }
1270 
1271  template<int Scale>
1272  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1273  assert(N == 1 && "Invalid number of operands!");
1274  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1275 
1276  if (!MCE) {
1277  Inst.addOperand(MCOperand::createExpr(getImm()));
1278  return;
1279  }
1280  Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1281  }
1282 
1283  void addSImm9Operands(MCInst &Inst, unsigned N) const {
1284  assert(N == 1 && "Invalid number of operands!");
1285  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1287  }
1288 
1289  void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1290  assert(N == 1 && "Invalid number of operands!");
1291  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1292  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1293  }
1294 
1295  void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1296  assert(N == 1 && "Invalid number of operands!");
1297  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1298  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1299  }
1300 
1301  void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1302  assert(N == 1 && "Invalid number of operands!");
1303  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1304  Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
1305  }
1306 
1307  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1308  assert(N == 1 && "Invalid number of operands!");
1309  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1311  }
1312 
1313  void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1314  assert(N == 1 && "Invalid number of operands!");
1315  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1317  }
1318 
1319  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1320  assert(N == 1 && "Invalid number of operands!");
1321  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1323  }
1324 
1325  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1326  assert(N == 1 && "Invalid number of operands!");
1327  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1328  assert(MCE && "Invalid constant immediate operand!");
1330  }
1331 
1332  void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1333  assert(N == 1 && "Invalid number of operands!");
1334  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1336  }
1337 
1338  void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1339  assert(N == 1 && "Invalid number of operands!");
1340  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1342  }
1343 
1344  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1345  assert(N == 1 && "Invalid number of operands!");
1346  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1348  }
1349 
1350  void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1351  assert(N == 1 && "Invalid number of operands!");
1352  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1354  }
1355 
1356  void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1357  assert(N == 1 && "Invalid number of operands!");
1358  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1360  }
1361 
1362  void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1363  assert(N == 1 && "Invalid number of operands!");
1364  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1366  }
1367 
1368  void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1369  assert(N == 1 && "Invalid number of operands!");
1370  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1372  }
1373 
1374  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1375  assert(N == 1 && "Invalid number of operands!");
1376  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1378  }
1379 
1380  void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1381  assert(N == 1 && "Invalid number of operands!");
1382  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1384  }
1385 
1386  void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1387  assert(N == 1 && "Invalid number of operands!");
1388  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1390  }
1391 
1392  void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1393  assert(N == 1 && "Invalid number of operands!");
1394  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1395  uint64_t encoding =
1396  AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1397  Inst.addOperand(MCOperand::createImm(encoding));
1398  }
1399 
1400  void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1401  assert(N == 1 && "Invalid number of operands!");
1402  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1403  uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1404  Inst.addOperand(MCOperand::createImm(encoding));
1405  }
1406 
1407  void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1408  assert(N == 1 && "Invalid number of operands!");
1409  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1410  int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1411  uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1412  Inst.addOperand(MCOperand::createImm(encoding));
1413  }
1414 
1415  void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1416  assert(N == 1 && "Invalid number of operands!");
1417  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1418  uint64_t encoding =
1420  Inst.addOperand(MCOperand::createImm(encoding));
1421  }
1422 
1423  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1424  assert(N == 1 && "Invalid number of operands!");
1425  const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1426  uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1427  Inst.addOperand(MCOperand::createImm(encoding));
1428  }
1429 
1430  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1431  // Branch operands don't encode the low bits, so shift them off
1432  // here. If it's a label, however, just put it on directly as there's
1433  // not enough information now to do anything.
1434  assert(N == 1 && "Invalid number of operands!");
1435  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1436  if (!MCE) {
1437  addExpr(Inst, getImm());
1438  return;
1439  }
1440  assert(MCE && "Invalid constant immediate operand!");
1441  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1442  }
1443 
1444  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1445  // Branch operands don't encode the low bits, so shift them off
1446  // here. If it's a label, however, just put it on directly as there's
1447  // not enough information now to do anything.
1448  assert(N == 1 && "Invalid number of operands!");
1449  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1450  if (!MCE) {
1451  addExpr(Inst, getImm());
1452  return;
1453  }
1454  assert(MCE && "Invalid constant immediate operand!");
1455  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1456  }
1457 
1458  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1459  // Branch operands don't encode the low bits, so shift them off
1460  // here. If it's a label, however, just put it on directly as there's
1461  // not enough information now to do anything.
1462  assert(N == 1 && "Invalid number of operands!");
1463  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1464  if (!MCE) {
1465  addExpr(Inst, getImm());
1466  return;
1467  }
1468  assert(MCE && "Invalid constant immediate operand!");
1469  Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1470  }
1471 
1472  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1473  assert(N == 1 && "Invalid number of operands!");
1474  Inst.addOperand(MCOperand::createImm(getFPImm()));
1475  }
1476 
1477  void addBarrierOperands(MCInst &Inst, unsigned N) const {
1478  assert(N == 1 && "Invalid number of operands!");
1479  Inst.addOperand(MCOperand::createImm(getBarrier()));
1480  }
1481 
1482  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1483  assert(N == 1 && "Invalid number of operands!");
1484 
1485  Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1486  }
1487 
1488  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1489  assert(N == 1 && "Invalid number of operands!");
1490 
1491  Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1492  }
1493 
1494  void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1495  assert(N == 1 && "Invalid number of operands!");
1496 
1497  Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1498  }
1499 
1500  void addSysCROperands(MCInst &Inst, unsigned N) const {
1501  assert(N == 1 && "Invalid number of operands!");
1502  Inst.addOperand(MCOperand::createImm(getSysCR()));
1503  }
1504 
1505  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1506  assert(N == 1 && "Invalid number of operands!");
1507  Inst.addOperand(MCOperand::createImm(getPrefetch()));
1508  }
1509 
1510  void addShifterOperands(MCInst &Inst, unsigned N) const {
1511  assert(N == 1 && "Invalid number of operands!");
1512  unsigned Imm =
1513  AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1514  Inst.addOperand(MCOperand::createImm(Imm));
1515  }
1516 
1517  void addExtendOperands(MCInst &Inst, unsigned N) const {
1518  assert(N == 1 && "Invalid number of operands!");
1519  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1520  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1521  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1522  Inst.addOperand(MCOperand::createImm(Imm));
1523  }
1524 
1525  void addExtend64Operands(MCInst &Inst, unsigned N) const {
1526  assert(N == 1 && "Invalid number of operands!");
1527  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1528  if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1529  unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1530  Inst.addOperand(MCOperand::createImm(Imm));
1531  }
1532 
1533  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1534  assert(N == 2 && "Invalid number of operands!");
1535  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1536  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1537  Inst.addOperand(MCOperand::createImm(IsSigned));
1538  Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1539  }
1540 
1541  // For 8-bit load/store instructions with a register offset, both the
1542  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1543  // they're disambiguated by whether the shift was explicit or implicit rather
1544  // than its size.
1545  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1546  assert(N == 2 && "Invalid number of operands!");
1547  AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1548  bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1549  Inst.addOperand(MCOperand::createImm(IsSigned));
1550  Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1551  }
1552 
1553  template<int Shift>
1554  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1555  assert(N == 1 && "Invalid number of operands!");
1556 
1557  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1558  uint64_t Value = CE->getValue();
1559  Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1560  }
1561 
1562  template<int Shift>
1563  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1564  assert(N == 1 && "Invalid number of operands!");
1565 
1566  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1567  uint64_t Value = CE->getValue();
1568  Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1569  }
1570 
1571  void print(raw_ostream &OS) const override;
1572 
1573  static std::unique_ptr<AArch64Operand>
1574  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1575  auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1576  Op->Tok.Data = Str.data();
1577  Op->Tok.Length = Str.size();
1578  Op->Tok.IsSuffix = IsSuffix;
1579  Op->StartLoc = S;
1580  Op->EndLoc = S;
1581  return Op;
1582  }
1583 
1584  static std::unique_ptr<AArch64Operand>
1585  CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1586  auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1587  Op->Reg.RegNum = RegNum;
1588  Op->Reg.isVector = isVector;
1589  Op->StartLoc = S;
1590  Op->EndLoc = E;
1591  return Op;
1592  }
1593 
1594  static std::unique_ptr<AArch64Operand>
1595  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1596  char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1597  auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1598  Op->VectorList.RegNum = RegNum;
1599  Op->VectorList.Count = Count;
1600  Op->VectorList.NumElements = NumElements;
1601  Op->VectorList.ElementKind = ElementKind;
1602  Op->StartLoc = S;
1603  Op->EndLoc = E;
1604  return Op;
1605  }
1606 
1607  static std::unique_ptr<AArch64Operand>
1608  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1609  auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1610  Op->VectorIndex.Val = Idx;
1611  Op->StartLoc = S;
1612  Op->EndLoc = E;
1613  return Op;
1614  }
1615 
1616  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1617  SMLoc E, MCContext &Ctx) {
1618  auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1619  Op->Imm.Val = Val;
1620  Op->StartLoc = S;
1621  Op->EndLoc = E;
1622  return Op;
1623  }
1624 
1625  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1626  unsigned ShiftAmount,
1627  SMLoc S, SMLoc E,
1628  MCContext &Ctx) {
1629  auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1630  Op->ShiftedImm .Val = Val;
1631  Op->ShiftedImm.ShiftAmount = ShiftAmount;
1632  Op->StartLoc = S;
1633  Op->EndLoc = E;
1634  return Op;
1635  }
1636 
1637  static std::unique_ptr<AArch64Operand>
1638  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1639  auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1640  Op->CondCode.Code = Code;
1641  Op->StartLoc = S;
1642  Op->EndLoc = E;
1643  return Op;
1644  }
1645 
1646  static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1647  MCContext &Ctx) {
1648  auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1649  Op->FPImm.Val = Val;
1650  Op->StartLoc = S;
1651  Op->EndLoc = S;
1652  return Op;
1653  }
1654 
1655  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1656  StringRef Str,
1657  SMLoc S,
1658  MCContext &Ctx) {
1659  auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1660  Op->Barrier.Val = Val;
1661  Op->Barrier.Data = Str.data();
1662  Op->Barrier.Length = Str.size();
1663  Op->StartLoc = S;
1664  Op->EndLoc = S;
1665  return Op;
1666  }
1667 
1668  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1669  uint32_t MRSReg,
1670  uint32_t MSRReg,
1671  uint32_t PStateField,
1672  MCContext &Ctx) {
1673  auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1674  Op->SysReg.Data = Str.data();
1675  Op->SysReg.Length = Str.size();
1676  Op->SysReg.MRSReg = MRSReg;
1677  Op->SysReg.MSRReg = MSRReg;
1678  Op->SysReg.PStateField = PStateField;
1679  Op->StartLoc = S;
1680  Op->EndLoc = S;
1681  return Op;
1682  }
1683 
1684  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1685  SMLoc E, MCContext &Ctx) {
1686  auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1687  Op->SysCRImm.Val = Val;
1688  Op->StartLoc = S;
1689  Op->EndLoc = E;
1690  return Op;
1691  }
1692 
1693  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1694  StringRef Str,
1695  SMLoc S,
1696  MCContext &Ctx) {
1697  auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1698  Op->Prefetch.Val = Val;
1699  Op->Barrier.Data = Str.data();
1700  Op->Barrier.Length = Str.size();
1701  Op->StartLoc = S;
1702  Op->EndLoc = S;
1703  return Op;
1704  }
1705 
1706  static std::unique_ptr<AArch64Operand>
1707  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1708  bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1709  auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1710  Op->ShiftExtend.Type = ShOp;
1711  Op->ShiftExtend.Amount = Val;
1712  Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1713  Op->StartLoc = S;
1714  Op->EndLoc = E;
1715  return Op;
1716  }
1717 };
1718 
1719 } // end anonymous namespace.
1720 
1721 void AArch64Operand::print(raw_ostream &OS) const {
1722  switch (Kind) {
1723  case k_FPImm:
1724  OS << "<fpimm " << getFPImm() << "("
1725  << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1726  break;
1727  case k_Barrier: {
1728  StringRef Name = getBarrierName();
1729  if (!Name.empty())
1730  OS << "<barrier " << Name << ">";
1731  else
1732  OS << "<barrier invalid #" << getBarrier() << ">";
1733  break;
1734  }
1735  case k_Immediate:
1736  OS << *getImm();
1737  break;
1738  case k_ShiftedImm: {
1739  unsigned Shift = getShiftedImmShift();
1740  OS << "<shiftedimm ";
1741  OS << *getShiftedImmVal();
1742  OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1743  break;
1744  }
1745  case k_CondCode:
1746  OS << "<condcode " << getCondCode() << ">";
1747  break;
1748  case k_Register:
1749  OS << "<register " << getReg() << ">";
1750  break;
1751  case k_VectorList: {
1752  OS << "<vectorlist ";
1753  unsigned Reg = getVectorListStart();
1754  for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1755  OS << Reg + i << " ";
1756  OS << ">";
1757  break;
1758  }
1759  case k_VectorIndex:
1760  OS << "<vectorindex " << getVectorIndex() << ">";
1761  break;
1762  case k_SysReg:
1763  OS << "<sysreg: " << getSysReg() << '>';
1764  break;
1765  case k_Token:
1766  OS << "'" << getToken() << "'";
1767  break;
1768  case k_SysCR:
1769  OS << "c" << getSysCR();
1770  break;
1771  case k_Prefetch: {
1772  StringRef Name = getPrefetchName();
1773  if (!Name.empty())
1774  OS << "<prfop " << Name << ">";
1775  else
1776  OS << "<prfop invalid #" << getPrefetch() << ">";
1777  break;
1778  }
1779  case k_ShiftExtend: {
1780  OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1781  << getShiftExtendAmount();
1782  if (!hasShiftExtendAmount())
1783  OS << "<imp>";
1784  OS << '>';
1785  break;
1786  }
1787  }
1788 }
1789 
1790 /// @name Auto-generated Match Functions
1791 /// {
1792 
1793 static unsigned MatchRegisterName(StringRef Name);
1794 
1795 /// }
1796 
1797 static unsigned matchVectorRegName(StringRef Name) {
1798  return StringSwitch<unsigned>(Name.lower())
1799  .Case("v0", AArch64::Q0)
1800  .Case("v1", AArch64::Q1)
1801  .Case("v2", AArch64::Q2)
1802  .Case("v3", AArch64::Q3)
1803  .Case("v4", AArch64::Q4)
1804  .Case("v5", AArch64::Q5)
1805  .Case("v6", AArch64::Q6)
1806  .Case("v7", AArch64::Q7)
1807  .Case("v8", AArch64::Q8)
1808  .Case("v9", AArch64::Q9)
1809  .Case("v10", AArch64::Q10)
1810  .Case("v11", AArch64::Q11)
1811  .Case("v12", AArch64::Q12)
1812  .Case("v13", AArch64::Q13)
1813  .Case("v14", AArch64::Q14)
1814  .Case("v15", AArch64::Q15)
1815  .Case("v16", AArch64::Q16)
1816  .Case("v17", AArch64::Q17)
1817  .Case("v18", AArch64::Q18)
1818  .Case("v19", AArch64::Q19)
1819  .Case("v20", AArch64::Q20)
1820  .Case("v21", AArch64::Q21)
1821  .Case("v22", AArch64::Q22)
1822  .Case("v23", AArch64::Q23)
1823  .Case("v24", AArch64::Q24)
1824  .Case("v25", AArch64::Q25)
1825  .Case("v26", AArch64::Q26)
1826  .Case("v27", AArch64::Q27)
1827  .Case("v28", AArch64::Q28)
1828  .Case("v29", AArch64::Q29)
1829  .Case("v30", AArch64::Q30)
1830  .Case("v31", AArch64::Q31)
1831  .Default(0);
1832 }
1833 
1834 static bool isValidVectorKind(StringRef Name) {
1835  return StringSwitch<bool>(Name.lower())
1836  .Case(".8b", true)
1837  .Case(".16b", true)
1838  .Case(".4h", true)
1839  .Case(".8h", true)
1840  .Case(".2s", true)
1841  .Case(".4s", true)
1842  .Case(".1d", true)
1843  .Case(".2d", true)
1844  .Case(".1q", true)
1845  // Accept the width neutral ones, too, for verbose syntax. If those
1846  // aren't used in the right places, the token operand won't match so
1847  // all will work out.
1848  .Case(".b", true)
1849  .Case(".h", true)
1850  .Case(".s", true)
1851  .Case(".d", true)
1852  .Default(false);
1853 }
1854 
1855 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1856  char &ElementKind) {
1857  assert(isValidVectorKind(Name));
1858 
1859  ElementKind = Name.lower()[Name.size() - 1];
1860  NumElements = 0;
1861 
1862  if (Name.size() == 2)
1863  return;
1864 
1865  // Parse the lane count
1866  Name = Name.drop_front();
1867  while (isdigit(Name.front())) {
1868  NumElements = 10 * NumElements + (Name.front() - '0');
1869  Name = Name.drop_front();
1870  }
1871 }
1872 
1873 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1874  SMLoc &EndLoc) {
1875  StartLoc = getLoc();
1876  RegNo = tryParseRegister();
1877  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1878  return (RegNo == (unsigned)-1);
1879 }
1880 
1881 // Matches a register name or register alias previously defined by '.req'
1882 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1883  bool isVector) {
1884  unsigned RegNum = isVector ? matchVectorRegName(Name)
1885  : MatchRegisterName(Name);
1886 
1887  if (RegNum == 0) {
1888  // Check for aliases registered via .req. Canonicalize to lower case.
1889  // That's more consistent since register names are case insensitive, and
1890  // it's how the original entry was passed in from MC/MCParser/AsmParser.
1891  auto Entry = RegisterReqs.find(Name.lower());
1892  if (Entry == RegisterReqs.end())
1893  return 0;
1894  // set RegNum if the match is the right kind of register
1895  if (isVector == Entry->getValue().first)
1896  RegNum = Entry->getValue().second;
1897  }
1898  return RegNum;
1899 }
1900 
1901 /// tryParseRegister - Try to parse a register name. The token must be an
1902 /// Identifier when called, and if it is a register name the token is eaten and
1903 /// the register is added to the operand list.
1904 int AArch64AsmParser::tryParseRegister() {
1905  MCAsmParser &Parser = getParser();
1906  const AsmToken &Tok = Parser.getTok();
1907  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1908 
1909  std::string lowerCase = Tok.getString().lower();
1910  unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1911  // Also handle a few aliases of registers.
1912  if (RegNum == 0)
1913  RegNum = StringSwitch<unsigned>(lowerCase)
1914  .Case("fp", AArch64::FP)
1915  .Case("lr", AArch64::LR)
1916  .Case("x31", AArch64::XZR)
1917  .Case("w31", AArch64::WZR)
1918  .Default(0);
1919 
1920  if (RegNum == 0)
1921  return -1;
1922 
1923  Parser.Lex(); // Eat identifier token.
1924  return RegNum;
1925 }
1926 
1927 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1928 /// kind specifier. If it is a register specifier, eat the token and return it.
1929 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1930  MCAsmParser &Parser = getParser();
1931  if (Parser.getTok().isNot(AsmToken::Identifier)) {
1932  TokError("vector register expected");
1933  return -1;
1934  }
1935 
1936  StringRef Name = Parser.getTok().getString();
1937  // If there is a kind specifier, it's separated from the register name by
1938  // a '.'.
1939  size_t Start = 0, Next = Name.find('.');
1940  StringRef Head = Name.slice(Start, Next);
1941  unsigned RegNum = matchRegisterNameAlias(Head, true);
1942 
1943  if (RegNum) {
1944  if (Next != StringRef::npos) {
1945  Kind = Name.slice(Next, StringRef::npos);
1946  if (!isValidVectorKind(Kind)) {
1947  TokError("invalid vector kind qualifier");
1948  return -1;
1949  }
1950  }
1951  Parser.Lex(); // Eat the register token.
1952  return RegNum;
1953  }
1954 
1955  if (expected)
1956  TokError("vector register expected");
1957  return -1;
1958 }
1959 
1960 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1961 AArch64AsmParser::OperandMatchResultTy
1962 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1963  MCAsmParser &Parser = getParser();
1964  SMLoc S = getLoc();
1965 
1966  if (Parser.getTok().isNot(AsmToken::Identifier)) {
1967  Error(S, "Expected cN operand where 0 <= N <= 15");
1968  return MatchOperand_ParseFail;
1969  }
1970 
1971  StringRef Tok = Parser.getTok().getIdentifier();
1972  if (Tok[0] != 'c' && Tok[0] != 'C') {
1973  Error(S, "Expected cN operand where 0 <= N <= 15");
1974  return MatchOperand_ParseFail;
1975  }
1976 
1977  uint32_t CRNum;
1978  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1979  if (BadNum || CRNum > 15) {
1980  Error(S, "Expected cN operand where 0 <= N <= 15");
1981  return MatchOperand_ParseFail;
1982  }
1983 
1984  Parser.Lex(); // Eat identifier token.
1985  Operands.push_back(
1986  AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1987  return MatchOperand_Success;
1988 }
1989 
/// tryParsePrefetch - Try to parse a prefetch operand.
/// Accepts either a (possibly '#'-prefixed) 5-bit immediate in [0, 31] or a
/// named prefetch hint looked up in the PRFM mapping table.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();
  // Either an identifier for named values or a 5-bit immediate.
  bool Hash = Tok.is(AsmToken::Hash);
  if (Hash || Tok.is(AsmToken::Integer)) {
    if (Hash)
      Parser.Lex(); // Eat hash token.
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    // The immediate must fold to a constant; symbolic values are rejected.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for prefetch operand");
      return MatchOperand_ParseFail;
    }
    unsigned prfop = MCE->getValue();
    if (prfop > 31) {
      TokError("prefetch operand out of range, [0,31] expected");
      return MatchOperand_ParseFail;
    }

    bool Valid;
    auto Mapper = AArch64PRFM::PRFMMapper();
    // Map the raw value back to a name when one exists so the operand can be
    // printed symbolically; Valid is deliberately ignored on this path since
    // any value in range is acceptable.
    StringRef Name =
        Mapper.toString(MCE->getValue(), STI.getFeatureBits(), Valid);
    Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name,
                                                      S, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("pre-fetch hint expected");
    return MatchOperand_ParseFail;
  }

  bool Valid;
  auto Mapper = AArch64PRFM::PRFMMapper();
  // Named form: look the mnemonic up in the PRFM table for this subtarget.
  unsigned prfop =
      Mapper.fromString(Tok.getString(), STI.getFeatureBits(), Valid);
  if (!Valid) {
    TokError("pre-fetch hint expected");
    return MatchOperand_ParseFail;
  }

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(),
                                                    S, getContext()));
  return MatchOperand_Success;
}
2044 
2045 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2046 /// instruction.
2047 AArch64AsmParser::OperandMatchResultTy
2048 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2049  MCAsmParser &Parser = getParser();
2050  SMLoc S = getLoc();
2051  const MCExpr *Expr;
2052 
2053  if (Parser.getTok().is(AsmToken::Hash)) {
2054  Parser.Lex(); // Eat hash token.
2055  }
2056 
2057  if (parseSymbolicImmVal(Expr))
2058  return MatchOperand_ParseFail;
2059 
2060  AArch64MCExpr::VariantKind ELFRefKind;
2061  MCSymbolRefExpr::VariantKind DarwinRefKind;
2062  int64_t Addend;
2063  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2064  if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2065  ELFRefKind == AArch64MCExpr::VK_INVALID) {
2066  // No modifier was specified at all; this is the syntax for an ELF basic
2067  // ADRP relocation (unfortunately).
2068  Expr =
2070  } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2071  DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2072  Addend != 0) {
2073  Error(S, "gotpage label reference not allowed an addend");
2074  return MatchOperand_ParseFail;
2075  } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2076  DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2077  DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2078  ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2079  ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2080  ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2081  // The operand must be an @page or @gotpage qualified symbolref.
2082  Error(S, "page or gotpage label reference expected");
2083  return MatchOperand_ParseFail;
2084  }
2085  }
2086 
2087  // We have either a label reference possibly with addend or an immediate. The
2088  // addend is a raw value here. The linker will adjust it to only reference the
2089  // page.
2090  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2091  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2092 
2093  return MatchOperand_Success;
2094 }
2095 
2096 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2097 /// instruction.
2098 AArch64AsmParser::OperandMatchResultTy
2099 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2100  MCAsmParser &Parser = getParser();
2101  SMLoc S = getLoc();
2102  const MCExpr *Expr;
2103 
2104  if (Parser.getTok().is(AsmToken::Hash)) {
2105  Parser.Lex(); // Eat hash token.
2106  }
2107 
2108  if (getParser().parseExpression(Expr))
2109  return MatchOperand_ParseFail;
2110 
2111  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2112  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2113 
2114  return MatchOperand_Success;
2115 }
2116 
/// tryParseFPImm - A floating point immediate expression operand.
/// Accepts '#'-prefixed or bare real literals, hex-encoded 8-bit FP
/// encodings (0x..), and decimal integers reinterpreted as doubles. The
/// resulting operand holds the 8-bit AArch64 FP immediate encoding (or -1
/// for +0.0, which is special-cased later to use the zero register).
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  bool Hash = false;
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat '#'
    Hash = true;
  }

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  if (Tok.is(AsmToken::Real)) {
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    if (isNegative)
      RealVal.changeSign();

    // Encode the double's bit pattern into the 8-bit FP immediate form;
    // getFP64Imm returns -1 when the value is not representable.
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    // Check for out of range values. As an exception, we let Zero through,
    // as we handle that special case in post-processing before matching in
    // order to use the zero register for it.
    if (Val == -1 && !RealVal.isPosZero()) {
      TokError("expected compatible register or floating-point constant");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val;
    if (!isNegative && Tok.getString().startswith("0x")) {
      // A hex literal is taken as the raw 8-bit encoding directly.
      Val = Tok.getIntVal();
      if (Val > 255 || Val < 0) {
        TokError("encoded floating point value out of range");
        return MatchOperand_ParseFail;
      }
    } else {
      // A decimal integer (e.g. "#1") is parsed as a double and re-encoded.
      APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      // If we had a '-' in front, toggle the sign bit.
      IntVal ^= (uint64_t)isNegative << 63;
      Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
    }
    Parser.Lex(); // Eat the token.
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }

  // Without a leading '#' an unrecognized token is simply "no match" so other
  // operand parsers can try; with '#' it is a hard error.
  if (!Hash)
    return MatchOperand_NoMatch;

  TokError("invalid floating point immediate");
  return MatchOperand_ParseFail;
}
2180 
/// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand.
/// Grammar: ('#')? imm (',' 'lsl' ('#')? shift)? — a constant immediate
/// that is a multiple of 0x1000 and larger than 0xfff is canonicalized to
/// (imm >> 12, lsl #12) automatically.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  const MCExpr *Imm;
  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    // No explicit shift follows; fold an implicit 'lsl #12' out of a large
    // page-aligned constant so it fits the 12-bit immediate field.
    uint64_t ShiftAmount = 0;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
    if (MCE) {
      int64_t Val = MCE->getValue();
      if (Val > 0xfff && (Val & 0xfff) == 0) {
        Imm = MCConstantExpr::create(Val >> 12, getContext());
        ShiftAmount = 12;
      }
    }
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
                                                        getContext()));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!Parser.getTok().is(AsmToken::Identifier) ||
      !Parser.getTok().getIdentifier().equals_lower("lsl")) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  // Eat 'lsl'
  Parser.Lex();

  // The '#' before the shift amount is optional.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex();
  }

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
                                                      S, E, getContext()));
  return MatchOperand_Success;
}
2247 
2248 /// parseCondCodeString - Parse a Condition Code string.
2249 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2251  .Case("eq", AArch64CC::EQ)
2252  .Case("ne", AArch64CC::NE)
2253  .Case("cs", AArch64CC::HS)
2254  .Case("hs", AArch64CC::HS)
2255  .Case("cc", AArch64CC::LO)
2256  .Case("lo", AArch64CC::LO)
2257  .Case("mi", AArch64CC::MI)
2258  .Case("pl", AArch64CC::PL)
2259  .Case("vs", AArch64CC::VS)
2260  .Case("vc", AArch64CC::VC)
2261  .Case("hi", AArch64CC::HI)
2262  .Case("ls", AArch64CC::LS)
2263  .Case("ge", AArch64CC::GE)
2264  .Case("lt", AArch64CC::LT)
2265  .Case("gt", AArch64CC::GT)
2266  .Case("le", AArch64CC::LE)
2267  .Case("al", AArch64CC::AL)
2268  .Case("nv", AArch64CC::NV)
2270  return CC;
2271 }
2272 
2273 /// parseCondCode - Parse a Condition Code operand.
2274 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2275  bool invertCondCode) {
2276  MCAsmParser &Parser = getParser();
2277  SMLoc S = getLoc();
2278  const AsmToken &Tok = Parser.getTok();
2279  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2280 
2281  StringRef Cond = Tok.getString();
2282  AArch64CC::CondCode CC = parseCondCodeString(Cond);
2283  if (CC == AArch64CC::Invalid)
2284  return TokError("invalid condition code");
2285  Parser.Lex(); // Eat identifier token.
2286 
2287  if (invertCondCode) {
2288  if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2289  return TokError("condition codes AL and NV are invalid for this instruction");
2291  }
2292 
2293  Operands.push_back(
2294  AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2295  return false;
2296 }
2297 
2298 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2299 /// them if present.
2300 AArch64AsmParser::OperandMatchResultTy
2301 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2302  MCAsmParser &Parser = getParser();
2303  const AsmToken &Tok = Parser.getTok();
2304  std::string LowerID = Tok.getString().lower();
2307  .Case("lsl", AArch64_AM::LSL)
2308  .Case("lsr", AArch64_AM::LSR)
2309  .Case("asr", AArch64_AM::ASR)
2310  .Case("ror", AArch64_AM::ROR)
2311  .Case("msl", AArch64_AM::MSL)
2312  .Case("uxtb", AArch64_AM::UXTB)
2313  .Case("uxth", AArch64_AM::UXTH)
2314  .Case("uxtw", AArch64_AM::UXTW)
2315  .Case("uxtx", AArch64_AM::UXTX)
2316  .Case("sxtb", AArch64_AM::SXTB)
2317  .Case("sxth", AArch64_AM::SXTH)
2318  .Case("sxtw", AArch64_AM::SXTW)
2319  .Case("sxtx", AArch64_AM::SXTX)
2321 
2322  if (ShOp == AArch64_AM::InvalidShiftExtend)
2323  return MatchOperand_NoMatch;
2324 
2325  SMLoc S = Tok.getLoc();
2326  Parser.Lex();
2327 
2328  bool Hash = getLexer().is(AsmToken::Hash);
2329  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2330  if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2331  ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2332  ShOp == AArch64_AM::MSL) {
2333  // We expect a number here.
2334  TokError("expected #imm after shift specifier");
2335  return MatchOperand_ParseFail;
2336  }
2337 
2338  // "extend" type operatoins don't need an immediate, #0 is implicit.
2339  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2340  Operands.push_back(
2341  AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2342  return MatchOperand_Success;
2343  }
2344 
2345  if (Hash)
2346  Parser.Lex(); // Eat the '#'.
2347 
2348  // Make sure we do actually have a number or a parenthesized expression.
2349  SMLoc E = Parser.getTok().getLoc();
2350  if (!Parser.getTok().is(AsmToken::Integer) &&
2351  !Parser.getTok().is(AsmToken::LParen)) {
2352  Error(E, "expected integer shift amount");
2353  return MatchOperand_ParseFail;
2354  }
2355 
2356  const MCExpr *ImmVal;
2357  if (getParser().parseExpression(ImmVal))
2358  return MatchOperand_ParseFail;
2359 
2360  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2361  if (!MCE) {
2362  Error(E, "expected constant '#imm' after shift specifier");
2363  return MatchOperand_ParseFail;
2364  }
2365 
2366  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2367  Operands.push_back(AArch64Operand::CreateShiftExtend(
2368  ShOp, MCE->getValue(), true, S, E, getContext()));
2369  return MatchOperand_Success;
2370 }
2371 
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
/// Each alias expands to the four SYS operands (op1, Cn, Cm, op2) plus an
/// optional trailing register; aliases whose name contains "all" take no
/// register, all others require one.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                     OperandVector &Operands) {
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  Mnemonic = Name;
  // The instruction is rewritten to a plain SYS; the alias mnemonic only
  // selects which operand encoding to emit below.
  Operands.push_back(
      AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));

  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  const MCExpr *Expr = nullptr;

// Push the four SYS operands (#op1, Cn, Cm, #op2) for one alias expansion.
#define SYS_ALIAS(op1, Cn, Cm, op2) \
  do { \
    Expr = MCConstantExpr::create(op1, getContext()); \
    Operands.push_back( \
        AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
    Operands.push_back( \
        AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
    Operands.push_back( \
        AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
    Expr = MCConstantExpr::create(op2, getContext()); \
    Operands.push_back( \
        AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
  } while (0)

  if (Mnemonic == "ic") {
    if (!Op.compare_lower("ialluis")) {
      // SYS #0, C7, C1, #0
      SYS_ALIAS(0, 7, 1, 0);
    } else if (!Op.compare_lower("iallu")) {
      // SYS #0, C7, C5, #0
      SYS_ALIAS(0, 7, 5, 0);
    } else if (!Op.compare_lower("ivau")) {
      // SYS #3, C7, C5, #1
      SYS_ALIAS(3, 7, 5, 1);
    } else {
      return TokError("invalid operand for IC instruction");
    }
  } else if (Mnemonic == "dc") {
    if (!Op.compare_lower("zva")) {
      // SYS #3, C7, C4, #1
      SYS_ALIAS(3, 7, 4, 1);
    } else if (!Op.compare_lower("ivac")) {
      // SYS #0, C7, C6, #1
      SYS_ALIAS(0, 7, 6, 1);
    } else if (!Op.compare_lower("isw")) {
      // SYS #0, C7, C6, #2
      SYS_ALIAS(0, 7, 6, 2);
    } else if (!Op.compare_lower("cvac")) {
      // SYS #3, C7, C10, #1
      SYS_ALIAS(3, 7, 10, 1);
    } else if (!Op.compare_lower("csw")) {
      // SYS #0, C7, C10, #2
      SYS_ALIAS(0, 7, 10, 2);
    } else if (!Op.compare_lower("cvau")) {
      // SYS #3, C7, C11, #1
      SYS_ALIAS(3, 7, 11, 1);
    } else if (!Op.compare_lower("civac")) {
      // SYS #3, C7, C14, #1
      SYS_ALIAS(3, 7, 14, 1);
    } else if (!Op.compare_lower("cisw")) {
      // SYS #0, C7, C14, #2
      SYS_ALIAS(0, 7, 14, 2);
    } else {
      return TokError("invalid operand for DC instruction");
    }
  } else if (Mnemonic == "at") {
    if (!Op.compare_lower("s1e1r")) {
      // SYS #0, C7, C8, #0
      SYS_ALIAS(0, 7, 8, 0);
    } else if (!Op.compare_lower("s1e2r")) {
      // SYS #4, C7, C8, #0
      SYS_ALIAS(4, 7, 8, 0);
    } else if (!Op.compare_lower("s1e3r")) {
      // SYS #6, C7, C8, #0
      SYS_ALIAS(6, 7, 8, 0);
    } else if (!Op.compare_lower("s1e1w")) {
      // SYS #0, C7, C8, #1
      SYS_ALIAS(0, 7, 8, 1);
    } else if (!Op.compare_lower("s1e2w")) {
      // SYS #4, C7, C8, #1
      SYS_ALIAS(4, 7, 8, 1);
    } else if (!Op.compare_lower("s1e3w")) {
      // SYS #6, C7, C8, #1
      SYS_ALIAS(6, 7, 8, 1);
    } else if (!Op.compare_lower("s1e0r")) {
      // SYS #0, C7, C8, #2
      SYS_ALIAS(0, 7, 8, 2);
    } else if (!Op.compare_lower("s1e0w")) {
      // SYS #0, C7, C8, #3
      SYS_ALIAS(0, 7, 8, 3);
    } else if (!Op.compare_lower("s12e1r")) {
      // SYS #4, C7, C8, #4
      SYS_ALIAS(4, 7, 8, 4);
    } else if (!Op.compare_lower("s12e1w")) {
      // SYS #4, C7, C8, #5
      SYS_ALIAS(4, 7, 8, 5);
    } else if (!Op.compare_lower("s12e0r")) {
      // SYS #4, C7, C8, #6
      SYS_ALIAS(4, 7, 8, 6);
    } else if (!Op.compare_lower("s12e0w")) {
      // SYS #4, C7, C8, #7
      SYS_ALIAS(4, 7, 8, 7);
    } else {
      return TokError("invalid operand for AT instruction");
    }
  } else if (Mnemonic == "tlbi") {
    if (!Op.compare_lower("vmalle1is")) {
      // SYS #0, C8, C3, #0
      SYS_ALIAS(0, 8, 3, 0);
    } else if (!Op.compare_lower("alle2is")) {
      // SYS #4, C8, C3, #0
      SYS_ALIAS(4, 8, 3, 0);
    } else if (!Op.compare_lower("alle3is")) {
      // SYS #6, C8, C3, #0
      SYS_ALIAS(6, 8, 3, 0);
    } else if (!Op.compare_lower("vae1is")) {
      // SYS #0, C8, C3, #1
      SYS_ALIAS(0, 8, 3, 1);
    } else if (!Op.compare_lower("vae2is")) {
      // SYS #4, C8, C3, #1
      SYS_ALIAS(4, 8, 3, 1);
    } else if (!Op.compare_lower("vae3is")) {
      // SYS #6, C8, C3, #1
      SYS_ALIAS(6, 8, 3, 1);
    } else if (!Op.compare_lower("aside1is")) {
      // SYS #0, C8, C3, #2
      SYS_ALIAS(0, 8, 3, 2);
    } else if (!Op.compare_lower("vaae1is")) {
      // SYS #0, C8, C3, #3
      SYS_ALIAS(0, 8, 3, 3);
    } else if (!Op.compare_lower("alle1is")) {
      // SYS #4, C8, C3, #4
      SYS_ALIAS(4, 8, 3, 4);
    } else if (!Op.compare_lower("vale1is")) {
      // SYS #0, C8, C3, #5
      SYS_ALIAS(0, 8, 3, 5);
    } else if (!Op.compare_lower("vaale1is")) {
      // SYS #0, C8, C3, #7
      SYS_ALIAS(0, 8, 3, 7);
    } else if (!Op.compare_lower("vmalle1")) {
      // SYS #0, C8, C7, #0
      SYS_ALIAS(0, 8, 7, 0);
    } else if (!Op.compare_lower("alle2")) {
      // SYS #4, C8, C7, #0
      SYS_ALIAS(4, 8, 7, 0);
    } else if (!Op.compare_lower("vale2is")) {
      // SYS #4, C8, C3, #5
      SYS_ALIAS(4, 8, 3, 5);
    } else if (!Op.compare_lower("vale3is")) {
      // SYS #6, C8, C3, #5
      SYS_ALIAS(6, 8, 3, 5);
    } else if (!Op.compare_lower("alle3")) {
      // SYS #6, C8, C7, #0
      SYS_ALIAS(6, 8, 7, 0);
    } else if (!Op.compare_lower("vae1")) {
      // SYS #0, C8, C7, #1
      SYS_ALIAS(0, 8, 7, 1);
    } else if (!Op.compare_lower("vae2")) {
      // SYS #4, C8, C7, #1
      SYS_ALIAS(4, 8, 7, 1);
    } else if (!Op.compare_lower("vae3")) {
      // SYS #6, C8, C7, #1
      SYS_ALIAS(6, 8, 7, 1);
    } else if (!Op.compare_lower("aside1")) {
      // SYS #0, C8, C7, #2
      SYS_ALIAS(0, 8, 7, 2);
    } else if (!Op.compare_lower("vaae1")) {
      // SYS #0, C8, C7, #3
      SYS_ALIAS(0, 8, 7, 3);
    } else if (!Op.compare_lower("alle1")) {
      // SYS #4, C8, C7, #4
      SYS_ALIAS(4, 8, 7, 4);
    } else if (!Op.compare_lower("vale1")) {
      // SYS #0, C8, C7, #5
      SYS_ALIAS(0, 8, 7, 5);
    } else if (!Op.compare_lower("vale2")) {
      // SYS #4, C8, C7, #5
      SYS_ALIAS(4, 8, 7, 5);
    } else if (!Op.compare_lower("vale3")) {
      // SYS #6, C8, C7, #5
      SYS_ALIAS(6, 8, 7, 5);
    } else if (!Op.compare_lower("vaale1")) {
      // SYS #0, C8, C7, #7
      SYS_ALIAS(0, 8, 7, 7);
    } else if (!Op.compare_lower("ipas2e1")) {
      // SYS #4, C8, C4, #1
      SYS_ALIAS(4, 8, 4, 1);
    } else if (!Op.compare_lower("ipas2le1")) {
      // SYS #4, C8, C4, #5
      SYS_ALIAS(4, 8, 4, 5);
    } else if (!Op.compare_lower("ipas2e1is")) {
      // SYS #4, C8, C0, #1
      SYS_ALIAS(4, 8, 0, 1);
    } else if (!Op.compare_lower("ipas2le1is")) {
      // SYS #4, C8, C0, #5
      SYS_ALIAS(4, 8, 0, 5);
    } else if (!Op.compare_lower("vmalls12e1")) {
      // SYS #4, C8, C7, #6
      SYS_ALIAS(4, 8, 7, 6);
    } else if (!Op.compare_lower("vmalls12e1is")) {
      // SYS #4, C8, C3, #6
      SYS_ALIAS(4, 8, 3, 6);
    } else {
      return TokError("invalid operand for TLBI instruction");
    }
  }

#undef SYS_ALIAS

  Parser.Lex(); // Eat operand.

  // Aliases whose name contains "all" operate on everything and take no
  // register; all others require a register operand.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (getLexer().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat comma.

    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");

    HasRegister = true;
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    Parser.eatToEndOfStatement();
    return TokError("unexpected token in argument list");
  }

  if (ExpectRegister && !HasRegister) {
    return TokError("specified " + Mnemonic + " op requires a register");
  }
  else if (!ExpectRegister && HasRegister) {
    return TokError("specified " + Mnemonic + " op does not use a register");
  }

  Parser.Lex(); // Consume the EndOfStatement
  return false;
}
2619 
/// tryParseBarrierOperand - Parse a DMB/DSB/ISB barrier operand: either a
/// (possibly '#'-prefixed) immediate in [0, 15] or a named barrier option
/// looked up in the DBarrier mapping table.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  // Can be either a #imm style literal or an option name
  bool Hash = Tok.is(AsmToken::Hash);
  if (Hash || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    if (Hash)
      Parser.Lex(); // Eat the '#'
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    // The immediate must fold to a constant in the 4-bit CRm field range.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    if (MCE->getValue() < 0 || MCE->getValue() > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    bool Valid;
    auto Mapper = AArch64DB::DBarrierMapper();
    // Map the value back to a symbolic name when one exists (for printing);
    // Valid is ignored since any in-range value is legal here.
    StringRef Name =
        Mapper.toString(MCE->getValue(), STI.getFeatureBits(), Valid);
    Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
                                                      ExprLoc, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  bool Valid;
  auto Mapper = AArch64DB::DBarrierMapper();
  unsigned Opt =
      Mapper.fromString(Tok.getString(), STI.getFeatureBits(), Valid);
  if (!Valid) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  }

  Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
                                                    getLoc(), getContext()));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}
2679 
/// tryParseSysReg - Parse a system-register name (for MRS/MSR and pstate
/// fields). The identifier is looked up in all three mapping tables and the
/// operand records each table's result; which one is used is decided later
/// by the instruction matcher.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  bool IsKnown;
  auto MRSMapper = AArch64SysReg::MRSMapper();
  uint32_t MRSReg = MRSMapper.fromString(Tok.getString(), STI.getFeatureBits(),
                                         IsKnown);
  assert(IsKnown == (MRSReg != -1U) &&
         "register should be -1 if and only if it's unknown");

  auto MSRMapper = AArch64SysReg::MSRMapper();
  uint32_t MSRReg = MSRMapper.fromString(Tok.getString(), STI.getFeatureBits(),
                                         IsKnown);
  assert(IsKnown == (MSRReg != -1U) &&
         "register should be -1 if and only if it's unknown");

  auto PStateMapper = AArch64PState::PStateMapper();
  uint32_t PStateField =
      PStateMapper.fromString(Tok.getString(), STI.getFeatureBits(), IsKnown);
  assert(IsKnown == (PStateField != -1U) &&
         "register should be -1 if and only if it's unknown");

  // Note: an unknown name still produces an operand (with all fields -1U);
  // validation happens during instruction matching.
  Operands.push_back(AArch64Operand::CreateSysReg(
      Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
}
2713 
/// tryParseVectorRegister - Parse a vector register operand, with an optional
/// kind qualifier (".4s" etc.) and an optional "[index]" element specifier.
/// Returns true when no vector register could be parsed ("no match"); false
/// otherwise.
bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  int64_t Reg = tryMatchVectorRegister(Kind, false);
  if (Reg == -1)
    return true;
  Operands.push_back(
      AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(
        AArch64Operand::CreateToken(Kind, false, S, getContext()));

  // If there is an index specifier following the register, parse that too.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = getLoc();
    Parser.Lex(); // Eat left bracket token.

    // NOTE(review): the error paths below return false (i.e. "matched") after
    // emitting a diagnostic, relying on the reported error to fail the
    // statement — confirm this is intentional before changing.
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for vector index");
      return false;
    }

    SMLoc E = getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");
      return false;
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
                                                         E, getContext()));
  }

  return false;
}
2762 
/// parseRegister - Parse a non-vector register operand.
/// Tries a vector register first (so "v0.4s" style names are claimed by
/// tryParseVectorRegister), then falls back to a scalar register. Returns
/// true if no register could be parsed.
bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = getLoc();
  // Try for a vector register.
  if (!tryParseVectorRegister(Operands))
    return false;

  // Try for a scalar register.
  int64_t Reg = tryParseRegister();
  if (Reg == -1)
    return true;
  Operands.push_back(
      AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));

  // A small number of instructions (FMOVXDhighr, for example) have "[1]"
  // as a string token in the instruction itself.
  if (getLexer().getKind() == AsmToken::LBrac) {
    SMLoc LBracS = getLoc();
    Parser.Lex();
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Integer)) {
      SMLoc IntS = getLoc();
      int64_t Val = Tok.getIntVal();
      // Only the exact form "[1]" is recognized; anything else leaves the
      // tokens unconsumed for later operand parsing.
      if (Val == 1) {
        Parser.Lex();
        if (getLexer().getKind() == AsmToken::RBrac) {
          SMLoc RBracS = getLoc();
          Parser.Lex();
          Operands.push_back(
              AArch64Operand::CreateToken("[", false, LBracS, getContext()));
          Operands.push_back(
              AArch64Operand::CreateToken("1", false, IntS, getContext()));
          Operands.push_back(
              AArch64Operand::CreateToken("]", false, RBracS, getContext()));
          return false;
        }
      }
    }
  }

  return false;
}
2806 
2807 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2808  MCAsmParser &Parser = getParser();
2809  bool HasELFModifier = false;
2811 
2812  if (Parser.getTok().is(AsmToken::Colon)) {
2813  Parser.Lex(); // Eat ':"
2814  HasELFModifier = true;
2815 
2816  if (Parser.getTok().isNot(AsmToken::Identifier)) {
2817  Error(Parser.getTok().getLoc(),
2818  "expect relocation specifier in operand after ':'");
2819  return true;
2820  }
2821 
2822  std::string LowerCase = Parser.getTok().getIdentifier().lower();
2823  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2824  .Case("lo12", AArch64MCExpr::VK_LO12)
2825  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2826  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2827  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2828  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2829  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2830  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2831  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2832  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2833  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2834  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2835  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2836  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2837  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2838  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2839  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2840  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2841  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2842  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2843  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2844  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2845  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2846  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2847  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2848  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2849  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2850  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2851  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2853  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2855  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2856  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2857  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2860 
2861  if (RefKind == AArch64MCExpr::VK_INVALID) {
2862  Error(Parser.getTok().getLoc(),
2863  "expect relocation specifier in operand after ':'");
2864  return true;
2865  }
2866 
2867  Parser.Lex(); // Eat identifier
2868 
2869  if (Parser.getTok().isNot(AsmToken::Colon)) {
2870  Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2871  return true;
2872  }
2873  Parser.Lex(); // Eat ':'
2874  }
2875 
2876  if (getParser().parseExpression(ImmVal))
2877  return true;
2878 
2879  if (HasELFModifier)
2880  ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
2881 
2882  return false;
2883 }
2884 
/// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
///
/// Accepts either the range form "{ v0.8b - v3.8b }" or the enumerated form
/// "{ v0.8b, v1.8b, v2.8b }", optionally followed by a lane index "[n]".
/// Returns true on a hard parse error (diagnostic emitted), false otherwise.
bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
  SMLoc S = getLoc();
  Parser.Lex(); // Eat left bracket token.
  StringRef Kind;
  int64_t FirstReg = tryMatchVectorRegister(Kind, true);
  if (FirstReg == -1)
    return true;
  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  // A '-' after the first register selects the range form; otherwise the
  // registers are listed individually, separated by commas.
  if (Parser.getTok().is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the minus.

    SMLoc Loc = getLoc();
    StringRef NextKind;
    int64_t Reg = tryMatchVectorRegister(NextKind, true);
    if (Reg == -1)
      return true;
    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind)
      return Error(Loc, "mismatched register size suffix");

    // Distance from first to last register, modulo 32 so that a list such as
    // { v31.8b - v1.8b } wraps around the register file correctly.
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      return Error(Loc, "invalid number of vectors");
    }

    Count += Space;
  }
  else {
    while (Parser.getTok().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma token.

      SMLoc Loc = getLoc();
      StringRef NextKind;
      int64_t Reg = tryMatchVectorRegister(NextKind, true);
      if (Reg == -1)
        return true;
      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind)
        return Error(Loc, "mismatched register size suffix");

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
        return Error(Loc, "registers must be sequential");

      PrevReg = Reg;
      ++Count;
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(getLoc(), "'}' expected");
  Parser.Lex(); // Eat the '}' token.

  if (Count > 4)
    return Error(S, "invalid number of vectors");

  unsigned NumElements = 0;
  char ElementKind = 0;
  if (!Kind.empty())
    parseValidVectorKind(Kind, NumElements, ElementKind);

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));

  // If there is an index specifier following the list, parse that too.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    // NOTE(review): the failure paths below diagnose but return false
    // ("no error") rather than true; callers appear to rely on the emitted
    // diagnostic rather than the return value here — confirm before changing.
    if (getParser().parseExpression(ImmVal))
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for vector index");
      return false;
    }

    SMLoc E = getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");
      return false;
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
                                                         E, getContext()));
  }
  return false;
}
2983 
2984 AArch64AsmParser::OperandMatchResultTy
2985 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2986  MCAsmParser &Parser = getParser();
2987  const AsmToken &Tok = Parser.getTok();
2988  if (!Tok.is(AsmToken::Identifier))
2989  return MatchOperand_NoMatch;
2990 
2991  unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
2992 
2993  MCContext &Ctx = getContext();
2994  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2995  if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2996  return MatchOperand_NoMatch;
2997 
2998  SMLoc S = getLoc();
2999  Parser.Lex(); // Eat register
3000 
3001  if (Parser.getTok().isNot(AsmToken::Comma)) {
3002  Operands.push_back(
3003  AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3004  return MatchOperand_Success;
3005  }
3006  Parser.Lex(); // Eat comma.
3007 
3008  if (Parser.getTok().is(AsmToken::Hash))
3009  Parser.Lex(); // Eat hash
3010 
3011  if (Parser.getTok().isNot(AsmToken::Integer)) {
3012  Error(getLoc(), "index must be absent or #0");
3013  return MatchOperand_ParseFail;
3014  }
3015 
3016  const MCExpr *ImmVal;
3017  if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3018  cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3019  Error(getLoc(), "index must be absent or #0");
3020  return MatchOperand_ParseFail;
3021  }
3022 
3023  Operands.push_back(
3024  AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3025  return MatchOperand_Success;
3026 }
3027 
3028 /// parseOperand - Parse a arm instruction operand. For now this parses the
3029 /// operand regardless of the mnemonic.
3030 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3031  bool invertCondCode) {
3032  MCAsmParser &Parser = getParser();
3033  // Check if the current operand has a custom associated parser, if so, try to
3034  // custom parse the operand, or fallback to the general approach.
3035  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3036  if (ResTy == MatchOperand_Success)
3037  return false;
3038  // If there wasn't a custom match, try the generic matcher below. Otherwise,
3039  // there was a match, but an error occurred, in which case, just return that
3040  // the operand parsing failed.
3041  if (ResTy == MatchOperand_ParseFail)
3042  return true;
3043 
3044  // Nothing custom, so do general case parsing.
3045  SMLoc S, E;
3046  switch (getLexer().getKind()) {
3047  default: {
3048  SMLoc S = getLoc();
3049  const MCExpr *Expr;
3050  if (parseSymbolicImmVal(Expr))
3051  return Error(S, "invalid operand");
3052 
3053  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3054  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3055  return false;
3056  }
3057  case AsmToken::LBrac: {
3058  SMLoc Loc = Parser.getTok().getLoc();
3059  Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3060  getContext()));
3061  Parser.Lex(); // Eat '['
3062 
3063  // There's no comma after a '[', so we can parse the next operand
3064  // immediately.
3065  return parseOperand(Operands, false, false);
3066  }
3067  case AsmToken::LCurly:
3068  return parseVectorList(Operands);
3069  case AsmToken::Identifier: {
3070  // If we're expecting a Condition Code operand, then just parse that.
3071  if (isCondCode)
3072  return parseCondCode(Operands, invertCondCode);
3073 
3074  // If it's a register name, parse it.
3075  if (!parseRegister(Operands))
3076  return false;
3077 
3078  // This could be an optional "shift" or "extend" operand.
3079  OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3080  // We can only continue if no tokens were eaten.
3081  if (GotShift != MatchOperand_NoMatch)
3082  return GotShift;
3083 
3084  // This was not a register so parse other operands that start with an
3085  // identifier (like labels) as expressions and create them as immediates.
3086  const MCExpr *IdVal;
3087  S = getLoc();
3088  if (getParser().parseExpression(IdVal))
3089  return true;
3090 
3091  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3092  Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3093  return false;
3094  }
3095  case AsmToken::Integer:
3096  case AsmToken::Real:
3097  case AsmToken::Hash: {
3098  // #42 -> immediate.
3099  S = getLoc();
3100  if (getLexer().is(AsmToken::Hash))
3101  Parser.Lex();
3102 
3103  // Parse a negative sign
3104  bool isNegative = false;
3105  if (Parser.getTok().is(AsmToken::Minus)) {
3106  isNegative = true;
3107  // We need to consume this token only when we have a Real, otherwise
3108  // we let parseSymbolicImmVal take care of it
3109  if (Parser.getLexer().peekTok().is(AsmToken::Real))
3110  Parser.Lex();
3111  }
3112 
3113  // The only Real that should come through here is a literal #0.0 for
3114  // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3115  // so convert the value.
3116  const AsmToken &Tok = Parser.getTok();
3117  if (Tok.is(AsmToken::Real)) {
3118  APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3119  uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3120  if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3121  Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3122  Mnemonic != "fcmlt")
3123  return TokError("unexpected floating point literal");
3124  else if (IntVal != 0 || isNegative)
3125  return TokError("expected floating-point constant #0.0");
3126  Parser.Lex(); // Eat the token.
3127 
3128  Operands.push_back(
3129  AArch64Operand::CreateToken("#0", false, S, getContext()));
3130  Operands.push_back(
3131  AArch64Operand::CreateToken(".0", false, S, getContext()));
3132  return false;
3133  }
3134 
3135  const MCExpr *ImmVal;
3136  if (parseSymbolicImmVal(ImmVal))
3137  return true;
3138 
3139  E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3140  Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3141  return false;
3142  }
3143  case AsmToken::Equal: {
3144  SMLoc Loc = Parser.getTok().getLoc();
3145  if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3146  return Error(Loc, "unexpected token in operand");
3147  Parser.Lex(); // Eat '='
3148  const MCExpr *SubExprVal;
3149  if (getParser().parseExpression(SubExprVal))
3150  return true;
3151 
3152  if (Operands.size() < 2 ||
3153  !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3154  return true;
3155 
3156  bool IsXReg =
3157  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3158  Operands[1]->getReg());
3159 
3160  MCContext& Ctx = getContext();
3161  E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3162  // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3163  if (isa<MCConstantExpr>(SubExprVal)) {
3164  uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3165  uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3166  while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3167  ShiftAmt += 16;
3168  Imm >>= 16;
3169  }
3170  if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3171  Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3172  Operands.push_back(AArch64Operand::CreateImm(
3173  MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3174  if (ShiftAmt)
3175  Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3176  ShiftAmt, true, S, E, Ctx));
3177  return false;
3178  }
3179  APInt Simm = APInt(64, Imm << ShiftAmt);
3180  // check if the immediate is an unsigned or signed 32-bit int for W regs
3181  if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3182  return Error(Loc, "Immediate too large for register");
3183  }
3184  // If it is a label or an imm that cannot fit in a movz, put it into CP.
3185  const MCExpr *CPLoc =
3186  getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4);
3187  Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3188  return false;
3189  }
3190  }
3191 }
3192 
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
///
/// Splits the mnemonic on '.' into a head token plus suffix tokens, diverts
/// .req directives and the IC/DC/AT/TLBI SYS aliases, and then parses the
/// comma-separated operand list. Returns true on error (diagnostic emitted).
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // Canonicalize the legacy "b<cond>" spellings to "b.<cond>" so the
  // condition-code handling below only has to deal with one syntax.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
    bool IsError = parseSysAlias(Head, NameLoc, Operands);
    if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
      Parser.eatToEndOfStatement();
    return IsError;
  }

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  // generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, false, false)) {
      Parser.eatToEndOfStatement();
      return true;
    }

    // N counts the 1-based operand position so that the cond-code flags above
    // can be applied to the right comma-separated operand.
    unsigned N = 2;
    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        Parser.eatToEndOfStatement();
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!
      if (Parser.getTok().is(AsmToken::RBrac)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
                                                       getContext()));
        Parser.Lex();
      }

      if (Parser.getTok().is(AsmToken::Exclaim)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
                                                       getContext()));
        Parser.Lex();
      }

      ++N;
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = Parser.getTok().getLoc();
    Parser.eatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement
  return false;
}
3346 
// FIXME: This entire function is a giant hack to provide us with decent
// operand range validation/diagnostics until TableGen/MC can be extended
// to support autogeneration of this kind of validation.
//
// Diagnoses architecturally-unpredictable register combinations (writeback
// base overlapping a data register, Rt == Rt2 in pair loads) and invalid
// symbolic immediates on add/sub. The operand indices hard-coded per case
// group depend on each opcode's operand layout. Returns true on error.
bool AArch64AsmParser::validateInstruction(MCInst &Inst,
                                           SmallVectorImpl<SMLoc> &Loc) {
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  // Check for indexed addressing modes w/ the base register being the
  // same as a destination/source register or pair load where
  // the Rt == Rt2. All of those are undefined behaviour.
  switch (Inst.getOpcode()) {
  case AArch64::LDPSWpre:
  case AArch64::LDPWpost:
  case AArch64::LDPWpre:
  case AArch64::LDPXpost:
  case AArch64::LDPXpre: {
    // Pre/post-indexed GPR pair loads: operand 0 is the writeback result,
    // so Rt/Rt2/Rn start at operand 1.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    // FALLTHROUGH
  }
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::LDPXi: {
    // Non-writeback pair loads (and, via the fallthrough above, the indexed
    // GPR forms): Rt must differ from Rt2.
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rt2 = Inst.getOperand(1).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::LDPDpost:
  case AArch64::LDPDpre:
  case AArch64::LDPQpost:
  case AArch64::LDPQpre:
  case AArch64::LDPSpost:
  case AArch64::LDPSpre:
  case AArch64::LDPSWpost: {
    // FP/SIMD indexed pair loads (FP registers can't alias the GPR base,
    // so only the Rt == Rt2 check applies).
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::STPDpost:
  case AArch64::STPDpre:
  case AArch64::STPQpost:
  case AArch64::STPQpre:
  case AArch64::STPSpost:
  case AArch64::STPSpre:
  case AArch64::STPWpost:
  case AArch64::STPWpre:
  case AArch64::STPXpost:
  case AArch64::STPXpre: {
    // Indexed pair stores: writeback base must not overlap a source.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STP instruction, writeback base "
                           "is also a source");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable STP instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::LDRBBpre:
  case AArch64::LDRBpre:
  case AArch64::LDRHHpre:
  case AArch64::LDRHpre:
  case AArch64::LDRSBWpre:
  case AArch64::LDRSBXpre:
  case AArch64::LDRSHWpre:
  case AArch64::LDRSHXpre:
  case AArch64::LDRSWpre:
  case AArch64::LDRWpre:
  case AArch64::LDRXpre:
  case AArch64::LDRBBpost:
  case AArch64::LDRBpost:
  case AArch64::LDRHHpost:
  case AArch64::LDRHpost:
  case AArch64::LDRSBWpost:
  case AArch64::LDRSBXpost:
  case AArch64::LDRSHWpost:
  case AArch64::LDRSHXpost:
  case AArch64::LDRSWpost:
  case AArch64::LDRWpost:
  case AArch64::LDRXpost: {
    // Indexed single-register loads: writeback base must not overlap Rt.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STRBBpost:
  case AArch64::STRBpost:
  case AArch64::STRHHpost:
  case AArch64::STRHpost:
  case AArch64::STRWpost:
  case AArch64::STRXpost:
  case AArch64::STRBBpre:
  case AArch64::STRBpre:
  case AArch64::STRHHpre:
  case AArch64::STRHpre:
  case AArch64::STRWpre:
  case AArch64::STRXpre: {
    // Indexed single-register stores: writeback base must not overlap Rt.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STR instruction, writeback base "
                           "is also a source");
    break;
  }
  }

  // Now check immediate ranges. Separate from the above as there is overlap
  // in the instructions being checked and this keeps the nested conditionals
  // to a minimum.
  switch (Inst.getOpcode()) {
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri: {
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
    // some slight duplication here.
    if (Inst.getOperand(2).isExpr()) {
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
      AArch64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      int64_t Addend;
      if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
        return Error(Loc[2], "invalid immediate expression");
      }

      // Only allow these with ADDXri.
      if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
           DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
          Inst.getOpcode() == AArch64::ADDXri)
        return false;

      // Only allow these with ADDXri/ADDWri
      if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
           ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
           ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
           ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
           ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
           ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
           ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
           ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
          (Inst.getOpcode() == AArch64::ADDXri ||
           Inst.getOpcode() == AArch64::ADDWri))
        return false;

      // Don't allow expressions in the immediate field otherwise
      return Error(Loc[2], "invalid immediate expression");
    }
    return false;
  }
  default:
    return false;
  }
}
3520 
/// showMatchError - Emit the diagnostic corresponding to a
/// MatchAndEmitInstruction error code at \p Loc. Always returns true.
///
/// NOTE(review): these strings are presumably matched verbatim by the MC
/// assembler tests — confirm before rewording any of them.
bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
  switch (ErrCode) {
  case Match_MissingFeature:
    return Error(Loc,
                 "instruction requires a CPU feature not currently enabled");
  case Match_InvalidOperand:
    return Error(Loc, "invalid operand for instruction");
  case Match_InvalidSuffix:
    return Error(Loc, "invalid type suffix for instruction");
  case Match_InvalidCondCode:
    return Error(Loc, "expected AArch64 condition code");
  case Match_AddSubRegExtendSmall:
    return Error(Loc,
      "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(Loc,
      "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubSecondSource:
    return Error(Loc,
      "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_LogicalSecondSource:
    return Error(Loc, "expected compatible register or logical immediate");
  case Match_InvalidMovImm32Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
  case Match_InvalidMovImm64Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
  case Match_AddSubRegShift32:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_InvalidFPImm:
    return Error(Loc,
                 "expected compatible register or floating-point constant");
  case Match_InvalidMemoryIndexedSImm9:
    return Error(Loc, "index must be an integer in range [-256, 255].");
  case Match_InvalidMemoryIndexed4SImm7:
    return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
  case Match_InvalidMemoryIndexed8SImm7:
    return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
  case Match_InvalidMemoryIndexed16SImm7:
    return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
  case Match_InvalidMemoryWExtend8:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_InvalidMemoryWExtend16:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_InvalidMemoryWExtend32:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_InvalidMemoryWExtend64:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_InvalidMemoryWExtend128:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
  case Match_InvalidMemoryXExtend8:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_InvalidMemoryXExtend16:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_InvalidMemoryXExtend32:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_InvalidMemoryXExtend64:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_InvalidMemoryXExtend128:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_InvalidMemoryIndexed1:
    return Error(Loc, "index must be an integer in range [0, 4095].");
  case Match_InvalidMemoryIndexed2:
    return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
  case Match_InvalidMemoryIndexed4:
    return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
  case Match_InvalidMemoryIndexed8:
    return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
  case Match_InvalidMemoryIndexed16:
    return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
  case Match_InvalidImm0_7:
    return Error(Loc, "immediate must be an integer in range [0, 7].");
  case Match_InvalidImm0_15:
    return Error(Loc, "immediate must be an integer in range [0, 15].");
  case Match_InvalidImm0_31:
    return Error(Loc, "immediate must be an integer in range [0, 31].");
  case Match_InvalidImm0_63:
    return Error(Loc, "immediate must be an integer in range [0, 63].");
  case Match_InvalidImm0_127:
    return Error(Loc, "immediate must be an integer in range [0, 127].");
  case Match_InvalidImm0_65535:
    return Error(Loc, "immediate must be an integer in range [0, 65535].");
  case Match_InvalidImm1_8:
    return Error(Loc, "immediate must be an integer in range [1, 8].");
  case Match_InvalidImm1_16:
    return Error(Loc, "immediate must be an integer in range [1, 16].");
  case Match_InvalidImm1_32:
    return Error(Loc, "immediate must be an integer in range [1, 32].");
  case Match_InvalidImm1_64:
    return Error(Loc, "immediate must be an integer in range [1, 64].");
  case Match_InvalidIndex1:
    return Error(Loc, "expected lane specifier '[1]'");
  case Match_InvalidIndexB:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidIndexH:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidIndexS:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidIndexD:
    return Error(Loc, "vector lane must be an integer in range [0, 1].");
  case Match_InvalidLabel:
    return Error(Loc, "expected label or encodable integer pc offset");
  case Match_MRS:
    return Error(Loc, "expected readable system register");
  case Match_MSR:
    return Error(Loc, "expected writable system register or pstate");
  case Match_MnemonicFail:
    return Error(Loc, "unrecognized instruction mnemonic");
  default:
    llvm_unreachable("unexpected error code!");
  }
}
3646 
3647 static const char *getSubtargetFeatureName(uint64_t Val);
3648 
3649 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3650  OperandVector &Operands,
3651  MCStreamer &Out,
3652  uint64_t &ErrorInfo,
3653  bool MatchingInlineAsm) {
3654  assert(!Operands.empty() && "Unexpect empty operand list!");
3655  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3656  assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3657 
3658  StringRef Tok = Op.getToken();
3659  unsigned NumOperands = Operands.size();
3660 
3661  if (NumOperands == 4 && Tok == "lsl") {
3662  AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3663  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3664  if (Op2.isReg() && Op3.isImm()) {
3665  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3666  if (Op3CE) {
3667  uint64_t Op3Val = Op3CE->getValue();
3668  uint64_t NewOp3Val = 0;
3669  uint64_t NewOp4Val = 0;
3670  if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3671  Op2.getReg())) {
3672  NewOp3Val = (32 - Op3Val) & 0x1f;
3673  NewOp4Val = 31 - Op3Val;
3674  } else {
3675  NewOp3Val = (64 - Op3Val) & 0x3f;
3676  NewOp4Val = 63 - Op3Val;
3677  }
3678 
3679  const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3680  const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3681 
3682  Operands[0] = AArch64Operand::CreateToken(
3683  "ubfm", false, Op.getStartLoc(), getContext());
3684  Operands.push_back(AArch64Operand::CreateImm(
3685  NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3686  Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3687  Op3.getEndLoc(), getContext());
3688  }
3689  }
3690  } else if (NumOperands == 4 && Tok == "bfc") {
3691  // FIXME: Horrible hack to handle BFC->BFM alias.
3692  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3693  AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3694  AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3695 
3696  if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
3697  const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3698  const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3699 
3700  if (LSBCE && WidthCE) {
3701  uint64_t LSB = LSBCE->getValue();
3702  uint64_t Width = WidthCE->getValue();
3703 
3704  uint64_t RegWidth = 0;
3705  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3706  Op1.getReg()))
3707  RegWidth = 64;
3708  else
3709  RegWidth = 32;
3710 
3711  if (LSB >= RegWidth)
3712  return Error(LSBOp.getStartLoc(),
3713  "expected integer in range [0, 31]");
3714  if (Width < 1 || Width > RegWidth)
3715  return Error(WidthOp.getStartLoc(),
3716  "expected integer in range [1, 32]");
3717 
3718  uint64_t ImmR = 0;
3719  if (RegWidth == 32)
3720  ImmR = (32 - LSB) & 0x1f;
3721  else
3722  ImmR = (64 - LSB) & 0x3f;
3723 
3724  uint64_t ImmS = Width - 1;
3725 
3726  if (ImmR != 0 && ImmS >= ImmR)
3727  return Error(WidthOp.getStartLoc(),
3728  "requested insert overflows register");
3729 
3730  const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3731  const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3732  Operands[0] = AArch64Operand::CreateToken(
3733  "bfm", false, Op.getStartLoc(), getContext());
3734  Operands[2] = AArch64Operand::CreateReg(
3735  RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(),
3736  SMLoc(), getContext());
3737  Operands[3] = AArch64Operand::CreateImm(
3738  ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3739  Operands.emplace_back(
3740  AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3741  WidthOp.getEndLoc(), getContext()));
3742  }
3743  }
3744  } else if (NumOperands == 5) {
3745  // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3746  // UBFIZ -> UBFM aliases.
3747  if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3748  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3749  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3750  AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3751 
3752  if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3753  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3754  const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3755 
3756  if (Op3CE && Op4CE) {
3757  uint64_t Op3Val = Op3CE->getValue();
3758  uint64_t Op4Val = Op4CE->getValue();
3759 
3760  uint64_t RegWidth = 0;
3761  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3762  Op1.getReg()))
3763  RegWidth = 64;
3764  else
3765  RegWidth = 32;
3766 
3767  if (Op3Val >= RegWidth)
3768  return Error(Op3.getStartLoc(),
3769  "expected integer in range [0, 31]");
3770  if (Op4Val < 1 || Op4Val > RegWidth)
3771  return Error(Op4.getStartLoc(),
3772  "expected integer in range [1, 32]");
3773 
3774  uint64_t NewOp3Val = 0;
3775  if (RegWidth == 32)
3776  NewOp3Val = (32 - Op3Val) & 0x1f;
3777  else
3778  NewOp3Val = (64 - Op3Val) & 0x3f;
3779 
3780  uint64_t NewOp4Val = Op4Val - 1;
3781 
3782  if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3783  return Error(Op4.getStartLoc(),
3784  "requested insert overflows register");
3785 
3786  const MCExpr *NewOp3 =
3787  MCConstantExpr::create(NewOp3Val, getContext());
3788  const MCExpr *NewOp4 =
3789  MCConstantExpr::create(NewOp4Val, getContext());
3790  Operands[3] = AArch64Operand::CreateImm(
3791  NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3792  Operands[4] = AArch64Operand::CreateImm(
3793  NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3794  if (Tok == "bfi")
3795  Operands[0] = AArch64Operand::CreateToken(
3796  "bfm", false, Op.getStartLoc(), getContext());
3797  else if (Tok == "sbfiz")
3798  Operands[0] = AArch64Operand::CreateToken(
3799  "sbfm", false, Op.getStartLoc(), getContext());
3800  else if (Tok == "ubfiz")
3801  Operands[0] = AArch64Operand::CreateToken(
3802  "ubfm", false, Op.getStartLoc(), getContext());
3803  else
3804  llvm_unreachable("No valid mnemonic for alias?");
3805  }
3806  }
3807 
3808  // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3809  // UBFX -> UBFM aliases.
3810  } else if (NumOperands == 5 &&
3811  (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3812  AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3813  AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3814  AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3815 
3816  if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3817  const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3818  const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3819 
3820  if (Op3CE && Op4CE) {
3821  uint64_t Op3Val = Op3CE->getValue();
3822  uint64_t Op4Val = Op4CE->getValue();
3823 
3824  uint64_t RegWidth = 0;
3825  if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3826  Op1.getReg()))
3827  RegWidth = 64;
3828  else
3829  RegWidth = 32;
3830 
3831  if (Op3Val >= RegWidth)
3832  return Error(Op3.getStartLoc(),
3833  "expected integer in range [0, 31]");
3834  if (Op4Val < 1 || Op4Val > RegWidth)
3835  return Error(Op4.getStartLoc(),
3836  "expected integer in range [1, 32]");
3837 
3838  uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3839 
3840  if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3841  return Error(Op4.getStartLoc(),
3842  "requested extract overflows register");
3843 
3844  const MCExpr *NewOp4 =
3845  MCConstantExpr::create(NewOp4Val, getContext());
3846  Operands[4] = AArch64Operand::CreateImm(
3847  NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3848  if (Tok == "bfxil")
3849  Operands[0] = AArch64Operand::CreateToken(
3850  "bfm", false, Op.getStartLoc(), getContext());
3851  else if (Tok == "sbfx")
3852  Operands[0] = AArch64Operand::CreateToken(
3853  "sbfm", false, Op.getStartLoc(), getContext());
3854  else if (Tok == "ubfx")
3855  Operands[0] = AArch64Operand::CreateToken(
3856  "ubfm", false, Op.getStartLoc(), getContext());
3857  else
3858  llvm_unreachable("No valid mnemonic for alias?");
3859  }
3860  }
3861  }
3862  }
3863  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3864  // InstAlias can't quite handle this since the reg classes aren't
3865  // subclasses.
3866  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3867  // The source register can be Wn here, but the matcher expects a
3868  // GPR64. Twiddle it here if necessary.
3869  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3870  if (Op.isReg()) {
3871  unsigned Reg = getXRegFromWReg(Op.getReg());
3872  Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3873  Op.getEndLoc(), getContext());
3874  }
3875  }
3876  // FIXME: Likewise for sxt[bh] with a Xd dst operand
3877  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3878  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3879  if (Op.isReg() &&
3880  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3881  Op.getReg())) {
3882  // The source register can be Wn here, but the matcher expects a
3883  // GPR64. Twiddle it here if necessary.
3884  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3885  if (Op.isReg()) {
3886  unsigned Reg = getXRegFromWReg(Op.getReg());
3887  Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3888  Op.getEndLoc(), getContext());
3889  }
3890  }
3891  }
3892  // FIXME: Likewise for uxt[bh] with a Xd dst operand
3893  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3894  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3895  if (Op.isReg() &&
3896  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3897  Op.getReg())) {
3898  // The source register can be Wn here, but the matcher expects a
3899  // GPR32. Twiddle it here if necessary.
3900  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3901  if (Op.isReg()) {
3902  unsigned Reg = getWRegFromXReg(Op.getReg());
3903  Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3904  Op.getEndLoc(), getContext());
3905  }
3906  }
3907  }
3908 
3909  // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3910  if (NumOperands == 3 && Tok == "fmov") {
3911  AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3912  AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
3913  if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3914  unsigned zreg =
3915  AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3916  RegOp.getReg())
3917  ? AArch64::WZR
3918  : AArch64::XZR;
3919  Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3920  Op.getEndLoc(), getContext());
3921  }
3922  }
3923 
3924  MCInst Inst;
3925  // First try to match against the secondary set of tables containing the
3926  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3927  unsigned MatchResult =
3928  MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3929 
3930  // If that fails, try against the alternate table containing long-form NEON:
3931  // "fadd v0.2s, v1.2s, v2.2s"
3932  if (MatchResult != Match_Success)
3933  MatchResult =
3934  MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3935 
3936  switch (MatchResult) {
3937  case Match_Success: {
3938  // Perform range checking and other semantic validations
3939  SmallVector<SMLoc, 8> OperandLocs;
3940  NumOperands = Operands.size();
3941  for (unsigned i = 1; i < NumOperands; ++i)
3942  OperandLocs.push_back(Operands[i]->getStartLoc());
3943  if (validateInstruction(Inst, OperandLocs))
3944  return true;
3945 
3946  Inst.setLoc(IDLoc);
3947  Out.EmitInstruction(Inst, STI);
3948  return false;
3949  }
3950  case Match_MissingFeature: {
3951  assert(ErrorInfo && "Unknown missing feature!");
3952  // Special case the error message for the very common case where only
3953  // a single subtarget feature is missing (neon, e.g.).
3954  std::string Msg = "instruction requires:";
3955  uint64_t Mask = 1;
3956  for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3957  if (ErrorInfo & Mask) {
3958  Msg += " ";
3959  Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3960  }
3961  Mask <<= 1;
3962  }
3963  return Error(IDLoc, Msg);
3964  }
3965  case Match_MnemonicFail:
3966  return showMatchError(IDLoc, MatchResult);
3967  case Match_InvalidOperand: {
3968  SMLoc ErrorLoc = IDLoc;
3969  if (ErrorInfo != ~0ULL) {
3970  if (ErrorInfo >= Operands.size())
3971  return Error(IDLoc, "too few operands for instruction");
3972 
3973  ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3974  if (ErrorLoc == SMLoc())
3975  ErrorLoc = IDLoc;
3976  }
3977  // If the match failed on a suffix token operand, tweak the diagnostic
3978  // accordingly.
3979  if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3980  ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3981  MatchResult = Match_InvalidSuffix;
3982 
3983  return showMatchError(ErrorLoc, MatchResult);
3984  }
3985  case Match_InvalidMemoryIndexed1:
3986  case Match_InvalidMemoryIndexed2:
3987  case Match_InvalidMemoryIndexed4:
3988  case Match_InvalidMemoryIndexed8:
3989  case Match_InvalidMemoryIndexed16:
3990  case Match_InvalidCondCode:
3991  case Match_AddSubRegExtendSmall:
3992  case Match_AddSubRegExtendLarge:
3993  case Match_AddSubSecondSource:
3994  case Match_LogicalSecondSource:
3995  case Match_AddSubRegShift32:
3996  case Match_AddSubRegShift64:
3997  case Match_InvalidMovImm32Shift:
3998  case Match_InvalidMovImm64Shift:
3999  case Match_InvalidFPImm:
4000  case Match_InvalidMemoryWExtend8:
4001  case Match_InvalidMemoryWExtend16:
4002  case Match_InvalidMemoryWExtend32:
4003  case Match_InvalidMemoryWExtend64:
4004  case Match_InvalidMemoryWExtend128:
4005  case Match_InvalidMemoryXExtend8:
4006  case Match_InvalidMemoryXExtend16:
4007  case Match_InvalidMemoryXExtend32:
4008  case Match_InvalidMemoryXExtend64:
4009  case Match_InvalidMemoryXExtend128:
4010  case Match_InvalidMemoryIndexed4SImm7:
4011  case Match_InvalidMemoryIndexed8SImm7:
4012  case Match_InvalidMemoryIndexed16SImm7:
4013  case Match_InvalidMemoryIndexedSImm9:
4014  case Match_InvalidImm0_7:
4015  case Match_InvalidImm0_15:
4016  case Match_InvalidImm0_31:
4017  case Match_InvalidImm0_63:
4018  case Match_InvalidImm0_127:
4019  case Match_InvalidImm0_65535:
4020  case Match_InvalidImm1_8:
4021  case Match_InvalidImm1_16:
4022  case Match_InvalidImm1_32:
4023  case Match_InvalidImm1_64:
4024  case Match_InvalidIndex1:
4025  case Match_InvalidIndexB:
4026  case Match_InvalidIndexH:
4027  case Match_InvalidIndexS:
4028  case Match_InvalidIndexD:
4029  case Match_InvalidLabel:
4030  case Match_MSR:
4031  case Match_MRS: {
4032  if (ErrorInfo >= Operands.size())
4033  return Error(IDLoc, "too few operands for instruction");
4034  // Any time we get here, there's nothing fancy to do. Just get the
4035  // operand SMLoc and display the diagnostic.
4036  SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4037  if (ErrorLoc == SMLoc())
4038  ErrorLoc = IDLoc;
4039  return showMatchError(ErrorLoc, MatchResult);
4040  }
4041  }
4042 
4043  llvm_unreachable("Implement any new match types added!");
4044 }
4045 
4046 /// ParseDirective parses the arm specific directives
4047 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4048  const MCObjectFileInfo::Environment Format =
4049  getContext().getObjectFileInfo()->getObjectFileType();
4050  bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4051  bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4052 
4053  StringRef IDVal = DirectiveID.getIdentifier();
4054  SMLoc Loc = DirectiveID.getLoc();
4055  if (IDVal == ".hword")
4056  return parseDirectiveWord(2, Loc);
4057  if (IDVal == ".word")
4058  return parseDirectiveWord(4, Loc);
4059  if (IDVal == ".xword")
4060  return parseDirectiveWord(8, Loc);
4061  if (IDVal == ".tlsdesccall")
4062  return parseDirectiveTLSDescCall(Loc);
4063  if (IDVal == ".ltorg" || IDVal == ".pool")
4064  return parseDirectiveLtorg(Loc);
4065  if (IDVal == ".unreq")
4066  return parseDirectiveUnreq(Loc);
4067 
4068  if (!IsMachO && !IsCOFF) {
4069  if (IDVal == ".inst")
4070  return parseDirectiveInst(Loc);
4071  }
4072 
4073  return parseDirectiveLOH(IDVal, Loc);
4074 }
4075 
4076 /// parseDirectiveWord
4077 /// ::= .word [ expression (, expression)* ]
4078 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4079  MCAsmParser &Parser = getParser();
4080  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4081  for (;;) {
4082  const MCExpr *Value;
4083  if (getParser().parseExpression(Value))
4084  return true;
4085 
4086  getParser().getStreamer().EmitValue(Value, Size);
4087 
4088  if (getLexer().is(AsmToken::EndOfStatement))
4089  break;
4090 
4091  // FIXME: Improve diagnostic.
4092  if (getLexer().isNot(AsmToken::Comma))
4093  return Error(L, "unexpected token in directive");
4094  Parser.Lex();
4095  }
4096  }
4097 
4098  Parser.Lex();
4099  return false;
4100 }
4101 
4102 /// parseDirectiveInst
4103 /// ::= .inst opcode [, ...]
4104 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4105  MCAsmParser &Parser = getParser();
4106  if (getLexer().is(AsmToken::EndOfStatement)) {
4107  Parser.eatToEndOfStatement();
4108  Error(Loc, "expected expression following directive");
4109  return false;
4110  }
4111 
4112  for (;;) {
4113  const MCExpr *Expr;
4114 
4115  if (getParser().parseExpression(Expr)) {
4116  Error(Loc, "expected expression");
4117  return false;
4118  }
4119 
4120  const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4121  if (!Value) {
4122  Error(Loc, "expected constant expression");
4123  return false;
4124  }
4125 
4126  getTargetStreamer().emitInst(Value->getValue());
4127 
4128  if (getLexer().is(AsmToken::EndOfStatement))
4129  break;
4130 
4131  if (getLexer().isNot(AsmToken::Comma)) {
4132  Error(Loc, "unexpected token in directive");
4133  return false;
4134  }
4135 
4136  Parser.Lex(); // Eat comma.
4137  }
4138 
4139  Parser.Lex();
4140  return false;
4141 }
4142 
4143 // parseDirectiveTLSDescCall:
4144 // ::= .tlsdesccall symbol
4145 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4146  StringRef Name;
4147  if (getParser().parseIdentifier(Name))
4148  return Error(L, "expected symbol after directive");
4149 
4150  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4151  const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4152  Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4153 
4154  MCInst Inst;
4155  Inst.setOpcode(AArch64::TLSDESCCALL);
4156  Inst.addOperand(MCOperand::createExpr(Expr));
4157 
4158  getParser().getStreamer().EmitInstruction(Inst, STI);
4159  return false;
4160 }
4161 
4162 /// ::= .loh <lohName | lohId> label1, ..., labelN
4163 /// The number of arguments depends on the loh identifier.
4164 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4165  if (IDVal != MCLOHDirectiveName())
4166  return true;
4167  MCLOHType Kind;
4168  if (getParser().getTok().isNot(AsmToken::Identifier)) {
4169  if (getParser().getTok().isNot(AsmToken::Integer))
4170  return TokError("expected an identifier or a number in directive");
4171  // We successfully get a numeric value for the identifier.
4172  // Check if it is valid.
4173  int64_t Id = getParser().getTok().getIntVal();
4174  if (Id <= -1U && !isValidMCLOHType(Id))
4175  return TokError("invalid numeric identifier in directive");
4176  Kind = (MCLOHType)Id;
4177  } else {
4178  StringRef Name = getTok().getIdentifier();
4179  // We successfully parse an identifier.
4180  // Check if it is a recognized one.
4181  int Id = MCLOHNameToId(Name);
4182 
4183  if (Id == -1)
4184  return TokError("invalid identifier in directive");
4185  Kind = (MCLOHType)Id;
4186  }
4187  // Consume the identifier.
4188  Lex();
4189  // Get the number of arguments of this LOH.
4190  int NbArgs = MCLOHIdToNbArgs(Kind);
4191 
4192  assert(NbArgs != -1 && "Invalid number of arguments");
4193 
4195  for (int Idx = 0; Idx < NbArgs; ++Idx) {
4196  StringRef Name;
4197  if (getParser().parseIdentifier(Name))
4198  return TokError("expected identifier in directive");
4199  Args.push_back(getContext().getOrCreateSymbol(Name));
4200 
4201  if (Idx + 1 == NbArgs)
4202  break;
4203  if (getLexer().isNot(AsmToken::Comma))
4204  return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4205  Lex();
4206  }
4207  if (getLexer().isNot(AsmToken::EndOfStatement))
4208  return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4209 
4210  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4211  return false;
4212 }
4213 
/// parseDirectiveLtorg
/// ::= .ltorg | .pool
///
/// Flushes any pending constant-pool entries at the current location via
/// the target streamer. The location \p L is unused. Always returns false
/// (success).
bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
  getTargetStreamer().emitCurrentConstantPool();
  return false;
}
4220 
/// parseDirectiveReq
/// ::= name .req registername
///
/// Records \p Name as an alias for the register that follows ".req" in the
/// RegisterReqs map. Parse errors are reported with Error() and the method
/// then returns false so the parser continues with the next statement.
bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
  MCAsmParser &Parser = getParser();
  Parser.Lex(); // Eat the '.req' token.
  SMLoc SRegLoc = getLoc();
  unsigned RegNum = tryParseRegister();
  bool IsVector = false;

  // Not a scalar register: try a vector register, which must be written
  // without a type suffix (a suffix makes Kind non-empty and is rejected).
  if (RegNum == static_cast<unsigned>(-1)) {
    StringRef Kind;
    RegNum = tryMatchVectorRegister(Kind, false);
    if (!Kind.empty()) {
      Error(SRegLoc, "vector register without type specifier expected");
      return false;
    }
    IsVector = true;
  }

  if (RegNum == static_cast<unsigned>(-1)) {
    Parser.eatToEndOfStatement();
    Error(SRegLoc, "register name or alias expected");
    return false;
  }

  // Shouldn't be anything else.
  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
    Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
    Parser.eatToEndOfStatement();
    return false;
  }

  Parser.Lex(); // Consume the EndOfStatement

  // First definition wins: a redefinition with a different value is
  // ignored with a warning.
  auto pair = std::make_pair(IsVector, RegNum);
  if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
    Warning(L, "ignoring redefinition of register alias '" + Name + "'");

  // NOTE(review): the success path returns true while the error paths
  // return false — inverted relative to the other directive parsers in
  // this file. Verify against the caller's expectation before changing.
  return true;
}
4261 
4262 /// parseDirectiveUneq
4263 /// ::= .unreq registername
4264 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4265  MCAsmParser &Parser = getParser();
4266  if (Parser.getTok().isNot(AsmToken::Identifier)) {
4267  Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4268  Parser.eatToEndOfStatement();
4269  return false;
4270  }
4271  RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4272  Parser.Lex(); // Eat the identifier.
4273  return false;
4274 }
4275 
/// Decompose \p Expr into an AArch64 (ELF-style) variant kind, a Darwin
/// symbol-reference kind, and a constant addend.
///
/// All three outputs are reset on entry (VK_INVALID / VK_None / 0).
/// Returns true when the expression is a bare symbol reference, or a
/// symbol plus/minus a constant; returns false for anything more
/// complicated, or when a symbol+addend form carries both an ELF variant
/// kind and a Darwin kind at once.
bool
AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                    AArch64MCExpr::VariantKind &ELFRefKind,
                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                    int64_t &Addend) {
  ELFRefKind = AArch64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;
  Addend = 0;

  // Strip an AArch64 target wrapper expression and remember its kind.
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  }

  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
  if (SE) {
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();
    return true;
  }

  // Otherwise only a binary "symbol (+|-) constant" form is accepted.
  const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
  if (!BE)
    return false;

  SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
  if (!SE)
    return false;
  DarwinRefKind = SE->getKind();

  if (BE->getOpcode() != MCBinaryExpr::Add &&
      BE->getOpcode() != MCBinaryExpr::Sub)
    return false;

  // See if the addend is a constant, otherwise there's more going
  // on here than we can deal with.
  auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
  if (!AddendExpr)
    return false;

  Addend = AddendExpr->getValue();
  if (BE->getOpcode() == MCBinaryExpr::Sub)
    Addend = -Addend;

  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
}
4325 
4326 /// Force static initialization.
4331 }
4332 
4333 #define GET_REGISTER_MATCHER
4334 #define GET_SUBTARGET_FEATURE_NAME
4335 #define GET_MATCHER_IMPLEMENTATION
4336 #include "AArch64GenAsmMatcher.inc"
4337 
4338 // Define this matcher function after the auto-generated include so we
4339 // have the match class enum definitions.
4340 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4341  unsigned Kind) {
4342  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4343  // If the kind is a token for a literal immediate, check if our asm
4344  // operand matches. This is for InstAliases which have a fixed-value
4345  // immediate in the syntax.
4346  int64_t ExpectedVal;
4347  switch (Kind) {
4348  default:
4349  return Match_InvalidOperand;
4350  case MCK__35_0:
4351  ExpectedVal = 0;
4352  break;
4353  case MCK__35_1:
4354  ExpectedVal = 1;
4355  break;
4356  case MCK__35_12:
4357  ExpectedVal = 12;
4358  break;
4359  case MCK__35_16:
4360  ExpectedVal = 16;
4361  break;
4362  case MCK__35_2:
4363  ExpectedVal = 2;
4364  break;
4365  case MCK__35_24:
4366  ExpectedVal = 24;
4367  break;
4368  case MCK__35_3:
4369  ExpectedVal = 3;
4370  break;
4371  case MCK__35_32:
4372  ExpectedVal = 32;
4373  break;
4374  case MCK__35_4:
4375  ExpectedVal = 4;
4376  break;
4377  case MCK__35_48:
4378  ExpectedVal = 48;
4379  break;
4380  case MCK__35_6:
4381  ExpectedVal = 6;
4382  break;
4383  case MCK__35_64:
4384  ExpectedVal = 64;
4385  break;
4386  case MCK__35_8:
4387  ExpectedVal = 8;
4388  break;
4389  }
4390  if (!Op.isImm())
4391  return Match_InvalidOperand;
4392  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4393  if (!CE)
4394  return Match_InvalidOperand;
4395  if (CE->getValue() == ExpectedVal)
4396  return Match_Success;
4397  return Match_InvalidOperand;
4398 }
4399 
4400 
/// Parse a consecutive even/odd GPR pair operand ("x0, x1" or "w2, w3")
/// and push the matching sequential-pair super-register onto \p Operands.
///
/// Both registers must be the same size (both X or both W), the first must
/// have an even encoding, and the second must encode to first + 1. Any
/// failure reports an error and returns MatchOperand_ParseFail.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {

  SMLoc S = getLoc();

  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    Error(S, "expected register");
    return MatchOperand_ParseFail;
  }

  int FirstReg = tryParseRegister();
  if (FirstReg == -1) {
    return MatchOperand_ParseFail;
  }
  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  // Classify the first register; it determines the required size of the
  // second.
  bool isXReg = XRegClass.contains(FirstReg),
       isWReg = WRegClass.contains(FirstReg);
  if (!isXReg && !isWReg) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);

  // The pair must start on an even encoding.
  if (FirstEncoding & 0x1) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  SMLoc M = getLoc();
  if (getParser().getTok().isNot(AsmToken::Comma)) {
    Error(M, "expected comma");
    return MatchOperand_ParseFail;
  }
  // Eat the comma
  getParser().Lex();

  SMLoc E = getLoc();
  int SecondReg = tryParseRegister();
  if (SecondReg ==-1) {
    return MatchOperand_ParseFail;
  }

  // Second register must be the odd partner of the first, same size.
  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg))) {
    Error(E,"expected second odd register of a "
            "consecutive same-size even/odd register pair");
    return MatchOperand_ParseFail;
  }

  // Look up the sequential-pair super-register covering both halves.
  unsigned Pair = 0;
  if(isXReg) {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
           &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  } else {
    Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
           &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  }

  Operands.push_back(AArch64Operand::CreateReg(Pair, false, S, getLoc(),
      getContext()));

  return MatchOperand_Success;
}
static bool isValidVectorKind(StringRef Name)
static bool isReg(const MCInst &MI, unsigned OpNo)
std::enable_if< std::numeric_limits< T >::is_signed, bool >::type getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition: StringRef.h:347
void push_back(const T &Elt)
Definition: SmallVector.h:222
int compare_lower(StringRef RHS) const
compare_lower - Compare two strings, ignoring case.
Definition: StringRef.cpp:52
static float getFPImmFloat(unsigned Imm)
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1327
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
Definition: MCExpr.h:315
virtual const AsmToken peekTok(bool ShouldSkipSpace=true)=0
Look ahead at the next token to be lexed.
const char * getPointer() const
Definition: SMLoc.h:33
size_t size() const
size - Get the string size.
Definition: StringRef.h:113
static const fltSemantics IEEEdouble
Definition: APFloat.h:133
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:39
static const AArch64MCExpr * create(const MCExpr *Expr, VariantKind Kind, MCContext &Ctx)
Generic assembler parser interface, for use by target specific assembly parsers.
Definition: MCAsmParser.h:64
virtual void Initialize(MCAsmParser &Parser)
Initialize the extension for parsing using the given Parser.
static MCOperand createExpr(const MCExpr *Val)
Definition: MCInst.h:129
MCTargetAsmParser - Generic interface to target specific assembly parsers.
size_t find(char C, size_t From=0) const
Search for the first character C in the string.
Definition: StringRef.h:240
static CondCode getInvertedCondCode(CondCode Code)
Target specific streamer interface.
Definition: MCStreamer.h:73
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on string...
Definition: MCAsmLexer.h:100
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
bool isNot(TokenKind K) const
Definition: MCAsmLexer.h:73
static unsigned getXRegFromWReg(unsigned Reg)
#define SYS_ALIAS(op1, Cn, Cm, op2)
StringSwitch & Case(const char(&S)[N], const T &Value)
Definition: StringSwitch.h:55
virtual void EmitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
Definition: MCStreamer.cpp:639
bool isSubRegisterEq(unsigned RegA, unsigned RegB) const
Returns true if RegB is a sub-register of RegA or if RegB == RegA.
static MCOperand createReg(unsigned Reg)
Definition: MCInst.h:111
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:79
std::pair< StringRef, StringRef > getToken(StringRef Source, StringRef Delimiters=" \t\n\v\f\r")
getToken - This function extracts one token from source, ignoring any leading characters that appear ...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:98
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APInt.h:33
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:33
Reg
All possible values of the reg field in the ModR/M byte.
Target independent representation for an assembler token.
Definition: MCAsmLexer.h:22
Represent a reference to a symbol from inside an expression.
Definition: MCExpr.h:159
static bool isLogicalImmediate(uint64_t imm, unsigned regSize)
isLogicalImmediate - Return true if the immediate is valid for a logical immediate instruction of the...
Windows NT (Windows on ARM)
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
Definition: APInt.h:372
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition: Value.cpp:317
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand...
This file implements a class to represent arbitrary precision integral constant values and operations...
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
Context object for machine code objects.
Definition: MCContext.h:48
bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const
Definition: SmallVector.h:57
unsigned getReg() const
Returns the register number.
Definition: MCInst.h:63
const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition: StringRef.h:107
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
Definition: ArrayRef.h:31
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:804
int64_t getIntVal() const
Definition: MCAsmLexer.h:105
MCRegisterClass - Base class of TargetRegisterClass.
const AsmToken & getTok() const
Get the current AsmToken from the stream.
Definition: MCAsmParser.cpp:32
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:134
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:150
virtual void eatToEndOfStatement()=0
Skip to the end of the current statement, for error recovery.
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
ppc loop data PPC Loop Data Prefetch
static unsigned getWRegFromXReg(unsigned Reg)
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
A self-contained host- and target-independent arbitrary-precision floating-point software implementat...
Definition: APFloat.h:122
const MCExpr * getExpr() const
Definition: MCInst.h:93
const MCExpr * getLHS() const
Get the left-hand side expression of the binary operator.
Definition: MCExpr.h:531
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:42
Streaming machine code generation interface.
Definition: MCStreamer.h:157
MCTargetStreamer * getTargetStreamer()
Definition: MCStreamer.h:212
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1...
Definition: MathExtras.h:109
static const char * getShiftExtendName(AArch64_AM::ShiftExtendType ST)
getShiftName - Get the string encoding for the shift type.
int64_t getValue() const
Definition: MCExpr.h:145
const MCRegisterClass & getRegClass(unsigned i) const
Returns the register class associated with the enumeration value.
SMLoc getLoc() const
Definition: MCAsmLexer.cpp:26
static int MCLOHNameToId(StringRef Name)
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:24
unsigned getRegister(unsigned i) const
getRegister - Return the specified register in the class.
virtual MCAsmLexer & getLexer()=0
MCLOHType
Linker Optimization Hint Type.
bool isExpr() const
Definition: MCInst.h:59
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang","erlang-compatible garbage collector")
Target TheARM64Target
void changeSign()
Definition: APFloat.cpp:1623
SI Fold Operands
Binary assembler expressions.
Definition: MCExpr.h:405
void setLoc(SMLoc loc)
Definition: MCInst.h:161
unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx, const MCRegisterClass *RC) const
Return a super-register of the specified register Reg so its sub-register of index SubIdx is Reg...
Target TheAArch64leTarget
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
static int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
void setOpcode(unsigned Op)
Definition: MCInst.h:158
Target TheAArch64beTarget
bool startswith(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:215
bool contains(unsigned Reg) const
contains - Return true if the specified register is included in this register class.
StringRef drop_front(size_t N=1) const
Return a StringRef equal to 'this' but with the first N elements dropped.
Definition: StringRef.h:412
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:861
const FeatureBitset & getFeatureBits() const
getFeatureBits - Return the feature bits.
bool is(TokenKind K) const
Definition: MCAsmLexer.h:72
static StringRef MCLOHDirectiveName()
R Default(const T &Value) const
Definition: StringSwitch.h:111
unsigned Log2_32(uint32_t Value)
Log2_32 - This function returns the floor log base 2 of the specified value, -1 if the value is zero...
Definition: MathExtras.h:468
StringMap - This is an unconventional map that is specialized for handling keys that are "strings"...
Definition: StringMap.h:214
unsigned getOpcode() const
Definition: MCInst.h:159
Class for arbitrary precision integers.
Definition: APInt.h:73
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
Definition: APInt.h:378
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
static uint8_t encodeAdvSIMDModImmType10(uint64_t Imm)
APInt bitcastToAPInt() const
Definition: APFloat.cpp:3084
LLVM_ATTRIBUTE_UNUSED_RESULT std::enable_if< !is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:285
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
static SMLoc getFromPointer(const char *Ptr)
Definition: SMLoc.h:35
const MCExpr * getRHS() const
Get the right-hand side expression of the binary operator.
Definition: MCExpr.h:534
static bool isAdvSIMDModImmType10(uint64_t Imm)
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string...
Definition: MCAsmLexer.h:89
RegisterMCAsmParser - Helper template for registering a target specific assembly parser, for use in the target machine initialization function.
static const size_t npos
Definition: StringRef.h:44
bool equals_lower(StringRef RHS) const
equals_lower - Check for string equality, ignoring case.
Definition: StringRef.h:142
#define N
Opcode getOpcode() const
Get the kind of this binary expression.
Definition: MCExpr.h:528
static bool isValidMCLOHType(unsigned Kind)
MCSubtargetInfo - Generic base class for all target subtargets.
static bool isMem(const MachineInstr *MI, unsigned Op)
Definition: X86InstrInfo.h:147
VariantKind getKind() const
Definition: MCExpr.h:330
const ARM::ArchExtKind Kind
static int MCLOHIdToNbArgs(MCLOHType Kind)
uint16_t getEncodingValue(unsigned RegNo) const
Returns the encoding for RegNo.
LLVM Value Representation.
Definition: Value.h:69
This class implements an extremely fast bulk output stream that can only output to a stream...
Definition: raw_ostream.h:38
static TraceState * TS
Subtraction.
Definition: MCExpr.h:429
std::string Hash(const Unit &U)
Definition: FuzzerUtil.cpp:39
void addOperand(const MCOperand &Op)
Definition: MCInst.h:168
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:40
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml","ocaml 3.10-compatible collector")
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
Definition: StringRef.h:434
Represents a location in source code.
Definition: SMLoc.h:23
static const char * getSubtargetFeatureName(uint64_t Val)
static MCOperand createImm(int64_t Val)
Definition: MCInst.h:117
std::string lower() const
Definition: StringRef.cpp:117
static const MCConstantExpr * create(int64_t Value, MCContext &Ctx)
Definition: MCExpr.cpp:150
const MCOperand & getOperand(unsigned i) const
Definition: MCInst.h:164
static void parseValidVectorKind(StringRef Name, unsigned &NumElements, char &ElementKind)
void LLVMInitializeAArch64AsmParser()
Force static initialization.
bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:110