LLVM  9.0.0svn
ARMAsmParser.cpp
Go to the documentation of this file.
1 //===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ARMFeatures.h"
10 #include "Utils/ARMBaseInfo.h"
14 #include "MCTargetDesc/ARMMCExpr.h"
17 #include "llvm/ADT/APFloat.h"
18 #include "llvm/ADT/APInt.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/StringMap.h"
24 #include "llvm/ADT/StringRef.h"
25 #include "llvm/ADT/StringSwitch.h"
26 #include "llvm/ADT/Triple.h"
27 #include "llvm/ADT/Twine.h"
28 #include "llvm/MC/MCContext.h"
29 #include "llvm/MC/MCExpr.h"
30 #include "llvm/MC/MCInst.h"
31 #include "llvm/MC/MCInstrDesc.h"
32 #include "llvm/MC/MCInstrInfo.h"
40 #include "llvm/MC/MCRegisterInfo.h"
41 #include "llvm/MC/MCSection.h"
42 #include "llvm/MC/MCStreamer.h"
44 #include "llvm/MC/MCSymbol.h"
47 #include "llvm/Support/ARMEHABI.h"
48 #include "llvm/Support/Casting.h"
50 #include "llvm/Support/Compiler.h"
53 #include "llvm/Support/SMLoc.h"
57 #include <algorithm>
58 #include <cassert>
59 #include <cstddef>
60 #include <cstdint>
61 #include <iterator>
62 #include <limits>
63 #include <memory>
64 #include <string>
65 #include <utility>
66 #include <vector>
67 
68 #define DEBUG_TYPE "asm-parser"
69 
70 using namespace llvm;
71 
namespace llvm {
// Table of instruction descriptions for the ARM target, defined elsewhere
// (in the tablegen'd MC target description code).
extern const MCInstrDesc ARMInsts[];
} // end namespace llvm
75 
76 namespace {
77 
78 enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
79 
80 static cl::opt<ImplicitItModeTy> ImplicitItMode(
81  "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
82  cl::desc("Allow conditional instructions outdside of an IT block"),
83  cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
84  "Accept in both ISAs, emit implicit ITs in Thumb"),
85  clEnumValN(ImplicitItModeTy::Never, "never",
86  "Warn in ARM, reject in Thumb"),
87  clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
88  "Accept in ARM, reject in Thumb"),
89  clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
90  "Warn in ARM, emit implicit ITs in Thumb")));
91 
92 static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
93  cl::init(false));
94 
95 enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
96 
// Return the state bit of the IT-block slot selected by Position.
// Position==0 means we are not in an IT block at all. Position==1 selects
// the first state bit, which is always 0 (the mandatory initial Then).
// Position==2 selects the second state bit, stored at bit 3 of Mask, and so
// on downwards — so shifting right by (5 - Position) brings the requested
// bit (including the always-0 bit at bit 4) down to bit 0.
static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
  unsigned BitIndex = 5 - Position;
  return (Mask >> BitIndex) & 1u;
}
106 
107 class UnwindContext {
108  using Locs = SmallVector<SMLoc, 4>;
109 
110  MCAsmParser &Parser;
111  Locs FnStartLocs;
112  Locs CantUnwindLocs;
113  Locs PersonalityLocs;
114  Locs PersonalityIndexLocs;
115  Locs HandlerDataLocs;
116  int FPReg;
117 
118 public:
119  UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
120 
121  bool hasFnStart() const { return !FnStartLocs.empty(); }
122  bool cantUnwind() const { return !CantUnwindLocs.empty(); }
123  bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
124 
125  bool hasPersonality() const {
126  return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
127  }
128 
129  void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
130  void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
131  void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
132  void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
133  void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
134 
135  void saveFPReg(int Reg) { FPReg = Reg; }
136  int getFPReg() const { return FPReg; }
137 
138  void emitFnStartLocNotes() const {
139  for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end();
140  FI != FE; ++FI)
141  Parser.Note(*FI, ".fnstart was specified here");
142  }
143 
144  void emitCantUnwindLocNotes() const {
145  for (Locs::const_iterator UI = CantUnwindLocs.begin(),
146  UE = CantUnwindLocs.end(); UI != UE; ++UI)
147  Parser.Note(*UI, ".cantunwind was specified here");
148  }
149 
150  void emitHandlerDataLocNotes() const {
151  for (Locs::const_iterator HI = HandlerDataLocs.begin(),
152  HE = HandlerDataLocs.end(); HI != HE; ++HI)
153  Parser.Note(*HI, ".handlerdata was specified here");
154  }
155 
156  void emitPersonalityLocNotes() const {
157  for (Locs::const_iterator PI = PersonalityLocs.begin(),
158  PE = PersonalityLocs.end(),
159  PII = PersonalityIndexLocs.begin(),
160  PIE = PersonalityIndexLocs.end();
161  PI != PE || PII != PIE;) {
162  if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
163  Parser.Note(*PI++, ".personality was specified here");
164  else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
165  Parser.Note(*PII++, ".personalityindex was specified here");
166  else
167  llvm_unreachable(".personality and .personalityindex cannot be "
168  "at the same location");
169  }
170  }
171 
172  void reset() {
173  FnStartLocs = Locs();
174  CantUnwindLocs = Locs();
175  PersonalityLocs = Locs();
176  HandlerDataLocs = Locs();
177  PersonalityIndexLocs = Locs();
178  FPReg = ARM::SP;
179  }
180 };
181 
182 
/// Target asm parser for ARM/Thumb. Besides the MCTargetAsmParser interface
/// it tracks IT (If-Then) and VPT block state while parsing, optionally
/// synthesizing implicit IT blocks in Thumb mode depending on the
/// -arm-implicit-it setting.
class ARMAsmParser : public MCTargetAsmParser {
  const MCRegisterInfo *MRI; // Cached from MCContext in the constructor.
  UnwindContext UC;          // Unwind-directive bookkeeping for diagnostics.

  // Return the ARM-specific target streamer of the current output streamer.
  ARMTargetStreamer &getTargetStreamer() {
    assert(getParser().getStreamer().getTargetStreamer() &&
           "do not have a target streamer");
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<ARMTargetStreamer &>(TS);
  }

  // Map of register aliases registered via the .req directive.
  StringMap<unsigned> RegisterReqs;

  // Whether the next symbol parsed should be marked as Thumb. Initialized to
  // false in the constructor; presumably set by the .thumb_func directive
  // handling and consumed when the label is emitted — the definitions are
  // outside this view (see doBeforeLabelEmit/onLabelParsed).
  bool NextSymbolIsThumb;

  // True when -arm-implicit-it allows emitting implicit ITs in Thumb mode
  // ("always" or "thumb").
  bool useImplicitITThumb() const {
    return ImplicitItMode == ImplicitItModeTy::Always ||
           ImplicitItMode == ImplicitItModeTy::ThumbOnly;
  }

  // True when -arm-implicit-it accepts conditional instructions in ARM mode
  // ("always" or "arm").
  bool useImplicitITARM() const {
    return ImplicitItMode == ImplicitItModeTy::Always ||
           ImplicitItMode == ImplicitItModeTy::ARMOnly;
  }

  // State of the IT block currently being parsed (if any).
  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              // '1' condition as indicated in IT.
                              // '0' inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)
                              // Note that this does not have the same encoding
                              // as in the IT instruction, which also depends
                              // on the low bit of the condition code.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,4], with 0 being the IT
                              // instruction itself. Initialized according to
                              // count of instructions in block. ~0U if no
                              // active IT block.

    bool IsExplicit;          // true - The IT instruction was present in the
                              // input, we should not modify it.
                              // false - The IT instruction was added
                              // implicitly, we can extend it if that
                              // would be legal.
  } ITState;

  // Instructions buffered while an implicit IT block is still open; they are
  // emitted, preceded by the synthesized IT, in flushPendingInstructions.
  SmallVector<MCInst, 4> PendingConditionalInsts;

  // Emit any open implicit IT block: the synthesized t2IT instruction first,
  // then the buffered conditional instructions, then reset the IT state.
  // No-op (with an empty buffer asserted) outside an implicit IT block.
  void flushPendingInstructions(MCStreamer &Out) override {
    if (!inImplicitITBlock()) {
      assert(PendingConditionalInsts.size() == 0);
      return;
    }

    // Emit the IT instruction
    MCInst ITInst;
    ITInst.setOpcode(ARM::t2IT);
    ITInst.addOperand(MCOperand::createImm(ITState.Cond));
    ITInst.addOperand(MCOperand::createImm(ITState.Mask));
    Out.EmitInstruction(ITInst, getSTI());

    // Emit the conditional instructions
    assert(PendingConditionalInsts.size() <= 4);
    for (const MCInst &Inst : PendingConditionalInsts) {
      Out.EmitInstruction(Inst, getSTI());
    }
    PendingConditionalInsts.clear();

    // Clear the IT state
    ITState.Mask = 0;
    ITState.CurPosition = ~0U;
  }

  bool inITBlock() { return ITState.CurPosition != ~0U; }
  bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
  bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }

  // True when the current slot is the last one of the active IT block
  // (block length is 4 - trailingzeroes(Mask), see ITState above).
  bool lastInITBlock() {
    return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
  }

  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done, except for implicit IT blocks, which we leave
    // open until we find an instruction that can't be added to it.
    unsigned TZ = countTrailingZeros(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }

  // Rewind the state of the current IT block, removing the last slot from it.
  void rewindImplicitITPosition() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition > 1);
    ITState.CurPosition--;
    unsigned TZ = countTrailingZeros(ITState.Mask);
    unsigned NewMask = 0;
    // Keep the condition bits above the removed slot...
    NewMask |= ITState.Mask & (0xC << TZ);
    // ...and place the terminating 1 one position higher than before.
    NewMask |= 0x2 << TZ;
    ITState.Mask = NewMask;
  }

  // Rewind the state of the current IT block, removing the last slot from it.
  // If we were at the first slot, this closes the IT block.
  void discardImplicitITBlock() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition == 1);
    ITState.CurPosition = ~0U;
  }

  // Return the low-subreg of a given Q register.
  unsigned getDRegFromQReg(unsigned QReg) const {
    return MRI->getSubReg(QReg, ARM::dsub_0);
  }

  // Get the condition code corresponding to the current IT block slot:
  // a set mask bit means the slot is an 'else' (opposite condition).
  ARMCC::CondCodes currentITCond() {
    unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
    return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond;
  }

  // Invert the condition of the current IT block slot without changing any
  // other slots in the same block.
  void invertCurrentITCondition() {
    if (ITState.CurPosition == 1) {
      // The first slot is always 'Then', so flip the block condition itself.
      ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
    } else {
      ITState.Mask ^= 1 << (5 - ITState.CurPosition);
    }
  }

  // Returns true if the current IT block is full (all 4 slots used).
  bool isITBlockFull() {
    return inITBlock() && (ITState.Mask & 1);
  }

  // Extend the current implicit IT block to have one more slot with the given
  // condition code. Cond must equal the block condition or its opposite.
  void extendImplicitITBlock(ARMCC::CondCodes Cond) {
    assert(inImplicitITBlock());
    assert(!isITBlockFull());
    assert(Cond == ITState.Cond ||
           Cond == ARMCC::getOppositeCondition(ITState.Cond));
    unsigned TZ = countTrailingZeros(ITState.Mask);
    unsigned NewMask = 0;
    // Keep any existing condition bits.
    NewMask |= ITState.Mask & (0xE << TZ);
    // Insert the new condition bit.
    NewMask |= (Cond != ITState.Cond) << TZ;
    // Move the trailing 1 down one bit.
    NewMask |= 1 << (TZ - 1);
    ITState.Mask = NewMask;
  }

  // Create a new implicit IT block with a dummy condition code.
  void startImplicitITBlock() {
    assert(!inITBlock());
    ITState.Cond = ARMCC::AL;
    ITState.Mask = 8; // Single 'Then' slot.
    ITState.CurPosition = 1;
    ITState.IsExplicit = false;
  }

  // Create a new explicit IT block with the given condition and mask.
  // The mask should be in the format used in ARMOperand and
  // MCOperand, with a 1 implying 'e', regardless of the low bit of
  // the condition.
  void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
    assert(!inITBlock());
    ITState.Cond = Cond;
    ITState.Mask = Mask;
    ITState.CurPosition = 0; // Position 0 is the IT instruction itself.
    ITState.IsExplicit = true;
  }

  // State of the VPT block currently being parsed; Mask/CurPosition use the
  // same encoding as their ITState counterparts above.
  struct {
    unsigned Mask : 4;
    unsigned CurPosition; // ~0U if no active VPT block.
  } VPTState;
  bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
  // Advance to the next slot of the VPT block, closing it when past the end.
  void forwardVPTPosition() {
    if (!inVPTBlock()) return;
    unsigned TZ = countTrailingZeros(VPTState.Mask);
    if (++VPTState.CurPosition == 5 - TZ)
      VPTState.CurPosition = ~0U;
  }

  // Diagnostic helpers forwarding to the generic MCAsmParser.
  void Note(SMLoc L, const Twine &Msg, SMRange Range = None) {
    return getParser().Note(L, Msg, Range);
  }

  bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) {
    return getParser().Warning(L, Msg, Range);
  }

  bool Error(SMLoc L, const Twine &Msg, SMRange Range = None) {
    return getParser().Error(L, Msg, Range);
  }

  // Register-list validation for Thumb LDM/STM forms (defined out of line).
  bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
                           unsigned ListNo, bool IsARPop = false);
  bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
                           unsigned ListNo);

  // Operand-parsing helpers (defined out of line).
  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(OperandVector &);
  int tryParseShiftRegister(OperandVector &);
  bool parseRegisterList(OperandVector &, bool EnforceOrder = true);
  bool parseMemory(OperandVector &);
  bool parseOperand(OperandVector &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  bool parseLiteralValues(unsigned Size, SMLoc L);

  // Directive handlers (defined out of line).
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);
  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveEabiAttr(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveFPU(SMLoc L);
  bool parseDirectiveFnStart(SMLoc L);
  bool parseDirectiveFnEnd(SMLoc L);
  bool parseDirectiveCantUnwind(SMLoc L);
  bool parseDirectivePersonality(SMLoc L);
  bool parseDirectiveHandlerData(SMLoc L);
  bool parseDirectiveSetFP(SMLoc L);
  bool parseDirectivePad(SMLoc L);
  bool parseDirectiveRegSave(SMLoc L, bool IsVector);
  bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
  bool parseDirectiveLtorg(SMLoc L);
  bool parseDirectiveEven(SMLoc L);
  bool parseDirectivePersonalityIndex(SMLoc L);
  bool parseDirectiveUnwindRaw(SMLoc L);
  bool parseDirectiveTLSDescSeq(SMLoc L);
  bool parseDirectiveMovSP(SMLoc L);
  bool parseDirectiveObjectArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveAlign(SMLoc L);
  bool parseDirectiveThumbSet(SMLoc L);

  // Mnemonic analysis helpers (defined out of line).
  bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
  StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
                          unsigned &PredicationCode,
                          unsigned &VPTPredicationCode, bool &CarrySetting,
                          unsigned &ProcessorIMod, StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
                             StringRef FullInst, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode,
                             bool &CanAcceptVPTPredicationCode);

  void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
                                     OperandVector &Operands);

  // Subtarget feature queries.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return getSTI().getFeatureBits()[ARM::ModeThumb];
  }

  bool isThumbOne() const {
    return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
  }

  bool isThumbTwo() const {
    return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
  }

  bool hasThumb() const {
    return getSTI().getFeatureBits()[ARM::HasV4TOps];
  }

  bool hasThumb2() const {
    return getSTI().getFeatureBits()[ARM::FeatureThumb2];
  }

  bool hasV6Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV6Ops];
  }

  bool hasV6T2Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
  }

  bool hasV6MOps() const {
    return getSTI().getFeatureBits()[ARM::HasV6MOps];
  }

  bool hasV7Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV7Ops];
  }

  bool hasV8Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV8Ops];
  }

  bool hasV8MBaseline() const {
    return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
  }

  bool hasV8MMainline() const {
    return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
  }
  bool hasV8_1MMainline() const {
    return getSTI().getFeatureBits()[ARM::HasV8_1MMainlineOps];
  }
  bool hasMVE() const {
    return getSTI().getFeatureBits()[ARM::HasMVEIntegerOps];
  }
  bool hasMVEFloat() const {
    return getSTI().getFeatureBits()[ARM::HasMVEFloatOps];
  }
  bool has8MSecExt() const {
    return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
  }

  bool hasARM() const {
    return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
  }

  bool hasDSP() const {
    return getSTI().getFeatureBits()[ARM::FeatureDSP];
  }

  bool hasD32() const {
    return getSTI().getFeatureBits()[ARM::FeatureD32];
  }

  bool hasV8_1aOps() const {
    return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
  }

  bool hasRAS() const {
    return getSTI().getFeatureBits()[ARM::FeatureRAS];
  }

  // Toggle between ARM and Thumb mode on a writable copy of the subtarget
  // info and recompute the set of available features.
  void SwitchMode() {
    MCSubtargetInfo &STI = copySTI();
    auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }

  void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);

  bool isMClass() const {
    return getSTI().getFeatureBits()[ARM::FeatureMClass];
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand parsers (defined out of line).
  OperandMatchResultTy parseITCondCode(OperandVector &);
  OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
  OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
  OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
  OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
  OperandMatchResultTy parseTraceSyncBarrierOptOperand(OperandVector &);
  OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
  OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
  OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
  OperandMatchResultTy parseBankedRegOperand(OperandVector &);
  OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
                                   int High);
  // PKH shift immediates: LSL range is [0,31], ASR range is [1,32].
  OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(OperandVector &);
  OperandMatchResultTy parseShifterImm(OperandVector &);
  OperandMatchResultTy parseRotImm(OperandVector &);
  OperandMatchResultTy parseModImm(OperandVector &);
  OperandMatchResultTy parseBitfield(OperandVector &);
  OperandMatchResultTy parsePostIdxReg(OperandVector &);
  OperandMatchResultTy parseAM3Offset(OperandVector &);
  OperandMatchResultTy parseFPImm(OperandVector &);
  OperandMatchResultTy parseVectorList(OperandVector &);
  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
                                       SMLoc &EndLoc);

  // Asm Match Converter Methods
  void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
  void cvtThumbBranches(MCInst &Inst, const OperandVector &);

  // Post-match validation and lowering hooks (defined out of line).
  bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
  bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
  bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
  bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
  bool shouldOmitVectorPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
  bool isITBlockTerminator(MCInst &Inst) const;
  void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);
  bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
                        bool Load, bool ARMMode, bool Writeback);

public:
  // Target-specific match-result codes, numbered after the generic ones.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2,
    Match_RequiresV8,
    Match_RequiresFlagSetting,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "ARMGenAsmMatcher.inc"

  };

  // Caches the register info, computes the initial feature set, optionally
  // emits build attributes (-arm-add-build-attributes), and marks the IT and
  // VPT state machines as inactive.
  ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
               const MCInstrInfo &MII, const MCTargetOptions &Options)
    : MCTargetAsmParser(Options, STI, MII), UC(Parser) {

    // Cache the MCRegisterInfo.
    MRI = getContext().getRegisterInfo();

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Add build attributes based on the selected target.
    if (AddBuildAttributes)
      getTargetStreamer().emitTargetAttributes(STI);

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;

    VPTState.CurPosition = ~0U;

    NextSymbolIsThumb = false;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseDirective(AsmToken DirectiveID) override;

  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
                            SmallVectorImpl<NearMissInfo> &NearMisses,
                            bool MatchingInlineAsm, bool &EmitInITBlock,
                            MCStreamer &Out);

  // A rendered near-miss diagnostic together with its source location.
  struct NearMissMessage {
    SMLoc Loc;
    SmallString<128> Message;
  };

  const char *getCustomOperandDiag(ARMMatchResultTy MatchError);

  // Near-miss diagnostic reporting (defined out of line).
  void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
                        SmallVectorImpl<NearMissMessage> &NearMissesOut,
                        SMLoc IDLoc, OperandVector &Operands);
  void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
                        OperandVector &Operands);

  void doBeforeLabelEmit(MCSymbol *Symbol) override;

  void onLabelParsed(MCSymbol *Symbol) override;
};
663 
664 /// ARMOperand - Instances of this class represent a parsed ARM machine
665 /// operand.
666 class ARMOperand : public MCParsedAsmOperand {
667  enum KindTy {
668  k_CondCode,
669  k_VPTPred,
670  k_CCOut,
671  k_ITCondMask,
672  k_CoprocNum,
673  k_CoprocReg,
674  k_CoprocOption,
675  k_Immediate,
676  k_MemBarrierOpt,
677  k_InstSyncBarrierOpt,
678  k_TraceSyncBarrierOpt,
679  k_Memory,
680  k_PostIndexRegister,
681  k_MSRMask,
682  k_BankedReg,
683  k_ProcIFlags,
684  k_VectorIndex,
685  k_Register,
686  k_RegisterList,
687  k_RegisterListWithAPSR,
688  k_DPRRegisterList,
689  k_SPRRegisterList,
690  k_FPSRegisterListWithVPR,
691  k_FPDRegisterListWithVPR,
692  k_VectorList,
693  k_VectorListAllLanes,
694  k_VectorListIndexed,
695  k_ShiftedRegister,
696  k_ShiftedImmediate,
697  k_ShifterImmediate,
698  k_RotateImmediate,
699  k_ModifiedImmediate,
700  k_ConstantPoolImmediate,
701  k_BitfieldDescriptor,
702  k_Token,
703  } Kind;
704 
705  SMLoc StartLoc, EndLoc, AlignmentLoc;
707 
708  struct CCOp {
709  ARMCC::CondCodes Val;
710  };
711 
712  struct VCCOp {
713  ARMVCC::VPTCodes Val;
714  };
715 
716  struct CopOp {
717  unsigned Val;
718  };
719 
720  struct CoprocOptionOp {
721  unsigned Val;
722  };
723 
724  struct ITMaskOp {
725  unsigned Mask:4;
726  };
727 
728  struct MBOptOp {
729  ARM_MB::MemBOpt Val;
730  };
731 
732  struct ISBOptOp {
734  };
735 
736  struct TSBOptOp {
738  };
739 
740  struct IFlagsOp {
741  ARM_PROC::IFlags Val;
742  };
743 
744  struct MMaskOp {
745  unsigned Val;
746  };
747 
748  struct BankedRegOp {
749  unsigned Val;
750  };
751 
752  struct TokOp {
753  const char *Data;
754  unsigned Length;
755  };
756 
757  struct RegOp {
758  unsigned RegNum;
759  };
760 
761  // A vector register list is a sequential list of 1 to 4 registers.
762  struct VectorListOp {
763  unsigned RegNum;
764  unsigned Count;
765  unsigned LaneIndex;
766  bool isDoubleSpaced;
767  };
768 
769  struct VectorIndexOp {
770  unsigned Val;
771  };
772 
773  struct ImmOp {
774  const MCExpr *Val;
775  };
776 
777  /// Combined record for all forms of ARM address expressions.
778  struct MemoryOp {
779  unsigned BaseRegNum;
780  // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
781  // was specified.
782  const MCConstantExpr *OffsetImm; // Offset immediate value
783  unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL
784  ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
785  unsigned ShiftImm; // shift for OffsetReg.
786  unsigned Alignment; // 0 = no alignment specified
787  // n = alignment in bytes (2, 4, 8, 16, or 32)
788  unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit)
789  };
790 
791  struct PostIdxRegOp {
792  unsigned RegNum;
793  bool isAdd;
794  ARM_AM::ShiftOpc ShiftTy;
795  unsigned ShiftImm;
796  };
797 
798  struct ShifterImmOp {
799  bool isASR;
800  unsigned Imm;
801  };
802 
803  struct RegShiftedRegOp {
804  ARM_AM::ShiftOpc ShiftTy;
805  unsigned SrcReg;
806  unsigned ShiftReg;
807  unsigned ShiftImm;
808  };
809 
810  struct RegShiftedImmOp {
811  ARM_AM::ShiftOpc ShiftTy;
812  unsigned SrcReg;
813  unsigned ShiftImm;
814  };
815 
816  struct RotImmOp {
817  unsigned Imm;
818  };
819 
820  struct ModImmOp {
821  unsigned Bits;
822  unsigned Rot;
823  };
824 
825  struct BitfieldOp {
826  unsigned LSB;
827  unsigned Width;
828  };
829 
830  union {
831  struct CCOp CC;
832  struct VCCOp VCC;
833  struct CopOp Cop;
834  struct CoprocOptionOp CoprocOption;
835  struct MBOptOp MBOpt;
836  struct ISBOptOp ISBOpt;
837  struct TSBOptOp TSBOpt;
838  struct ITMaskOp ITMask;
839  struct IFlagsOp IFlags;
840  struct MMaskOp MMask;
841  struct BankedRegOp BankedReg;
842  struct TokOp Tok;
843  struct RegOp Reg;
844  struct VectorListOp VectorList;
845  struct VectorIndexOp VectorIndex;
846  struct ImmOp Imm;
847  struct MemoryOp Memory;
848  struct PostIdxRegOp PostIdxReg;
849  struct ShifterImmOp ShifterImm;
850  struct RegShiftedRegOp RegShiftedReg;
851  struct RegShiftedImmOp RegShiftedImm;
852  struct RotImmOp RotImm;
853  struct ModImmOp ModImm;
854  struct BitfieldOp Bitfield;
855  };
856 
857 public:
858  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
859 
860  /// getStartLoc - Get the location of the first token of this operand.
861  SMLoc getStartLoc() const override { return StartLoc; }
862 
863  /// getEndLoc - Get the location of the last token of this operand.
864  SMLoc getEndLoc() const override { return EndLoc; }
865 
866  /// getLocRange - Get the range between the first and last token of this
867  /// operand.
868  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
869 
870  /// getAlignmentLoc - Get the location of the Alignment token of this operand.
871  SMLoc getAlignmentLoc() const {
872  assert(Kind == k_Memory && "Invalid access!");
873  return AlignmentLoc;
874  }
875 
876  ARMCC::CondCodes getCondCode() const {
877  assert(Kind == k_CondCode && "Invalid access!");
878  return CC.Val;
879  }
880 
881  ARMVCC::VPTCodes getVPTPred() const {
882  assert(isVPTPred() && "Invalid access!");
883  return VCC.Val;
884  }
885 
886  unsigned getCoproc() const {
887  assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
888  return Cop.Val;
889  }
890 
891  StringRef getToken() const {
892  assert(Kind == k_Token && "Invalid access!");
893  return StringRef(Tok.Data, Tok.Length);
894  }
895 
896  unsigned getReg() const override {
897  assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
898  return Reg.RegNum;
899  }
900 
901  const SmallVectorImpl<unsigned> &getRegList() const {
902  assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
903  Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
904  Kind == k_FPSRegisterListWithVPR ||
905  Kind == k_FPDRegisterListWithVPR) &&
906  "Invalid access!");
907  return Registers;
908  }
909 
910  const MCExpr *getImm() const {
911  assert(isImm() && "Invalid access!");
912  return Imm.Val;
913  }
914 
915  const MCExpr *getConstantPoolImm() const {
916  assert(isConstantPoolImm() && "Invalid access!");
917  return Imm.Val;
918  }
919 
920  unsigned getVectorIndex() const {
921  assert(Kind == k_VectorIndex && "Invalid access!");
922  return VectorIndex.Val;
923  }
924 
925  ARM_MB::MemBOpt getMemBarrierOpt() const {
926  assert(Kind == k_MemBarrierOpt && "Invalid access!");
927  return MBOpt.Val;
928  }
929 
930  ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
931  assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
932  return ISBOpt.Val;
933  }
934 
935  ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
936  assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");
937  return TSBOpt.Val;
938  }
939 
940  ARM_PROC::IFlags getProcIFlags() const {
941  assert(Kind == k_ProcIFlags && "Invalid access!");
942  return IFlags.Val;
943  }
944 
945  unsigned getMSRMask() const {
946  assert(Kind == k_MSRMask && "Invalid access!");
947  return MMask.Val;
948  }
949 
950  unsigned getBankedReg() const {
951  assert(Kind == k_BankedReg && "Invalid access!");
952  return BankedReg.Val;
953  }
954 
955  bool isCoprocNum() const { return Kind == k_CoprocNum; }
956  bool isCoprocReg() const { return Kind == k_CoprocReg; }
957  bool isCoprocOption() const { return Kind == k_CoprocOption; }
958  bool isCondCode() const { return Kind == k_CondCode; }
959  bool isVPTPred() const { return Kind == k_VPTPred; }
960  bool isCCOut() const { return Kind == k_CCOut; }
961  bool isITMask() const { return Kind == k_ITCondMask; }
962  bool isITCondCode() const { return Kind == k_CondCode; }
963  bool isImm() const override {
964  return Kind == k_Immediate;
965  }
966 
967  bool isARMBranchTarget() const {
968  if (!isImm()) return false;
969 
970  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
971  return CE->getValue() % 4 == 0;
972  return true;
973  }
974 
975 
976  bool isThumbBranchTarget() const {
977  if (!isImm()) return false;
978 
979  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
980  return CE->getValue() % 2 == 0;
981  return true;
982  }
983 
  // Checks whether this operand is an unsigned offset which fits in a field
  // of specified width and scaled by a specific number of bits.
  template<unsigned width, unsigned scale>
  bool isUnsignedOffset() const {
    if (!isImm()) return false;
    // Symbolic references are resolved later via a fixup.
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      int64_t Max = Align * ((1LL << width) - 1);
      return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
    }
    return false;
  }

  // Checks whether this operand is a signed offset which fits in a field
  // of specified width and scaled by a specific number of bits.
  template<unsigned width, unsigned scale>
  bool isSignedOffset() const {
    if (!isImm()) return false;
    // Symbolic references are resolved later via a fixup.
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      int64_t Max = Align * ((1LL << (width-1)) - 1);
      int64_t Min = -Align * (1LL << (width-1));
      return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
    }
    return false;
  }
1014 
  // Checks whether this operand is a memory operand computed as an offset
  // applied to PC. The offset may have 8 bits of magnitude and is represented
  // with two bits of shift. Textually it may be either [pc, #imm], #imm or a
  // relocatable expression...
  bool isThumbMemPC() const {
    int64_t Val = 0;
    if (isImm()) {
      // Symbolic references are resolved later via a fixup.
      if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
      if (!CE) return false;
      Val = CE->getValue();
    }
    else if (isMem()) {
      // Must be a bare [pc, #imm] form: immediate offset only, base is PC.
      if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
      if(Memory.BaseRegNum != ARM::PC) return false;
      Val = Memory.OffsetImm->getValue();
    }
    else return false;
    // Offset is a non-negative multiple of 4 no larger than 1020
    // (8 bits scaled by 4).
    return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
  }
1035 
  /// True if the immediate's bit pattern has an 8-bit VFP fp32 encoding.
  bool isFPImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    // getFP32Imm returns -1 when the value is not encodable.
    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
    return Val != -1;
  }

  /// True if the operand is a constant immediate in the inclusive
  /// range [N, M].
  template<int64_t N, int64_t M>
  bool isImmediate() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= N && Value <= M;
  }

  /// Like isImmediate<N, M>, but additionally requires the value to be a
  /// multiple of 4.
  template<int64_t N, int64_t M>
  bool isImmediateS4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= N && Value <= M;
  }
1061 
  // Range predicates built on the isImmediate/isImmediateS4 templates.
  bool isFBits16() const {
    return isImmediate<0, 17>();
  }
  bool isFBits32() const {
    return isImmediate<1, 33>();
  }
  bool isImm8s4() const {
    return isImmediateS4<-1020, 1020>();
  }
  bool isImm7s4() const {
    return isImmediateS4<-508, 508>();
  }
  bool isImm0_1020s4() const {
    return isImmediateS4<0, 1020>();
  }
  bool isImm0_508s4() const {
    return isImmediateS4<0, 508>();
  }
  /// Matches immediates whose negation is a multiple of 4 in (0, 508].
  bool isImm0_508s4Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = -CE->getValue();
    // Explicitly exclude zero. We want that to use the normal 0_508 version.
    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
  }

  /// Matches immediates whose 32-bit negation lies in (0, 4095].
  bool isImm0_4095Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    // isImm0_4095Neg is used with 32-bit immediates only.
    // 32-bit immediates are zero extended to 64-bit when parsed,
    // thus simple -CE->getValue() results in a big negative number,
    // not a small positive number as intended.
    if ((CE->getValue() >> 32) > 0) return false;
    uint32_t Value = -static_cast<uint32_t>(CE->getValue());
    return Value > 0 && Value < 4096;
  }

  bool isImm0_7() const {
    return isImmediate<0, 7>();
  }

  bool isImm1_16() const {
    return isImmediate<1, 16>();
  }

  bool isImm1_32() const {
    return isImmediate<1, 32>();
  }

  bool isImm8_255() const {
    return isImmediate<8, 255>();
  }
1117 
  /// Constant immediate in [256, 65535]; a non-constant expression is
  /// accepted and resolved later via a fixup.
  bool isImm256_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 256 && Value < 65536;
  }

  /// Constant immediate in [0, 65535]; a non-constant expression is
  /// accepted and resolved later via a fixup.
  bool isImm0_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
1137 
  bool isImm24bit() const {
    return isImmediate<0, 0xffffff + 1>();
  }

  /// Thumb shift-right amount: [1, 33) per the template's inclusive bounds.
  bool isImmThumbSR() const {
    return isImmediate<1, 33>();
  }

  /// PKH instruction LSL shift amount.
  bool isPKHLSLImm() const {
    return isImmediate<0, 32>();
  }

  /// PKH instruction ASR shift amount.
  bool isPKHASRImm() const {
    return isImmediate<0, 33>();
  }
1153 
  bool isAdrLabel() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    // If it is a constant, it must fit into a modified immediate encoding.
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Either the value or its negation must be encodable as a so_imm.
    return (ARM_AM::getSOImmVal(Value) != -1 ||
            ARM_AM::getSOImmVal(-Value) != -1);
  }

  /// Thumb2 modified immediate (t2_so_imm); non-constant expressions are
  /// accepted for later fixup, except :upper16:/:lower16: forms.
  bool isT2SOImm() const {
    // If we have an immediate that's not a constant, treat it as an expression
    // needing a fixup.
    if (isImm() && !isa<MCConstantExpr>(getImm())) {
      // We want to avoid matching :upper16: and :lower16: as we want these
      // expressions to match in isImm0_65535Expr()
      const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
      return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
                             ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
    }
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }
1185 
  /// Immediate whose bitwise NOT is a Thumb2 modified immediate while the
  /// value itself is not one.
  bool isT2SOImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) == -1 &&
           ARM_AM::getT2SOImmVal(~Value) != -1;
  }

  /// Immediate whose negation is a Thumb2 modified immediate while the value
  /// itself is not one.
  bool isT2SOImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Only use this when not representable as a plain so_imm.
    return ARM_AM::getT2SOImmVal(Value) == -1 &&
           ARM_AM::getT2SOImmVal(-Value) != -1;
  }

  /// SETEND operand: constant must be exactly 0 or 1.
  bool isSetEndImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 1 || Value == 0;
  }
1212 
  bool isReg() const override { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  // A plain register list is also acceptable where a list that may include
  // APSR is expected.
  bool isRegListWithAPSR() const {
    return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
  }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
  bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
  bool isToken() const override { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
  bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
  // A memory operand is only valid if the base and offset registers (when
  // present) are both GPRs.
  bool isMem() const override {
    if (Kind != k_Memory)
      return false;
    if (Memory.BaseRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
      return false;
    if (Memory.OffsetRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
      return false;
    return true;
  }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  // Shifted-register forms additionally require the registers involved to be
  // GPRs.
  bool isRegShiftedReg() const {
    return Kind == k_ShiftedRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.SrcReg) &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.ShiftReg);
  }
  bool isRegShiftedImm() const {
    return Kind == k_ShiftedImmediate &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedImm.SrcReg);
  }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
  bool isModImm() const { return Kind == k_ModifiedImmediate; }
1252 
  /// Immediate whose bitwise NOT is an ARM modified immediate (so_imm).
  bool isModImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(~Value) != -1;
  }

  /// Immediate whose negation is an ARM modified immediate while the value
  /// itself is not one.
  bool isModImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(Value) == -1 &&
           ARM_AM::getSOImmVal(-Value) != -1;
  }

  /// Immediate whose 32-bit negation lies in [1, 7].
  bool isThumbModImmNeg1_7() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int32_t Value = -(int32_t)CE->getValue();
    return 0 < Value && Value < 8;
  }

  /// Immediate whose 32-bit negation lies in [8, 255].
  bool isThumbModImmNeg8_255() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int32_t Value = -(int32_t)CE->getValue();
    return 7 < Value && Value < 256;
  }
1285 
  bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  // Post-indexed register (possibly shifted); the register must be a GPR.
  bool isPostIdxRegShifted() const {
    return Kind == k_PostIndexRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
  }
  // Plain post-indexed register: no shift applied.
  bool isPostIdxReg() const {
    return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
  }
  /// Memory operand with no offset of any kind. If \p alignOK is false, the
  /// operand's alignment must equal \p Alignment exactly.
  bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isMem())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  /// Like isMemNoOffset, but the base register must be in GPRnopc
  /// (a GPR other than PC).
  bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  /// Like isMemNoOffsetT2, but the base register must be in the restricted
  /// rGPR class (excludes SP as well, per the method name).
  bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  /// PC-relative memory operand with an immediate offset in [-4095, 4095].
  bool isMemPCRelImm12() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base register must be PC.
    if (Memory.BaseRegNum != ARM::PC)
      return false;
    // Immediate offset in range [-4095, 4095]; INT32_MIN encodes #-0.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val > -4096 && Val < 4096) ||
           (Val == std::numeric_limits<int32_t>::min());
  }
1338 
1339  bool isAlignedMemory() const {
1340  return isMemNoOffset(true);
1341  }
1342 
1343  bool isAlignedMemoryNone() const {
1344  return isMemNoOffset(false, 0);
1345  }
1346 
1347  bool isDupAlignedMemoryNone() const {
1348  return isMemNoOffset(false, 0);
1349  }
1350 
1351  bool isAlignedMemory16() const {
1352  if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1353  return true;
1354  return isMemNoOffset(false, 0);
1355  }
1356 
1357  bool isDupAlignedMemory16() const {
1358  if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1359  return true;
1360  return isMemNoOffset(false, 0);
1361  }
1362 
1363  bool isAlignedMemory32() const {
1364  if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1365  return true;
1366  return isMemNoOffset(false, 0);
1367  }
1368 
1369  bool isDupAlignedMemory32() const {
1370  if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1371  return true;
1372  return isMemNoOffset(false, 0);
1373  }
1374 
1375  bool isAlignedMemory64() const {
1376  if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1377  return true;
1378  return isMemNoOffset(false, 0);
1379  }
1380 
1381  bool isDupAlignedMemory64() const {
1382  if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1383  return true;
1384  return isMemNoOffset(false, 0);
1385  }
1386 
1387  bool isAlignedMemory64or128() const {
1388  if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1389  return true;
1390  if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1391  return true;
1392  return isMemNoOffset(false, 0);
1393  }
1394 
1395  bool isDupAlignedMemory64or128() const {
1396  if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1397  return true;
1398  if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1399  return true;
1400  return isMemNoOffset(false, 0);
1401  }
1402 
1403  bool isAlignedMemory64or128or256() const {
1404  if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1405  return true;
1406  if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1407  return true;
1408  if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
1409  return true;
1410  return isMemNoOffset(false, 0);
1411  }
1412 
  /// ARM addressing mode 2: register offset, or immediate offset in
  /// [-4095, 4095], no alignment specifier.
  bool isAddrMode2() const {
    if (!isMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -4096 && Val < 4096;
  }

  /// Addressing-mode-2 immediate offset operand (post-indexed form).
  bool isAM2OffsetImm() const {
    if (!isImm()) return false;
    // Immediate offset in range [-4095, 4095]; INT32_MIN encodes #-0.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val == std::numeric_limits<int32_t>::min()) ||
           (Val > -4096 && Val < 4096);
  }

  /// ARM addressing mode 3: unshifted register offset, or immediate offset
  /// in [-255, 255].
  bool isAddrMode3() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMem() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and we
    // have to check for this too.
    return (Val > -256 && Val < 256) ||
           Val == std::numeric_limits<int32_t>::min();
  }

  /// Addressing-mode-3 offset operand: post-indexed register, or immediate
  /// in [-255, 255].
  bool isAM3Offset() const {
    if (isPostIdxReg())
      return true;
    if (!isImm())
      return false;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is std::numeric_limits<int32_t>::min().
    return (Val > -256 && Val < 256) ||
           Val == std::numeric_limits<int32_t>::min();
  }
1466 
  /// Addressing mode 5: immediate offset only, a multiple of 4 in
  /// [-1020, 1020].
  bool isAddrMode5() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4;
    // INT32_MIN encodes #-0.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
           Val == std::numeric_limits<int32_t>::min();
  }

  /// FP16 variant of addressing mode 5: immediate offset only, a multiple of
  /// 2 in [-510, 510].
  bool isAddrMode5FP16() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-510, 510] and a multiple of 2;
    // INT32_MIN encodes #-0.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
           Val == std::numeric_limits<int32_t>::min();
  }
1498 
1499  bool isMemTBB() const {
1500  if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1501  Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1502  return false;
1503  return true;
1504  }
1505 
1506  bool isMemTBH() const {
1507  if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1508  Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1509  Memory.Alignment != 0 )
1510  return false;
1511  return true;
1512  }
1513 
1514  bool isMemRegOffset() const {
1515  if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1516  return false;
1517  return true;
1518  }
1519 
1520  bool isT2MemRegOffset() const {
1521  if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1522  Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
1523  return false;
1524  // Only lsl #{0, 1, 2, 3} allowed.
1525  if (Memory.ShiftType == ARM_AM::no_shift)
1526  return true;
1527  if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1528  return false;
1529  return true;
1530  }
1531 
  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset. No shifts, negations or any other complicating factors.
    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    // Both registers must be low registers (r0-r7).
    return isARMLowRegister(Memory.BaseRegNum) &&
           (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
  }

  /// Thumb [Rn, #imm] with a low base register and a 4-byte-scaled offset.
  bool isMemThumbRIs4() const {
    if (!isMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
  }

  /// Thumb [Rn, #imm] with a low base register and a 2-byte-scaled offset.
  bool isMemThumbRIs2() const {
    if (!isMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
  }

  /// Thumb [Rn, #imm] with a low base register and an unscaled byte offset.
  bool isMemThumbRIs1() const {
    if (!isMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 31;
  }

  /// Thumb [sp, #imm] with a 4-byte-scaled offset.
  bool isMemThumbSPI() const {
    if (!isMem() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
  }
1581 
  /// [Rn, #+/-imm]: offset a multiple of 4 in [-1020, 1020]; a non-constant
  /// immediate is accepted as a label reference needing a fixup.
  bool isMemImm8s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // Special case, #-0 is std::numeric_limits<int32_t>::min().
    return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
           Val == std::numeric_limits<int32_t>::min();
  }
  /// [Rn, #+/-imm]: offset a multiple of 4 in [-508, 508]; base must be in
  /// GPRnopc (not PC).
  bool isMemImm7s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
        !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;
    // Immediate offset a multiple of 4 in range [-508, 508].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // Special case, #-0 is INT32_MIN.
    return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
  }
  /// [Rn, #imm]: unsigned offset, a multiple of 4 in [0, 1020].
  bool isMemImm0_1020s4Offset() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
  }
1621 
  /// [Rn, #+/-imm8]: offset in [-255, 255], base must not be PC.
  bool isMemImm8Offset() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, 255]; INT32_MIN encodes #-0.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == std::numeric_limits<int32_t>::min()) ||
           (Val > -256 && Val < 256);
  }

  /// [Rn, #imm8]: non-negative offset in [0, 255].
  bool isMemPosImm8Offset() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val < 256;
  }

  /// [Rn, #-imm8]: strictly negative offset in [-255, -1] (or #-0, encoded
  /// as INT32_MIN); base must not be PC. Note: an absent offset is rejected.
  bool isMemNegImm8Offset() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, -1].
    if (!Memory.OffsetImm) return false;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == std::numeric_limits<int32_t>::min()) ||
           (Val > -256 && Val < 0);
  }

  /// [Rn, #imm12]: unsigned offset in [0, 4095].
  bool isMemUImm12Offset() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= 0 && Val < 4096);
  }

  /// [Rn, #+/-imm12]: offset in [-4095, 4095]; a non-constant immediate is
  /// accepted as a label reference needing a fixup.
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.

    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095]; INT32_MIN encodes #-0.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val > -4096 && Val < 4096) ||
           (Val == std::numeric_limits<int32_t>::min());
  }
1680 
  bool isConstPoolAsmImm() const {
    // Delay processing of Constant Pool Immediate, this will turn into
    // a constant. Match no other operand.
    return (isConstantPoolImm());
  }

  /// Post-indexed immediate in [-255, 255] (#-0 encoded as INT32_MIN).
  bool isPostIdxImm8() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val > -256 && Val < 256) ||
           (Val == std::numeric_limits<int32_t>::min());
  }

  /// Post-indexed immediate, a multiple of 4 in [-1020, 1020] (#-0 encoded
  /// as INT32_MIN).
  bool isPostIdxImm8s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
           (Val == std::numeric_limits<int32_t>::min());
  }

  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isBankedReg() const { return Kind == k_BankedReg; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1708 
  // NEON operands.

  /// Vector list of consecutive registers (e.g. {d0, d1, d2}).
  bool isSingleSpacedVectorList() const {
    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
  }

  /// Vector list of every-other registers (e.g. {d0, d2, d4}).
  bool isDoubleSpacedVectorList() const {
    return Kind == k_VectorList && VectorList.isDoubleSpaced;
  }

  bool isVecListOneD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 1;
  }

  // D-register pairs are matched by register class rather than by count.
  bool isVecListDPair() const {
    if (!isSingleSpacedVectorList()) return false;
    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListThreeD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  bool isVecListDPairSpaced() const {
    if (Kind != k_VectorList) return false;
    if (isSingleSpacedVectorList()) return false;
    return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListThreeQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }
1755 
  // "All lanes" vector lists, e.g. {d0[], d1[]} as used by VLD*DUP.

  bool isSingleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
  }

  bool isVecListOneDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 1;
  }

  // D-register pairs are matched by register class rather than by count.
  bool isVecListDPairAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListDPairSpacedAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 2;
  }

  bool isVecListThreeDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListThreeQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 4;
  }

  bool isVecListFourQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 4;
  }
1799 
  // Lane-indexed vector lists, e.g. {d0[2], d1[2]}. The maximum lane index
  // in each predicate reflects the element size: <= 7 for bytes, <= 3 for
  // halfwords, <= 1 for words within a D register.

  bool isSingleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
  }

  bool isVecListOneDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
  }

  bool isVecListOneDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
  }

  bool isVecListOneDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
  }

  bool isVecListTwoDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListThreeDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
  }

  bool isVecListThreeDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
  }

  bool isVecListThreeQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
  }

  bool isVecListThreeQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
  }

  bool isVecListThreeDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
  }

  bool isVecListFourDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
  }

  bool isVecListFourDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
  }

  bool isVecListFourQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
  }

  bool isVecListFourQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
  }

  bool isVecListFourDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
  }
1897 
// True if this operand is a vector lane index (the "[n]" in e.g. d0[1]).
bool isVectorIndex() const { return Kind == k_VectorIndex; }
1899 
1900  bool isVectorIndex8() const {
1901  if (Kind != k_VectorIndex) return false;
1902  return VectorIndex.Val < 8;
1903  }
1904 
1905  bool isVectorIndex16() const {
1906  if (Kind != k_VectorIndex) return false;
1907  return VectorIndex.Val < 4;
1908  }
1909 
1910  bool isVectorIndex32() const {
1911  if (Kind != k_VectorIndex) return false;
1912  return VectorIndex.Val < 2;
1913  }
1914  bool isVectorIndex64() const {
1915  if (Kind != k_VectorIndex) return false;
1916  return VectorIndex.Val < 1;
1917  }
1918 
1919  bool isNEONi8splat() const {
1920  if (!isImm()) return false;
1921  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1922  // Must be a constant.
1923  if (!CE) return false;
1924  int64_t Value = CE->getValue();
1925  // i8 value splatted across 8 bytes. The immediate is just the 8 byte
1926  // value.
1927  return Value >= 0 && Value < 256;
1928  }
1929 
1930  bool isNEONi16splat() const {
1931  if (isNEONByteReplicate(2))
1932  return false; // Leave that for bytes replication and forbid by default.
1933  if (!isImm())
1934  return false;
1935  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1936  // Must be a constant.
1937  if (!CE) return false;
1938  unsigned Value = CE->getValue();
1939  return ARM_AM::isNEONi16splat(Value);
1940  }
1941 
1942  bool isNEONi16splatNot() const {
1943  if (!isImm())
1944  return false;
1945  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1946  // Must be a constant.
1947  if (!CE) return false;
1948  unsigned Value = CE->getValue();
1949  return ARM_AM::isNEONi16splat(~Value & 0xffff);
1950  }
1951 
1952  bool isNEONi32splat() const {
1953  if (isNEONByteReplicate(4))
1954  return false; // Leave that for bytes replication and forbid by default.
1955  if (!isImm())
1956  return false;
1957  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1958  // Must be a constant.
1959  if (!CE) return false;
1960  unsigned Value = CE->getValue();
1961  return ARM_AM::isNEONi32splat(Value);
1962  }
1963 
1964  bool isNEONi32splatNot() const {
1965  if (!isImm())
1966  return false;
1967  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1968  // Must be a constant.
1969  if (!CE) return false;
1970  unsigned Value = CE->getValue();
1971  return ARM_AM::isNEONi32splat(~Value);
1972  }
1973 
static bool isValidNEONi32vmovImm(int64_t Value) {
  // Accept an i32 whose set bits are confined to a single byte position
  // (000X, 00X0, 0X00, X000); for VMOV/VMVN the "ones-filled" forms 00Xff
  // and 0Xffff are also accepted.
  uint64_t V = static_cast<uint64_t>(Value);
  if ((V & ~UINT64_C(0xff)) == 0 || (V & ~UINT64_C(0xff00)) == 0 ||
      (V & ~UINT64_C(0xff0000)) == 0 || (V & ~UINT64_C(0xff000000)) == 0)
    return true;
  return (V & ~UINT64_C(0xff00)) == 0xff ||
         (V & ~UINT64_C(0xff0000)) == 0xffff;
}
1984 
1985  bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
1986  assert((Width == 8 || Width == 16 || Width == 32) &&
1987  "Invalid element width");
1988  assert(NumElems * Width <= 64 && "Invalid result width");
1989 
1990  if (!isImm())
1991  return false;
1992  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1993  // Must be a constant.
1994  if (!CE)
1995  return false;
1996  int64_t Value = CE->getValue();
1997  if (!Value)
1998  return false; // Don't bother with zero.
1999  if (Inv)
2000  Value = ~Value;
2001 
2002  uint64_t Mask = (1ull << Width) - 1;
2003  uint64_t Elem = Value & Mask;
2004  if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
2005  return false;
2006  if (Width == 32 && !isValidNEONi32vmovImm(Elem))
2007  return false;
2008 
2009  for (unsigned i = 1; i < NumElems; ++i) {
2010  Value >>= Width;
2011  if ((Value & Mask) != Elem)
2012  return false;
2013  }
2014  return true;
2015  }
2016 
// Convenience wrapper: NumBytes copies of the same (non-inverted) byte.
bool isNEONByteReplicate(unsigned NumBytes) const {
  return isNEONReplicate(8, NumBytes, false);
}
2020 
// Sanity-check the template arguments of the isNEON*Replicate predicates:
// 8/16/32-bit source elements replicated into a strictly wider 16/32/64-bit
// result. Assert-only; no return value.
static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
  assert((FromW == 8 || FromW == 16 || FromW == 32) &&
         "Invalid source width");
  assert((ToW == 16 || ToW == 32 || ToW == 64) &&
         "Invalid destination width");
  assert(FromW < ToW && "ToW is not less than FromW");
}
2028 
// True if the immediate is a FromW-bit element replicated to fill ToW bits.
// A full 64-bit splat has its own dedicated matcher, so it is excluded here.
template<unsigned FromW, unsigned ToW>
bool isNEONmovReplicate() const {
  checkNeonReplicateArgs(FromW, ToW);
  if (ToW == 64 && isNEONi64splat())
    return false;
  return isNEONReplicate(FromW, ToW / FromW, false);
}
2036 
// Same as isNEONmovReplicate, but the replication is checked on the bitwise
// complement of the immediate (for the VMVN-style encodings).
template<unsigned FromW, unsigned ToW>
bool isNEONinvReplicate() const {
  checkNeonReplicateArgs(FromW, ToW);
  return isNEONReplicate(FromW, ToW / FromW, true);
}
2042 
2043  bool isNEONi32vmov() const {
2044  if (isNEONByteReplicate(4))
2045  return false; // Let it to be classified as byte-replicate case.
2046  if (!isImm())
2047  return false;
2048  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2049  // Must be a constant.
2050  if (!CE)
2051  return false;
2052  return isValidNEONi32vmovImm(CE->getValue());
2053  }
2054 
2055  bool isNEONi32vmovNeg() const {
2056  if (!isImm()) return false;
2057  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2058  // Must be a constant.
2059  if (!CE) return false;
2060  return isValidNEONi32vmovImm(~CE->getValue());
2061  }
2062 
2063  bool isNEONi64splat() const {
2064  if (!isImm()) return false;
2065  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2066  // Must be a constant.
2067  if (!CE) return false;
2068  uint64_t Value = CE->getValue();
2069  // i64 value with each byte being either 0 or 0xff.
2070  for (unsigned i = 0; i < 8; ++i, Value >>= 8)
2071  if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
2072  return true;
2073  }
2074 
2075  template<int64_t Angle, int64_t Remainder>
2076  bool isComplexRotation() const {
2077  if (!isImm()) return false;
2078 
2079  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2080  if (!CE) return false;
2081  uint64_t Value = CE->getValue();
2082 
2083  return (Value % Angle == Remainder && Value <= 270);
2084  }
2085 
2086  bool isMVELongShift() const {
2087  if (!isImm()) return false;
2088  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2089  // Must be a constant.
2090  if (!CE) return false;
2091  uint64_t Value = CE->getValue();
2092  return Value >= 1 && Value <= 32;
2093  }
2094 
2095  bool isITCondCodeNoAL() const {
2096  if (!isITCondCode()) return false;
2098  return CC != ARMCC::AL;
2099  }
2100 
2101  bool isITCondCodeRestrictedI() const {
2102  if (!isITCondCode())
2103  return false;
2105  return CC == ARMCC::EQ || CC == ARMCC::NE;
2106  }
2107 
2108  bool isITCondCodeRestrictedS() const {
2109  if (!isITCondCode())
2110  return false;
2112  return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
2113  CC == ARMCC::GE;
2114  }
2115 
2116  bool isITCondCodeRestrictedU() const {
2117  if (!isITCondCode())
2118  return false;
2120  return CC == ARMCC::HS || CC == ARMCC::HI;
2121  }
2122 
2123  bool isITCondCodeRestrictedFP() const {
2124  if (!isITCondCode())
2125  return false;
2127  return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
2128  CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
2129  }
2130 
2131  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
2132  // Add as immediates when possible. Null MCExpr = 0.
2133  if (!Expr)
2135  else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2136  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2137  else
2138  Inst.addOperand(MCOperand::createExpr(Expr));
2139  }
2140 
// Emit an ARM-mode branch target; non-constant expressions become fixups.
void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  addExpr(Inst, getImm());
}
2145 
// Emit a Thumb-mode branch target; non-constant expressions become fixups.
void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  addExpr(Inst, getImm());
}
2150 
2151  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2152  assert(N == 2 && "Invalid number of operands!");
2153  Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2154  unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
2155  Inst.addOperand(MCOperand::createReg(RegNum));
2156  }
2157 
2158  void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
2159  assert(N == 2 && "Invalid number of operands!");
2160  Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred())));
2161  unsigned RegNum = getVPTPred() == ARMVCC::None ? 0: ARM::P0;
2162  Inst.addOperand(MCOperand::createReg(RegNum));
2163  }
2164 
// Emit a vpred_r predicate: the two vpred_n operands plus the "inactive
// lanes" source register. For predicated instructions that register must be
// the operand tied to the destination, found via the TIED_TO constraint.
void addVPTPredROperands(MCInst &Inst, unsigned N) const {
  assert(N == 3 && "Invalid number of operands!");
  addVPTPredNOperands(Inst, N-1);
  unsigned RegNum;
  if (getVPTPred() == ARMVCC::None) {
    // Unpredicated: no inactive-lane source.
    RegNum = 0;
  } else {
    // The operand we are about to add is tied to an output operand; use
    // that output register as the inactive-lane value.
    unsigned NextOpIndex = Inst.getNumOperands();
    const MCInstrDesc &MCID = ARMInsts[Inst.getOpcode()];
    int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO);
    assert(TiedOp >= 0 &&
           "Inactive register in vpred_r is not tied to an output!");
    RegNum = Inst.getOperand(TiedOp).getReg();
  }
  Inst.addOperand(MCOperand::createReg(RegNum));
}
2181 
// Emit the coprocessor number (p0-p15) as an immediate.
void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createImm(getCoproc()));
}
2186 
// Emit the coprocessor register (c0-c15) as an immediate.
void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createImm(getCoproc()));
}
2191 
// Emit the raw coprocessor {option} value as an immediate.
void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
}
2196 
// Emit the IT-block mask as an immediate.
void addITMaskOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createImm(ITMask.Mask));
}
2201 
// Emit the IT condition code, unmodified, as an immediate.
void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
}
2206 
2207  void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const {
2208  assert(N == 1 && "Invalid number of operands!");
2210  }
2211 
2212  void addCCOutOperands(MCInst &Inst, unsigned N) const {
2213  assert(N == 1 && "Invalid number of operands!");
2215  }
2216 
2217  void addRegOperands(MCInst &Inst, unsigned N) const {
2218  assert(N == 1 && "Invalid number of operands!");
2220  }
2221 
2222  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
2223  assert(N == 3 && "Invalid number of operands!");
2224  assert(isRegShiftedReg() &&
2225  "addRegShiftedRegOperands() on non-RegShiftedReg!");
2226  Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
2227  Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
2229  ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
2230  }
2231 
2232  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
2233  assert(N == 2 && "Invalid number of operands!");
2234  assert(isRegShiftedImm() &&
2235  "addRegShiftedImmOperands() on non-RegShiftedImm!");
2236  Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
2237  // Shift of #32 is encoded as 0 where permitted
2238  unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2240  ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
2241  }
2242 
// Emit a shifter immediate: bit 5 selects ASR (1) vs LSL (0), bits 0-4
// hold the shift amount.
void addShifterImmOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
                                       ShifterImm.Imm));
}
2248 
2249  void addRegListOperands(MCInst &Inst, unsigned N) const {
2250  assert(N == 1 && "Invalid number of operands!");
2251  const SmallVectorImpl<unsigned> &RegList = getRegList();
2253  I = RegList.begin(), E = RegList.end(); I != E; ++I)
2255  }
2256 
2257  void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const {
2258  assert(N == 1 && "Invalid number of operands!");
2259  const SmallVectorImpl<unsigned> &RegList = getRegList();
2261  I = RegList.begin(), E = RegList.end(); I != E; ++I)
2263  }
2264 
// Delegates to addRegListOperands.
void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
  addRegListOperands(Inst, N);
}
2268 
// Delegates to addRegListOperands.
void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
  addRegListOperands(Inst, N);
}
2272 
// Delegates to addRegListOperands.
void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
  addRegListOperands(Inst, N);
}
2276 
// Delegates to addRegListOperands.
void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
  addRegListOperands(Inst, N);
}
2280 
// Emit a rotate-immediate (8/16/24) in its encoded, divided-by-8 form.
void addRotImmOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  // Encoded as val>>3. The printer handles display as 8, 16, 24.
  Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
}
2286 
// Emit an ARM modified immediate: the base byte in bits 6-0 with the
// rotate amount shifted in above it (ModImm.Rot << 7).
void addModImmOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");

  // Support for fixups (MCFixup)
  if (isImm())
    return addImmOperands(Inst, N);

  Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
}
2296 
2297  void addModImmNotOperands(MCInst &Inst, unsigned N) const {
2298  assert(N == 1 && "Invalid number of operands!");
2299  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2300  uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
2301  Inst.addOperand(MCOperand::createImm(Enc));
2302  }
2303 
2304  void addModImmNegOperands(MCInst &Inst, unsigned N) const {
2305  assert(N == 1 && "Invalid number of operands!");
2306  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2307  uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
2308  Inst.addOperand(MCOperand::createImm(Enc));
2309  }
2310 
2311  void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
2312  assert(N == 1 && "Invalid number of operands!");
2313  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2314  uint32_t Val = -CE->getValue();
2315  Inst.addOperand(MCOperand::createImm(Val));
2316  }
2317 
2318  void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
2319  assert(N == 1 && "Invalid number of operands!");
2320  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2321  uint32_t Val = -CE->getValue();
2322  Inst.addOperand(MCOperand::createImm(Val));
2323  }
2324 
// Emit a bitfield (BFC/BFI-style) operand as a 32-bit mask with the
// addressed bits clear and all others set.
void addBitfieldOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  // Munge the lsb/width into a bitfield mask.
  unsigned lsb = Bitfield.LSB;
  unsigned width = Bitfield.Width;
  // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
  uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                    (32 - (lsb + width)));
  Inst.addOperand(MCOperand::createImm(Mask));
}
2335 
// Emit a plain immediate, folding constants via addExpr.
void addImmOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  addExpr(Inst, getImm());
}
2340 
2341  void addFBits16Operands(MCInst &Inst, unsigned N) const {
2342  assert(N == 1 && "Invalid number of operands!");
2343  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2344  Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
2345  }
2346 
2347  void addFBits32Operands(MCInst &Inst, unsigned N) const {
2348  assert(N == 1 && "Invalid number of operands!");
2349  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2350  Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
2351  }
2352 
2353  void addFPImmOperands(MCInst &Inst, unsigned N) const {
2354  assert(N == 1 && "Invalid number of operands!");
2355  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2356  int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
2357  Inst.addOperand(MCOperand::createImm(Val));
2358  }
2359 
2360  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
2361  assert(N == 1 && "Invalid number of operands!");
2362  // FIXME: We really want to scale the value here, but the LDRD/STRD
2363  // instruction don't encode operands that way yet.
2364  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2366  }
2367 
2368  void addImm7s4Operands(MCInst &Inst, unsigned N) const {
2369  assert(N == 1 && "Invalid number of operands!");
2370  // FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR
2371  // instruction don't encode operands that way yet.
2372  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2374  }
2375 
2376  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
2377  assert(N == 1 && "Invalid number of operands!");
2378  // The immediate is scaled by four in the encoding and is stored
2379  // in the MCInst as such. Lop off the low two bits here.
2380  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2381  Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2382  }
2383 
2384  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
2385  assert(N == 1 && "Invalid number of operands!");
2386  // The immediate is scaled by four in the encoding and is stored
2387  // in the MCInst as such. Lop off the low two bits here.
2388  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2389  Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
2390  }
2391 
2392  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
2393  assert(N == 1 && "Invalid number of operands!");
2394  // The immediate is scaled by four in the encoding and is stored
2395  // in the MCInst as such. Lop off the low two bits here.
2396  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2397  Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2398  }
2399 
2400  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
2401  assert(N == 1 && "Invalid number of operands!");
2402  // The constant encodes as the immediate-1, and we store in the instruction
2403  // the bits as encoded, so subtract off one here.
2404  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2405  Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2406  }
2407 
2408  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
2409  assert(N == 1 && "Invalid number of operands!");
2410  // The constant encodes as the immediate-1, and we store in the instruction
2411  // the bits as encoded, so subtract off one here.
2412  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2413  Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2414  }
2415 
2416  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
2417  assert(N == 1 && "Invalid number of operands!");
2418  // The constant encodes as the immediate, except for 32, which encodes as
2419  // zero.
2420  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2421  unsigned Imm = CE->getValue();
2422  Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
2423  }
2424 
2425  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
2426  assert(N == 1 && "Invalid number of operands!");
2427  // An ASR value of 32 encodes as 0, so that's how we want to add it to
2428  // the instruction as well.
2429  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2430  int Val = CE->getValue();
2431  Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
2432  }
2433 
2434  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
2435  assert(N == 1 && "Invalid number of operands!");
2436  // The operand is actually a t2_so_imm, but we have its bitwise
2437  // negation in the assembly source, so twiddle it here.
2438  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2440  }
2441 
2442  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
2443  assert(N == 1 && "Invalid number of operands!");
2444  // The operand is actually a t2_so_imm, but we have its
2445  // negation in the assembly source, so twiddle it here.
2446  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2448  }
2449 
2450  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
2451  assert(N == 1 && "Invalid number of operands!");
2452  // The operand is actually an imm0_4095, but we have its
2453  // negation in the assembly source, so twiddle it here.
2454  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2456  }
2457 
2458  void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
2459  if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2460  Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2461  return;
2462  }
2463 
2464  const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
2465  assert(SR && "Unknown value type!");
2467  }
2468 
2469  void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
2470  assert(N == 1 && "Invalid number of operands!");
2471  if (isImm()) {
2472  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2473  if (CE) {
2475  return;
2476  }
2477 
2478  const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
2479 
2480  assert(SR && "Unknown value type!");
2482  return;
2483  }
2484 
2485  assert(isMem() && "Unknown value type!");
2486  assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
2487  Inst.addOperand(MCOperand::createImm(Memory.OffsetImm->getValue()));
2488  }
2489 
// Emit the memory barrier option (e.g. SY, ISH) as an immediate.
void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
}
2494 
// Emit the instruction-synchronization barrier option as an immediate.
void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
}
2499 
// Emit the trace-synchronization barrier option as an immediate.
void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt())));
}
2504 
// Emit a [Rn]-style memory operand: just the base register.
void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
}
2509 
// Emit a Thumb2 [Rn]-style memory operand: just the base register.
void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
}
2514 
2515  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
2516  assert(N == 1 && "Invalid number of operands!");
2517  int32_t Imm = Memory.OffsetImm->getValue();
2518  Inst.addOperand(MCOperand::createImm(Imm));
2519  }
2520 
2521  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2522  assert(N == 1 && "Invalid number of operands!");
2523  assert(isImm() && "Not an immediate!");
2524 
2525  // If we have an immediate that's not a constant, treat it as a label
2526  // reference needing a fixup.
2527  if (!isa<MCConstantExpr>(getImm())) {
2528  Inst.addOperand(MCOperand::createExpr(getImm()));
2529  return;
2530  }
2531 
2532  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2533  int Val = CE->getValue();
2534  Inst.addOperand(MCOperand::createImm(Val));
2535  }
2536 
// Emit an aligned memory operand: base register plus alignment (in bytes).
void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
  assert(N == 2 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  Inst.addOperand(MCOperand::createImm(Memory.Alignment));
}
2542 
// Delegates to addAlignedMemoryOperands.
void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2546 
// Delegates to addAlignedMemoryOperands.
void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2550 
// Delegates to addAlignedMemoryOperands.
void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2554 
// Delegates to addAlignedMemoryOperands.
void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2558 
// Delegates to addAlignedMemoryOperands.
void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2562 
// Delegates to addAlignedMemoryOperands.
void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2566 
// Delegates to addAlignedMemoryOperands.
void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2570 
// Delegates to addAlignedMemoryOperands.
void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2574 
// Delegates to addAlignedMemoryOperands.
void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2578 
// Delegates to addAlignedMemoryOperands.
void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2582 
// Delegates to addAlignedMemoryOperands.
void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
  addAlignedMemoryOperands(Inst, N);
}
2586 
// Emit an addrmode2 operand: base register, offset register (0 if none),
// and the packed add/sub + shift + immediate encoding.
void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
  assert(N == 3 && "Invalid number of operands!");
  int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
  if (!Memory.OffsetRegNum) {
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
  } else {
    // For register offset, we encode the shift type and negation flag
    // here.
    Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                            Memory.ShiftImm, Memory.ShiftType);
  }
  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  Inst.addOperand(MCOperand::createImm(Val));
}
2606 
2607  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
2608  assert(N == 2 && "Invalid number of operands!");
2609  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2610  assert(CE && "non-constant AM2OffsetImm operand!");
2611  int32_t Val = CE->getValue();
2612  ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2613  // Special case for #-0
2614  if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2615  if (Val < 0) Val = -Val;
2616  Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2618  Inst.addOperand(MCOperand::createImm(Val));
2619  }
2620 
2621  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
2622  assert(N == 3 && "Invalid number of operands!");
2623  // If we have an immediate that's not a constant, treat it as a label
2624  // reference needing a fixup. If it is a constant, it's something else
2625  // and we reject it.
2626  if (isImm()) {
2627  Inst.addOperand(MCOperand::createExpr(getImm()));
2630  return;
2631  }
2632 
2633  int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2634  if (!Memory.OffsetRegNum) {
2635  ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2636  // Special case for #-0
2637  if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2638  if (Val < 0) Val = -Val;
2639  Val = ARM_AM::getAM3Opc(AddSub, Val);
2640  } else {
2641  // For register offset, we encode the shift type and negation flag
2642  // here.
2643  Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
2644  }
2645  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2646  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2647  Inst.addOperand(MCOperand::createImm(Val));
2648  }
2649 
2650  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
2651  assert(N == 2 && "Invalid number of operands!");
2652  if (Kind == k_PostIndexRegister) {
2653  int32_t Val =
2654  ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
2655  Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2656  Inst.addOperand(MCOperand::createImm(Val));
2657  return;
2658  }
2659 
2660  // Constant offset.
2661  const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
2662  int32_t Val = CE->getValue();
2663  ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2664  // Special case for #-0
2665  if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2666  if (Val < 0) Val = -Val;
2667  Val = ARM_AM::getAM3Opc(AddSub, Val);
2669  Inst.addOperand(MCOperand::createImm(Val));
2670  }
2671 
2672  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
2673  assert(N == 2 && "Invalid number of operands!");
2674  // If we have an immediate that's not a constant, treat it as a label
2675  // reference needing a fixup. If it is a constant, it's something else
2676  // and we reject it.
2677  if (isImm()) {
2678  Inst.addOperand(MCOperand::createExpr(getImm()));
2680  return;
2681  }
2682 
2683  // The lower two bits are always zero and as such are not encoded.
2684  int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2685  ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2686  // Special case for #-0
2687  if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2688  if (Val < 0) Val = -Val;
2689  Val = ARM_AM::getAM5Opc(AddSub, Val);
2690  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2691  Inst.addOperand(MCOperand::createImm(Val));
2692  }
2693 
2694  void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
2695  assert(N == 2 && "Invalid number of operands!");
2696  // If we have an immediate that's not a constant, treat it as a label
2697  // reference needing a fixup. If it is a constant, it's something else
2698  // and we reject it.
2699  if (isImm()) {
2700  Inst.addOperand(MCOperand::createExpr(getImm()));
2702  return;
2703  }
2704 
2705  // The lower bit is always zero and as such is not encoded.
2706  int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 2 : 0;
2707  ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2708  // Special case for #-0
2709  if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2710  if (Val < 0) Val = -Val;
2711  Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
2712  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2713  Inst.addOperand(MCOperand::createImm(Val));
2714  }
2715 
2716  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
2717  assert(N == 2 && "Invalid number of operands!");
2718  // If we have an immediate that's not a constant, treat it as a label
2719  // reference needing a fixup. If it is a constant, it's something else
2720  // and we reject it.
2721  if (isImm()) {
2722  Inst.addOperand(MCOperand::createExpr(getImm()));
2724  return;
2725  }
2726 
2727  int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2728  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2729  Inst.addOperand(MCOperand::createImm(Val));
2730  }
2731 
2732  void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const {
2733  assert(N == 2 && "Invalid number of operands!");
2734  // If we have an immediate that's not a constant, treat it as a label
2735  // reference needing a fixup. If it is a constant, it's something else
2736  // and we reject it.
2737  if (isImm()) {
2738  Inst.addOperand(MCOperand::createExpr(getImm()));
2740  return;
2741  }
2742 
2743  int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2744  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2745  Inst.addOperand(MCOperand::createImm(Val));
2746  }
2747 
2748  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
2749  assert(N == 2 && "Invalid number of operands!");
2750  // The lower two bits are always zero and as such are not encoded.
2751  int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2752  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2753  Inst.addOperand(MCOperand::createImm(Val));
2754  }
2755 
2756  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2757  assert(N == 2 && "Invalid number of operands!");
2758  int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2759  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2760  Inst.addOperand(MCOperand::createImm(Val));
2761  }
2762 
2763  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2764  addMemImm8OffsetOperands(Inst, N);
2765  }
2766 
2767  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2768  addMemImm8OffsetOperands(Inst, N);
2769  }
2770 
2771  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2772  assert(N == 2 && "Invalid number of operands!");
2773  // If this is an immediate, it's a label reference.
2774  if (isImm()) {
2775  addExpr(Inst, getImm());
2777  return;
2778  }
2779 
2780  // Otherwise, it's a normal memory reg+offset.
2781  int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2782  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2783  Inst.addOperand(MCOperand::createImm(Val));
2784  }
2785 
2786  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2787  assert(N == 2 && "Invalid number of operands!");
2788  // If this is an immediate, it's a label reference.
2789  if (isImm()) {
2790  addExpr(Inst, getImm());
2792  return;
2793  }
2794 
2795  // Otherwise, it's a normal memory reg+offset.
2796  int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2797  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2798  Inst.addOperand(MCOperand::createImm(Val));
2799  }
2800 
2801  void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
2802  assert(N == 1 && "Invalid number of operands!");
2803  // This is container for the immediate that we will create the constant
2804  // pool from
2805  addExpr(Inst, getConstantPoolImm());
2806  return;
2807  }
2808 
2809  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
2810  assert(N == 2 && "Invalid number of operands!");
2811  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2812  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2813  }
2814 
2815  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
2816  assert(N == 2 && "Invalid number of operands!");
2817  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2818  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2819  }
2820 
2821  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2822  assert(N == 3 && "Invalid number of operands!");
2823  unsigned Val =
2825  Memory.ShiftImm, Memory.ShiftType);
2826  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2827  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2828  Inst.addOperand(MCOperand::createImm(Val));
2829  }
2830 
2831  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2832  assert(N == 3 && "Invalid number of operands!");
2833  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2834  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2835  Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
2836  }
2837 
2838  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
2839  assert(N == 2 && "Invalid number of operands!");
2840  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2841  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2842  }
2843 
2844  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
2845  assert(N == 2 && "Invalid number of operands!");
2846  int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2847  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2848  Inst.addOperand(MCOperand::createImm(Val));
2849  }
2850 
2851  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
2852  assert(N == 2 && "Invalid number of operands!");
2853  int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
2854  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2855  Inst.addOperand(MCOperand::createImm(Val));
2856  }
2857 
2858  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
2859  assert(N == 2 && "Invalid number of operands!");
2860  int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
2861  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2862  Inst.addOperand(MCOperand::createImm(Val));
2863  }
2864 
2865  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
2866  assert(N == 2 && "Invalid number of operands!");
2867  int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2868  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2869  Inst.addOperand(MCOperand::createImm(Val));
2870  }
2871 
  // Encode a post-indexed 8-bit immediate: bit 8 is the add flag and
  // bits 0-7 hold the magnitude of the offset.
  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // INT32_MIN is the sentinel for #-0: magnitude 0 with the add bit clear.
    if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
    // Magnitude in the low byte, add/sub flag in bit 8.
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::createImm(Imm));
  }
2882 
  // Encode a post-indexed 8-bit immediate scaled by 4: bit 8 is the add
  // flag and bits 0-7 hold the magnitude of the offset divided by 4.
  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // INT32_MIN is the sentinel for #-0: magnitude 0 with the add bit clear.
    if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::createImm(Imm));
  }
2894 
2895  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
2896  assert(N == 2 && "Invalid number of operands!");
2897  Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2898  Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
2899  }
2900 
2901  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
2902  assert(N == 2 && "Invalid number of operands!");
2903  Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2904  // The sign, shift type, and shift amount are encoded in a single operand
2905  // using the AM2 encoding helpers.
2906  ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
2907  unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
2908  PostIdxReg.ShiftTy);
2909  Inst.addOperand(MCOperand::createImm(Imm));
2910  }
2911 
2912  void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
2913  assert(N == 1 && "Invalid number of operands!");
2914  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2916  }
2917 
2918  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
2919  assert(N == 1 && "Invalid number of operands!");
2920  Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
2921  }
2922 
2923  void addBankedRegOperands(MCInst &Inst, unsigned N) const {
2924  assert(N == 1 && "Invalid number of operands!");
2925  Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
2926  }
2927 
2928  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
2929  assert(N == 1 && "Invalid number of operands!");
2930  Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
2931  }
2932 
2933  void addVecListOperands(MCInst &Inst, unsigned N) const {
2934  assert(N == 1 && "Invalid number of operands!");
2935  Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
2936  }
2937 
2938  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
2939  assert(N == 2 && "Invalid number of operands!");
2940  Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
2941  Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
2942  }
2943 
2944  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
2945  assert(N == 1 && "Invalid number of operands!");
2946  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2947  }
2948 
2949  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
2950  assert(N == 1 && "Invalid number of operands!");
2951  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2952  }
2953 
2954  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
2955  assert(N == 1 && "Invalid number of operands!");
2956  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2957  }
2958 
2959  void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
2960  assert(N == 1 && "Invalid number of operands!");
2961  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2962  }
2963 
2964  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
2965  assert(N == 1 && "Invalid number of operands!");
2966  // The immediate encodes the type of constant as well as the value.
2967  // Mask in that this is an i8 splat.
2968  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2969  Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
2970  }
2971 
2972  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
2973  assert(N == 1 && "Invalid number of operands!");
2974  // The immediate encodes the type of constant as well as the value.
2975  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2976  unsigned Value = CE->getValue();
2977  Value = ARM_AM::encodeNEONi16splat(Value);
2978  Inst.addOperand(MCOperand::createImm(Value));
2979  }
2980 
2981  void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
2982  assert(N == 1 && "Invalid number of operands!");
2983  // The immediate encodes the type of constant as well as the value.
2984  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2985  unsigned Value = CE->getValue();
2986  Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
2987  Inst.addOperand(MCOperand::createImm(Value));
2988  }
2989 
2990  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
2991  assert(N == 1 && "Invalid number of operands!");
2992  // The immediate encodes the type of constant as well as the value.
2993  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2994  unsigned Value = CE->getValue();
2995  Value = ARM_AM::encodeNEONi32splat(Value);
2996  Inst.addOperand(MCOperand::createImm(Value));
2997  }
2998 
2999  void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
3000  assert(N == 1 && "Invalid number of operands!");
3001  // The immediate encodes the type of constant as well as the value.
3002  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3003  unsigned Value = CE->getValue();
3004  Value = ARM_AM::encodeNEONi32splat(~Value);
3005  Inst.addOperand(MCOperand::createImm(Value));
3006  }
3007 
3008  void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
3009  // The immediate encodes the type of constant as well as the value.
3010  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3011  assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
3012  Inst.getOpcode() == ARM::VMOVv16i8) &&
3013  "All instructions that wants to replicate non-zero byte "
3014  "always must be replaced with VMOVv8i8 or VMOVv16i8.");
3015  unsigned Value = CE->getValue();
3016  if (Inv)
3017  Value = ~Value;
3018  unsigned B = Value & 0xff;
3019  B |= 0xe00; // cmode = 0b1110
3021  }
3022 
3023  void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3024  assert(N == 1 && "Invalid number of operands!");
3025  addNEONi8ReplicateOperands(Inst, true);
3026  }
3027 
3028  static unsigned encodeNeonVMOVImmediate(unsigned Value) {
3029  if (Value >= 256 && Value <= 0xffff)
3030  Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
3031  else if (Value > 0xffff && Value <= 0xffffff)
3032  Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
3033  else if (Value > 0xffffff)
3034  Value = (Value >> 24) | 0x600;
3035  return Value;
3036  }
3037 
3038  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
3039  assert(N == 1 && "Invalid number of operands!");
3040  // The immediate encodes the type of constant as well as the value.
3041  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3042  unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
3043  Inst.addOperand(MCOperand::createImm(Value));
3044  }
3045 
3046  void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3047  assert(N == 1 && "Invalid number of operands!");
3048  addNEONi8ReplicateOperands(Inst, false);
3049  }
3050 
3051  void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
3052  assert(N == 1 && "Invalid number of operands!");
3053  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3054  assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
3055  Inst.getOpcode() == ARM::VMOVv8i16 ||
3056  Inst.getOpcode() == ARM::VMVNv4i16 ||
3057  Inst.getOpcode() == ARM::VMVNv8i16) &&
3058  "All instructions that want to replicate non-zero half-word "
3059  "always must be replaced with V{MOV,MVN}v{4,8}i16.");
3060  uint64_t Value = CE->getValue();
3061  unsigned Elem = Value & 0xffff;
3062  if (Elem >= 256)
3063  Elem = (Elem >> 8) | 0x200;
3064  Inst.addOperand(MCOperand::createImm(Elem));
3065  }
3066 
3067  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
3068  assert(N == 1 && "Invalid number of operands!");
3069  // The immediate encodes the type of constant as well as the value.
3070  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3071  unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
3072  Inst.addOperand(MCOperand::createImm(Value));
3073  }
3074 
3075  void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
3076  assert(N == 1 && "Invalid number of operands!");
3077  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3078  assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
3079  Inst.getOpcode() == ARM::VMOVv4i32 ||
3080  Inst.getOpcode() == ARM::VMVNv2i32 ||
3081  Inst.getOpcode() == ARM::VMVNv4i32) &&
3082  "All instructions that want to replicate non-zero word "
3083  "always must be replaced with V{MOV,MVN}v{2,4}i32.");
3084  uint64_t Value = CE->getValue();
3085  unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
3086  Inst.addOperand(MCOperand::createImm(Elem));
3087  }
3088 
3089  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
3090  assert(N == 1 && "Invalid number of operands!");
3091  // The immediate encodes the type of constant as well as the value.
3092  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3093  uint64_t Value = CE->getValue();
3094  unsigned Imm = 0;
3095  for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
3096  Imm |= (Value & 1) << i;
3097  }
3098  Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
3099  }
3100 
3101  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
3102  assert(N == 1 && "Invalid number of operands!");
3103  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3104  Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
3105  }
3106 
3107  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
3108  assert(N == 1 && "Invalid number of operands!");
3109  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3110  Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
3111  }
3112 
3113  void print(raw_ostream &OS) const override;
3114 
3115  static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
3116  auto Op = make_unique<ARMOperand>(k_ITCondMask);
3117  Op->ITMask.Mask = Mask;
3118  Op->StartLoc = S;
3119  Op->EndLoc = S;
3120  return Op;
3121  }
3122 
3123  static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
3124  SMLoc S) {
3125  auto Op = make_unique<ARMOperand>(k_CondCode);
3126  Op->CC.Val = CC;
3127  Op->StartLoc = S;
3128  Op->EndLoc = S;
3129  return Op;
3130  }
3131 
3132  static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC,
3133  SMLoc S) {
3134  auto Op = make_unique<ARMOperand>(k_VPTPred);
3135  Op->VCC.Val = CC;
3136  Op->StartLoc = S;
3137  Op->EndLoc = S;
3138  return Op;
3139  }
3140 
3141  static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
3142  auto Op = make_unique<ARMOperand>(k_CoprocNum);
3143  Op->Cop.Val = CopVal;
3144  Op->StartLoc = S;
3145  Op->EndLoc = S;
3146  return Op;
3147  }
3148 
3149  static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
3150  auto Op = make_unique<ARMOperand>(k_CoprocReg);
3151  Op->Cop.Val = CopVal;
3152  Op->StartLoc = S;
3153  Op->EndLoc = S;
3154  return Op;
3155  }
3156 
3157  static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
3158  SMLoc E) {
3159  auto Op = make_unique<ARMOperand>(k_CoprocOption);
3160  Op->Cop.Val = Val;
3161  Op->StartLoc = S;
3162  Op->EndLoc = E;
3163  return Op;
3164  }
3165 
3166  static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
3167  auto Op = make_unique<ARMOperand>(k_CCOut);
3168  Op->Reg.RegNum = RegNum;
3169  Op->StartLoc = S;
3170  Op->EndLoc = S;
3171  return Op;
3172  }
3173 
3174  static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
3175  auto Op = make_unique<ARMOperand>(k_Token);
3176  Op->Tok.Data = Str.data();
3177  Op->Tok.Length = Str.size();
3178  Op->StartLoc = S;
3179  Op->EndLoc = S;
3180  return Op;
3181  }
3182 
3183  static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
3184  SMLoc E) {
3185  auto Op = make_unique<ARMOperand>(k_Register);
3186  Op->Reg.RegNum = RegNum;
3187  Op->StartLoc = S;
3188  Op->EndLoc = E;
3189  return Op;
3190  }
3191 
3192  static std::unique_ptr<ARMOperand>
3193  CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3194  unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
3195  SMLoc E) {
3196  auto Op = make_unique<ARMOperand>(k_ShiftedRegister);
3197  Op->RegShiftedReg.ShiftTy = ShTy;
3198  Op->RegShiftedReg.SrcReg = SrcReg;
3199  Op->RegShiftedReg.ShiftReg = ShiftReg;
3200  Op->RegShiftedReg.ShiftImm = ShiftImm;
3201  Op->StartLoc = S;
3202  Op->EndLoc = E;
3203  return Op;
3204  }
3205 
3206  static std::unique_ptr<ARMOperand>
3207  CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3208  unsigned ShiftImm, SMLoc S, SMLoc E) {
3209  auto Op = make_unique<ARMOperand>(k_ShiftedImmediate);
3210  Op->RegShiftedImm.ShiftTy = ShTy;
3211  Op->RegShiftedImm.SrcReg = SrcReg;
3212  Op->RegShiftedImm.ShiftImm = ShiftImm;
3213  Op->StartLoc = S;
3214  Op->EndLoc = E;
3215  return Op;
3216  }
3217 
3218  static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
3219  SMLoc S, SMLoc E) {
3220  auto Op = make_unique<ARMOperand>(k_ShifterImmediate);
3221  Op->ShifterImm.isASR = isASR;
3222  Op->ShifterImm.Imm = Imm;
3223  Op->StartLoc = S;
3224  Op->EndLoc = E;
3225  return Op;
3226  }
3227 
3228  static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
3229  SMLoc E) {
3230  auto Op = make_unique<ARMOperand>(k_RotateImmediate);
3231  Op->RotImm.Imm = Imm;
3232  Op->StartLoc = S;
3233  Op->EndLoc = E;
3234  return Op;
3235  }
3236 
3237  static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
3238  SMLoc S, SMLoc E) {
3239  auto Op = make_unique<ARMOperand>(k_ModifiedImmediate);
3240  Op->ModImm.Bits = Bits;
3241  Op->ModImm.Rot = Rot;
3242  Op->StartLoc = S;
3243  Op->EndLoc = E;
3244  return Op;
3245  }
3246 
3247  static std::unique_ptr<ARMOperand>
3248  CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
3249  auto Op = make_unique<ARMOperand>(k_ConstantPoolImmediate);
3250  Op->Imm.Val = Val;
3251  Op->StartLoc = S;
3252  Op->EndLoc = E;
3253  return Op;
3254  }
3255 
3256  static std::unique_ptr<ARMOperand>
3257  CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
3258  auto Op = make_unique<ARMOperand>(k_BitfieldDescriptor);
3259  Op->Bitfield.LSB = LSB;
3260  Op->Bitfield.Width = Width;
3261  Op->StartLoc = S;
3262  Op->EndLoc = E;
3263  return Op;
3264  }
3265 
3266  static std::unique_ptr<ARMOperand>
3267  CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
3268  SMLoc StartLoc, SMLoc EndLoc) {
3269  assert(Regs.size() > 0 && "RegList contains no registers?");
3270  KindTy Kind = k_RegisterList;
3271 
3272  if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
3273  Regs.front().second)) {
3274  if (Regs.back().second == ARM::VPR)
3275  Kind = k_FPDRegisterListWithVPR;
3276  else
3277  Kind = k_DPRRegisterList;
3278  } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
3279  Regs.front().second)) {
3280  if (Regs.back().second == ARM::VPR)
3281  Kind = k_FPSRegisterListWithVPR;
3282  else
3283  Kind = k_SPRRegisterList;
3284  }
3285 
3286  // Sort based on the register encoding values.
3287  array_pod_sort(Regs.begin(), Regs.end());
3288 
3289  if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
3290  Kind = k_RegisterListWithAPSR;
3291 
3292  auto Op = make_unique<ARMOperand>(Kind);
3293  for (SmallVectorImpl<std::pair<unsigned, unsigned>>::const_iterator
3294  I = Regs.begin(), E = Regs.end(); I != E; ++I)
3295  Op->Registers.push_back(I->second);
3296 
3297  Op->StartLoc = StartLoc;
3298  Op->EndLoc = EndLoc;
3299  return Op;
3300  }
3301 
3302  static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
3303  unsigned Count,
3304  bool isDoubleSpaced,
3305  SMLoc S, SMLoc E) {
3306  auto Op = make_unique<ARMOperand>(k_VectorList);
3307  Op->VectorList.RegNum = RegNum;
3308  Op->VectorList.Count = Count;
3309  Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3310  Op->StartLoc = S;
3311  Op->EndLoc = E;
3312  return Op;
3313  }
3314 
3315  static std::unique_ptr<ARMOperand>
3316  CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
3317  SMLoc S, SMLoc E) {
3318  auto Op = make_unique<ARMOperand>(k_VectorListAllLanes);
3319  Op->VectorList.RegNum = RegNum;
3320  Op->VectorList.Count = Count;
3321  Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3322  Op->StartLoc = S;
3323  Op->EndLoc = E;
3324  return Op;
3325  }
3326 
3327  static std::unique_ptr<ARMOperand>
3328  CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
3329  bool isDoubleSpaced, SMLoc S, SMLoc E) {
3330  auto Op = make_unique<ARMOperand>(k_VectorListIndexed);
3331  Op->VectorList.RegNum = RegNum;
3332  Op->VectorList.Count = Count;
3333  Op->VectorList.LaneIndex = Index;
3334  Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3335  Op->StartLoc = S;
3336  Op->EndLoc = E;
3337  return Op;
3338  }
3339 
3340  static std::unique_ptr<ARMOperand>
3341  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
3342  auto Op = make_unique<ARMOperand>(k_VectorIndex);
3343  Op->VectorIndex.Val = Idx;
3344  Op->StartLoc = S;
3345  Op->EndLoc = E;
3346  return Op;
3347  }
3348 
3349  static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
3350  SMLoc E) {
3351  auto Op = make_unique<ARMOperand>(k_Immediate);
3352  Op->Imm.Val = Val;
3353  Op->StartLoc = S;
3354  Op->EndLoc = E;
3355  return Op;
3356  }
3357 
3358  static std::unique_ptr<ARMOperand>
3359  CreateMem(unsigned BaseRegNum, const MCConstantExpr *OffsetImm,
3360  unsigned OffsetRegNum, ARM_AM::ShiftOpc ShiftType,
3361  unsigned ShiftImm, unsigned Alignment, bool isNegative, SMLoc S,
3362  SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
3363  auto Op = make_unique<ARMOperand>(k_Memory);
3364  Op->Memory.BaseRegNum = BaseRegNum;
3365  Op->Memory.OffsetImm = OffsetImm;
3366  Op->Memory.OffsetRegNum = OffsetRegNum;
3367  Op->Memory.ShiftType = ShiftType;
3368  Op->Memory.ShiftImm = ShiftImm;
3369  Op->Memory.Alignment = Alignment;
3370  Op->Memory.isNegative = isNegative;
3371  Op->StartLoc = S;
3372  Op->EndLoc = E;
3373  Op->AlignmentLoc = AlignmentLoc;
3374  return Op;
3375  }
3376 
3377  static std::unique_ptr<ARMOperand>
3378  CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
3379  unsigned ShiftImm, SMLoc S, SMLoc E) {
3380  auto Op = make_unique<ARMOperand>(k_PostIndexRegister);
3381  Op->PostIdxReg.RegNum = RegNum;
3382  Op->PostIdxReg.isAdd = isAdd;
3383  Op->PostIdxReg.ShiftTy = ShiftTy;
3384  Op->PostIdxReg.ShiftImm = ShiftImm;
3385  Op->StartLoc = S;
3386  Op->EndLoc = E;
3387  return Op;
3388  }
3389 
3390  static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
3391  SMLoc S) {
3392  auto Op = make_unique<ARMOperand>(k_MemBarrierOpt);
3393  Op->MBOpt.Val = Opt;
3394  Op->StartLoc = S;
3395  Op->EndLoc = S;
3396  return Op;
3397  }
3398 
3399  static std::unique_ptr<ARMOperand>
3400  CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
3401  auto Op = make_unique<ARMOperand>(k_InstSyncBarrierOpt);
3402  Op->ISBOpt.Val = Opt;
3403  Op->StartLoc = S;
3404  Op->EndLoc = S;
3405  return Op;
3406  }
3407 
3408  static std::unique_ptr<ARMOperand>
3409  CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S) {
3410  auto Op = make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
3411  Op->TSBOpt.Val = Opt;
3412  Op->StartLoc = S;
3413  Op->EndLoc = S;
3414  return Op;
3415  }
3416 
3417  static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
3418  SMLoc S) {
3419  auto Op = make_unique<ARMOperand>(k_ProcIFlags);
3420  Op->IFlags.Val = IFlags;
3421  Op->StartLoc = S;
3422  Op->EndLoc = S;
3423  return Op;
3424  }
3425 
3426  static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
3427  auto Op = make_unique<ARMOperand>(k_MSRMask);
3428  Op->MMask.Val = MMask;
3429  Op->StartLoc = S;
3430  Op->EndLoc = S;
3431  return Op;
3432  }
3433 
3434  static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
3435  auto Op = make_unique<ARMOperand>(k_BankedReg);
3436  Op->BankedReg.Val = Reg;
3437  Op->StartLoc = S;
3438  Op->EndLoc = S;
3439  return Op;
3440  }
3441 };
3442 
3443 } // end anonymous namespace.
3444 
3445 void ARMOperand::print(raw_ostream &OS) const {
3446  auto RegName = [](unsigned Reg) {
3447  if (Reg)
3449  else
3450  return "noreg";
3451  };
3452 
3453  switch (Kind) {
3454  case k_CondCode:
3455  OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
3456  break;
3457  case k_VPTPred:
3458  OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">";
3459  break;
3460  case k_CCOut:
3461  OS << "<ccout " << RegName(getReg()) << ">";
3462  break;
3463  case k_ITCondMask: {
3464  static const char *const MaskStr[] = {
3465  "(invalid)", "(tttt)", "(ttt)", "(ttte)",
3466  "(tt)", "(ttet)", "(tte)", "(ttee)",
3467  "(t)", "(tett)", "(tet)", "(tete)",
3468  "(te)", "(teet)", "(tee)", "(teee)",
3469  };
3470  assert((ITMask.Mask & 0xf) == ITMask.Mask);
3471  OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
3472  break;
3473  }
3474  case k_CoprocNum:
3475  OS << "<coprocessor number: " << getCoproc() << ">";
3476  break;
3477  case k_CoprocReg:
3478  OS << "<coprocessor register: " << getCoproc() << ">";
3479  break;
3480  case k_CoprocOption:
3481  OS << "<coprocessor option: " << CoprocOption.Val << ">";
3482  break;
3483  case k_MSRMask:
3484  OS << "<mask: " << getMSRMask() << ">";
3485  break;
3486  case k_BankedReg:
3487  OS << "<banked reg: " << getBankedReg() << ">";
3488  break;
3489  case k_Immediate:
3490  OS << *getImm();
3491  break;
3492  case k_MemBarrierOpt:
3493  OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
3494  break;
3495  case k_InstSyncBarrierOpt:
3496  OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
3497  break;
3498  case k_TraceSyncBarrierOpt:
3499  OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
3500  break;
3501  case k_Memory:
3502  OS << "<memory";
3503  if (Memory.BaseRegNum)
3504  OS << " base:" << RegName(Memory.BaseRegNum);
3505  if (Memory.OffsetImm)
3506  OS << " offset-imm:" << *Memory.OffsetImm;
3507  if (Memory.OffsetRegNum)
3508  OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
3509  << RegName(Memory.OffsetRegNum);
3510  if (Memory.ShiftType != ARM_AM::no_shift) {
3511  OS << " shift-type:" << ARM_AM::getShiftOpcStr(Memory.ShiftType);
3512  OS << " shift-imm:" << Memory.ShiftImm;
3513  }
3514  if (Memory.Alignment)
3515  OS << " alignment:" << Memory.Alignment;
3516  OS << ">";
3517  break;
3518  case k_PostIndexRegister:
3519  OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
3520  << RegName(PostIdxReg.RegNum);
3521  if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
3522  OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
3523  << PostIdxReg.ShiftImm;
3524  OS << ">";
3525  break;
3526  case k_ProcIFlags: {
3527  OS << "<ARM_PROC::";
3528  unsigned IFlags = getProcIFlags();
3529  for (int i=2; i >= 0; --i)
3530  if (IFlags & (1 << i))
3531  OS << ARM_PROC::IFlagsToString(1 << i);
3532  OS << ">";
3533  break;
3534  }
3535  case k_Register:
3536  OS << "<register " << RegName(getReg()) << ">";
3537  break;
3538  case k_ShifterImmediate:
3539  OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
3540  << " #" << ShifterImm.Imm << ">";
3541  break;
3542  case k_ShiftedRegister:
3543  OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
3544  << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) << " "
3545  << RegName(RegShiftedReg.ShiftReg) << ">";
3546  break;
3547  case k_ShiftedImmediate:
3548  OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
3549  << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) << " #"
3550  << RegShiftedImm.ShiftImm << ">";
3551  break;
3552  case k_RotateImmediate:
3553  OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
3554  break;
3555  case k_ModifiedImmediate:
3556  OS << "<mod_imm #" << ModImm.Bits << ", #"
3557  << ModImm.Rot << ")>";
3558  break;
3559  case k_ConstantPoolImmediate:
3560  OS << "<constant_pool_imm #" << *getConstantPoolImm();
3561  break;
3562  case k_BitfieldDescriptor:
3563  OS << "<bitfield " << "lsb: " << Bitfield.LSB
3564  << ", width: " << Bitfield.Width << ">";
3565  break;
3566  case k_RegisterList:
3567  case k_RegisterListWithAPSR:
3568  case k_DPRRegisterList:
3569  case k_SPRRegisterList:
3570  case k_FPSRegisterListWithVPR:
3571  case k_FPDRegisterListWithVPR: {
3572  OS << "<register_list ";
3573 
3574  const SmallVectorImpl<unsigned> &RegList = getRegList();
3576  I = RegList.begin(), E = RegList.end(); I != E; ) {
3577  OS << RegName(*I);
3578  if (++I < E) OS << ", ";
3579  }
3580 
3581  OS << ">";
3582  break;
3583  }
3584  case k_VectorList:
3585  OS << "<vector_list " << VectorList.Count << " * "
3586  << RegName(VectorList.RegNum) << ">";
3587  break;
3588  case k_VectorListAllLanes:
3589  OS << "<vector_list(all lanes) " << VectorList.Count << " * "
3590  << RegName(VectorList.RegNum) << ">";
3591  break;
3592  case k_VectorListIndexed:
3593  OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
3594  << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
3595  break;
3596  case k_Token:
3597  OS << "'" << getToken() << "'";
3598  break;
3599  case k_VectorIndex:
3600  OS << "<vectorindex " << getVectorIndex() << ">";
3601  break;
3602  }
3603 }
3604 
3605 /// @name Auto-generated Match Functions
3606 /// {
3607 
3608 static unsigned MatchRegisterName(StringRef Name);
3609 
3610 /// }
3611 
3612 bool ARMAsmParser::ParseRegister(unsigned &RegNo,
3613  SMLoc &StartLoc, SMLoc &EndLoc) {
3614  const AsmToken &Tok = getParser().getTok();
3615  StartLoc = Tok.getLoc();
3616  EndLoc = Tok.getEndLoc();
3617  RegNo = tryParseRegister();
3618 
3619  return (RegNo == (unsigned)-1);
3620 }
3621 
3622 /// Try to parse a register name. The token must be an Identifier when called,
3623 /// and if it is a register name the token is eaten and the register number is
3624 /// returned. Otherwise return -1.
3625 int ARMAsmParser::tryParseRegister() {
3626  MCAsmParser &Parser = getParser();
3627  const AsmToken &Tok = Parser.getTok();
3628  if (Tok.isNot(AsmToken::Identifier)) return -1;
3629 
3630  std::string lowerCase = Tok.getString().lower();
3631  unsigned RegNum = MatchRegisterName(lowerCase);
3632  if (!RegNum) {
3633  RegNum = StringSwitch<unsigned>(lowerCase)
3634  .Case("r13", ARM::SP)
3635  .Case("r14", ARM::LR)
3636  .Case("r15", ARM::PC)
3637  .Case("ip", ARM::R12)
3638  // Additional register name aliases for 'gas' compatibility.
3639  .Case("a1", ARM::R0)
3640  .Case("a2", ARM::R1)
3641  .Case("a3", ARM::R2)
3642  .Case("a4", ARM::R3)
3643  .Case("v1", ARM::R4)
3644  .Case("v2", ARM::R5)
3645  .Case("v3", ARM::R6)
3646  .Case("v4", ARM::R7)
3647  .Case("v5", ARM::R8)
3648  .Case("v6", ARM::R9)
3649  .Case("v7", ARM::R10)
3650  .Case("v8", ARM::R11)
3651  .Case("sb", ARM::R9)
3652  .Case("sl", ARM::R10)
3653  .Case("fp", ARM::R11)
3654  .Default(0);
3655  }
3656  if (!RegNum) {
3657  // Check for aliases registered via .req. Canonicalize to lower case.
3658  // That's more consistent since register names are case insensitive, and
3659  // it's how the original entry was passed in from MC/MCParser/AsmParser.
3660  StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
3661  // If no match, return failure.
3662  if (Entry == RegisterReqs.end())
3663  return -1;
3664  Parser.Lex(); // Eat identifier token.
3665  return Entry->getValue();
3666  }
3667 
3668  // Some FPUs only have 16 D registers, so D16-D31 are invalid
3669  if (!hasD32() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
3670  return -1;
3671 
3672  Parser.Lex(); // Eat identifier token.
3673 
3674  return RegNum;
3675 }
3676 
3677 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
3678 // If a recoverable error occurs, return 1. If an irrecoverable error
3679 // occurs, return -1. An irrecoverable error is one where tokens have been
3680 // consumed in the process of trying to parse the shifter (i.e., when it is
3681 // indeed a shifter operand, but malformed).
3682 int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
3683  MCAsmParser &Parser = getParser();
3684  SMLoc S = Parser.getTok().getLoc();
3685  const AsmToken &Tok = Parser.getTok();
3686  if (Tok.isNot(AsmToken::Identifier))
3687  return -1;
3688 
3689  std::string lowerCase = Tok.getString().lower();
3691  .Case("asl", ARM_AM::lsl)
3692  .Case("lsl", ARM_AM::lsl)
3693  .Case("lsr", ARM_AM::lsr)
3694  .Case("asr", ARM_AM::asr)
3695  .Case("ror", ARM_AM::ror)
3696  .Case("rrx", ARM_AM::rrx)
3698 
3699  if (ShiftTy == ARM_AM::no_shift)
3700  return 1;
3701 
3702  Parser.Lex(); // Eat the operator.
3703 
3704  // The source register for the shift has already been added to the
3705  // operand list, so we need to pop it off and combine it into the shifted
3706  // register operand instead.
3707  std::unique_ptr<ARMOperand> PrevOp(
3708  (ARMOperand *)Operands.pop_back_val().release());
3709  if (!PrevOp->isReg())
3710  return Error(PrevOp->getStartLoc(), "shift must be of a register");
3711  int SrcReg = PrevOp->getReg();
3712 
3713  SMLoc EndLoc;
3714  int64_t Imm = 0;
3715  int ShiftReg = 0;
3716  if (ShiftTy == ARM_AM::rrx) {
3717  // RRX Doesn't have an explicit shift amount. The encoder expects
3718  // the shift register to be the same as the source register. Seems odd,
3719  // but OK.
3720  ShiftReg = SrcReg;
3721  } else {
3722  // Figure out if this is shifted by a constant or a register (for non-RRX).
3723  if (Parser.getTok().is(AsmToken::Hash) ||
3724  Parser.getTok().is(AsmToken::Dollar)) {
3725  Parser.Lex(); // Eat hash.
3726  SMLoc ImmLoc = Parser.getTok().getLoc();
3727  const MCExpr *ShiftExpr = nullptr;
3728  if (getParser().parseExpression(ShiftExpr, EndLoc)) {
3729  Error(ImmLoc, "invalid immediate shift value");
3730  return -1;
3731  }
3732  // The expression must be evaluatable as an immediate.
3733  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
3734  if (!CE) {
3735  Error(ImmLoc, "invalid immediate shift value");
3736  return -1;
3737  }
3738  // Range check the immediate.
3739  // lsl, ror: 0 <= imm <= 31
3740  // lsr, asr: 0 <= imm <= 32
3741  Imm = CE->getValue();
3742  if (Imm < 0 ||
3743  ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
3744  ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
3745  Error(ImmLoc, "immediate shift value out of range");
3746  return -1;
3747  }
3748  // shift by zero is a nop. Always send it through as lsl.
3749  // ('as' compatibility)
3750  if (Imm == 0)
3751  ShiftTy = ARM_AM::lsl;
3752  } else if (Parser.getTok().is(AsmToken::Identifier)) {
3753  SMLoc L = Parser.getTok().getLoc();
3754  EndLoc = Parser.getTok().getEndLoc();
3755  ShiftReg = tryParseRegister();
3756  if (ShiftReg == -1) {
3757  Error(L, "expected immediate or register in shift operand");
3758  return -1;
3759  }
3760  } else {
3761  Error(Parser.getTok().getLoc(),
3762  "expected immediate or register in shift operand");
3763  return -1;
3764  }
3765  }
3766 
3767  if (ShiftReg && ShiftTy != ARM_AM::rrx)
3768  Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
3769  ShiftReg, Imm,
3770  S, EndLoc));
3771  else
3772  Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
3773  S, EndLoc));
3774 
3775  return 0;
3776 }
3777 
3778 /// Try to parse a register name. The token must be an Identifier when called.
3779 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
3780 /// if there is a "writeback". 'true' if it's not a register.
3781 ///
3782 /// TODO this is likely to change to allow different register types and or to
3783 /// parse for a specific register type.
3784 bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
3785  MCAsmParser &Parser = getParser();
3786  SMLoc RegStartLoc = Parser.getTok().getLoc();
3787  SMLoc RegEndLoc = Parser.getTok().getEndLoc();
3788  int RegNo = tryParseRegister();
3789  if (RegNo == -1)
3790  return true;
3791 
3792  Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));
3793 
3794  const AsmToken &ExclaimTok = Parser.getTok();
3795  if (ExclaimTok.is(AsmToken::Exclaim)) {
3796  Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
3797  ExclaimTok.getLoc()));
3798  Parser.Lex(); // Eat exclaim token
3799  return false;
3800  }
3801 
3802  // Also check for an index operand. This is only legal for vector registers,
3803  // but that'll get caught OK in operand matching, so we don't need to
3804  // explicitly filter everything else out here.
3805  if (Parser.getTok().is(AsmToken::LBrac)) {
3806  SMLoc SIdx = Parser.getTok().getLoc();
3807  Parser.Lex(); // Eat left bracket token.
3808 
3809  const MCExpr *ImmVal;
3810  if (getParser().parseExpression(ImmVal))
3811  return true;
3812  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3813  if (!MCE)
3814  return TokError("immediate value expected for vector index");
3815 
3816  if (Parser.getTok().isNot(AsmToken::RBrac))
3817  return Error(Parser.getTok().getLoc(), "']' expected");
3818 
3819  SMLoc E = Parser.getTok().getEndLoc();
3820  Parser.Lex(); // Eat right bracket token.
3821 
3822  Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
3823  SIdx, E,
3824  getContext()));
3825  }
3826 
3827  return false;
3828 }
3829 
3830 /// MatchCoprocessorOperandName - Try to parse an coprocessor related
3831 /// instruction with a symbolic operand name.
3832 /// We accept "crN" syntax for GAS compatibility.
3833 /// <operand-name> ::= <prefix><number>
3834 /// If CoprocOp is 'c', then:
3835 /// <prefix> ::= c | cr
3836 /// If CoprocOp is 'p', then :
3837 /// <prefix> ::= p
3838 /// <number> ::= integer in range [0, 15]
3839 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
3840  // Use the same layout as the tablegen'erated register name matcher. Ugly,
3841  // but efficient.
3842  if (Name.size() < 2 || Name[0] != CoprocOp)
3843  return -1;
3844  Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
3845 
3846  switch (Name.size()) {
3847  default: return -1;
3848  case 1:
3849  switch (Name[0]) {
3850  default: return -1;
3851  case '0': return 0;
3852  case '1': return 1;
3853  case '2': return 2;
3854  case '3': return 3;
3855  case '4': return 4;
3856  case '5': return 5;
3857  case '6': return 6;
3858  case '7': return 7;
3859  case '8': return 8;
3860  case '9': return 9;
3861  }
3862  case 2:
3863  if (Name[0] != '1')
3864  return -1;
3865  switch (Name[1]) {
3866  default: return -1;
3867  // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
3868  // However, old cores (v5/v6) did use them in that way.
3869  case '0': return 10;
3870  case '1': return 11;
3871  case '2': return 12;
3872  case '3': return 13;
3873  case '4': return 14;
3874  case '5': return 15;
3875  }
3876  }
3877 }
3878 
3879 /// parseITCondCode - Try to parse a condition code for an IT instruction.
3881 ARMAsmParser::parseITCondCode(OperandVector &Operands) {
3882  MCAsmParser &Parser = getParser();
3883  SMLoc S = Parser.getTok().getLoc();
3884  const AsmToken &Tok = Parser.getTok();
3885  if (!Tok.is(AsmToken::Identifier))
3886  return MatchOperand_NoMatch;
3887  unsigned CC = ARMCondCodeFromString(Tok.getString());
3888  if (CC == ~0U)
3889  return MatchOperand_NoMatch;
3890  Parser.Lex(); // Eat the token.
3891 
3892  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
3893 
3894  return MatchOperand_Success;
3895 }
3896 
3897 /// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
3898 /// token must be an Identifier when called, and if it is a coprocessor
3899 /// number, the token is eaten and the operand is added to the operand list.
3901 ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
3902  MCAsmParser &Parser = getParser();
3903  SMLoc S = Parser.getTok().getLoc();
3904  const AsmToken &Tok = Parser.getTok();
3905  if (Tok.isNot(AsmToken::Identifier))
3906  return MatchOperand_NoMatch;
3907 
3908  int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p');
3909  if (Num == -1)
3910  return MatchOperand_NoMatch;
3911  // ARMv7 and v8 don't allow cp10/cp11 due to VFP/NEON specific instructions
3912  if ((hasV7Ops() || hasV8Ops()) && (Num == 10 || Num == 11))
3913  return MatchOperand_NoMatch;
3914 
3915  Parser.Lex(); // Eat identifier token.
3916  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
3917  return MatchOperand_Success;
3918 }
3919 
3920 /// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
3921 /// token must be an Identifier when called, and if it is a coprocessor
3922 /// number, the token is eaten and the operand is added to the operand list.
3924 ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
3925  MCAsmParser &Parser = getParser();
3926  SMLoc S = Parser.getTok().getLoc();
3927  const AsmToken &Tok = Parser.getTok();
3928  if (Tok.isNot(AsmToken::Identifier))
3929  return MatchOperand_NoMatch;
3930 
3931  int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c');
3932  if (Reg == -1)
3933  return MatchOperand_NoMatch;
3934 
3935  Parser.Lex(); // Eat identifier token.
3936  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
3937  return MatchOperand_Success;
3938 }
3939 
3940 /// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
3941 /// coproc_option : '{' imm0_255 '}'
3943 ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
3944  MCAsmParser &Parser = getParser();
3945  SMLoc S = Parser.getTok().getLoc();
3946 
3947  // If this isn't a '{', this isn't a coprocessor immediate operand.
3948  if (Parser.getTok().isNot(AsmToken::LCurly))
3949  return MatchOperand_NoMatch;
3950  Parser.Lex(); // Eat the '{'
3951 
3952  const MCExpr *Expr;
3953  SMLoc Loc = Parser.getTok().getLoc();
3954  if (getParser().parseExpression(Expr)) {
3955  Error(Loc, "illegal expression");
3956  return MatchOperand_ParseFail;
3957  }
3958  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3959  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
3960  Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
3961  return MatchOperand_ParseFail;
3962  }
3963  int Val = CE->getValue();
3964 
3965  // Check for and consume the closing '}'
3966  if (Parser.getTok().isNot(AsmToken::RCurly))
3967  return MatchOperand_ParseFail;
3968  SMLoc E = Parser.getTok().getEndLoc();
3969  Parser.Lex(); // Eat the '}'
3970 
3971  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
3972  return MatchOperand_Success;
3973 }
3974 
3975 // For register list parsing, we need to map from raw GPR register numbering
3976 // to the enumeration values. The enumeration values aren't sorted by
3977 // register number due to our using "sp", "lr" and "pc" as canonical names.
3978 static unsigned getNextRegister(unsigned Reg) {
3979  // If this is a GPR, we need to do it manually, otherwise we can rely
3980  // on the sort ordering of the enumeration since the other reg-classes
3981  // are sane.
3982  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3983  return Reg + 1;
3984  switch(Reg) {
3985  default: llvm_unreachable("Invalid GPR number!");
3986  case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2;
3987  case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4;
3988  case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6;
3989  case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8;
3990  case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10;
3991  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
3992  case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR;
3993  case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0;
3994  }
3995 }
3996 
3997 /// Parse a register list.
3998 bool ARMAsmParser::parseRegisterList(OperandVector &Operands,
3999  bool EnforceOrder) {
4000  MCAsmParser &Parser = getParser();
4001  if (Parser.getTok().isNot(AsmToken::LCurly))
4002  return TokError("Token is not a Left Curly Brace");
4003  SMLoc S = Parser.getTok().getLoc();
4004  Parser.Lex(); // Eat '{' token.
4005  SMLoc RegLoc = Parser.getTok().getLoc();
4006 
4007  // Check the first register in the list to see what register class
4008  // this is a list of.
4009  int Reg = tryParseRegister();
4010  if (Reg == -1)
4011  return Error(RegLoc, "register expected");
4012 
4013  // The reglist instructions have at most 16 registers, so reserve
4014  // space for that many.
4015  int EReg = 0;
4017 
4018  // Allow Q regs and just interpret them as the two D sub-registers.
4019  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4020  Reg = getDRegFromQReg(Reg);
4021  EReg = MRI->getEncodingValue(Reg);
4022  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
4023  ++Reg;
4024  }
4025  const MCRegisterClass *RC;
4026  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4027  RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
4028  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
4029  RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
4030  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
4031  RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
4032  else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4033  RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4034  else
4035  return Error(RegLoc, "invalid register in register list");
4036 
4037  // Store the register.
4038  EReg = MRI->getEncodingValue(Reg);
4039  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
4040 
4041  // This starts immediately after the first register token in the list,
4042  // so we can see either a comma or a minus (range separator) as a legal
4043  // next token.
4044  while (Parser.getTok().is(AsmToken::Comma) ||
4045  Parser.getTok().is(AsmToken::Minus)) {
4046  if (Parser.getTok().is(AsmToken::Minus)) {
4047  Parser.Lex(); // Eat the minus.
4048  SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4049  int EndReg = tryParseRegister();
4050  if (EndReg == -1)
4051  return Error(AfterMinusLoc, "register expected");
4052  // Allow Q regs and just interpret them as the two D sub-registers.
4053  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4054  EndReg = getDRegFromQReg(EndReg) + 1;
4055  // If the register is the same as the start reg, there's nothing
4056  // more to do.
4057  if (Reg == EndReg)
4058  continue;
4059  // The register must be in the same register class as the first.
4060  if (!RC->contains(EndReg))
4061  return Error(AfterMinusLoc, "invalid register in register list");
4062  // Ranges must go from low to high.
4063  if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
4064  return Error(AfterMinusLoc, "bad range in register list");
4065 
4066  // Add all the registers in the range to the register list.
4067  while (Reg != EndReg) {
4068  Reg = getNextRegister(Reg);
4069  EReg = MRI->getEncodingValue(Reg);
4070  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
4071  }
4072  continue;
4073  }
4074  Parser.Lex(); // Eat the comma.
4075  RegLoc = Parser.getTok().getLoc();
4076  int OldReg = Reg;
4077  const AsmToken RegTok = Parser.getTok();
4078  Reg = tryParseRegister();
4079  if (Reg == -1)
4080  return Error(RegLoc, "register expected");
4081  // Allow Q regs and just interpret them as the two D sub-registers.
4082  bool isQReg = false;
4083  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4084  Reg = getDRegFromQReg(Reg);
4085  isQReg = true;
4086  }
4087  if (!RC->contains(Reg) &&
4088  RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
4089  ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
4090  // switch the register classes, as GPRwithAPSRnospRegClassID is a partial
4091  // subset of GPRRegClassId except it contains APSR as well.
4092  RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4093  }
4094  if (Reg == ARM::VPR && (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
4095  RC == &ARMMCRegisterClasses[ARM::DPRRegClassID])) {
4096  RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4097  EReg = MRI->getEncodingValue(Reg);
4098  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
4099  continue;
4100  }
4101  // The register must be in the same register class as the first.
4102  if (!RC->contains(Reg))
4103  return Error(RegLoc, "invalid register in register list");
4104  // In most cases, the list must be monotonically increasing. An
4105  // exception is CLRM, which is order-independent anyway, so
4106  // there's no potential for confusion if you write clrm {r2,r1}
4107  // instead of clrm {r1,r2}.
4108  if (EnforceOrder &&
4109  MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
4110  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4111  Warning(RegLoc, "register list not in ascending order");
4112  else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4113  return Error(RegLoc, "register list not in ascending order");
4114  }
4115  if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
4116  Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4117  ") in register list");
4118  continue;
4119  }
4120  // VFP register lists must also be contiguous.
4121  if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
4122  RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
4123  Reg != OldReg + 1)
4124  return Error(RegLoc, "non-contiguous register range");
4125  EReg = MRI->getEncodingValue(Reg);
4126  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
4127  if (isQReg) {
4128  EReg = MRI->getEncodingValue(++Reg);
4129  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
4130  }
4131  }
4132 
4133  if (Parser.getTok().isNot(AsmToken::RCurly))
4134  return Error(Parser.getTok().getLoc(), "'}' expected");
4135  SMLoc E = Parser.getTok().getEndLoc();
4136  Parser.Lex(); // Eat '}' token.
4137 
4138  // Push the register list operand.
4139  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
4140 
4141  // The ARM system instruction variants for LDM/STM have a '^' token here.
4142  if (Parser.getTok().is(AsmToken::Caret)) {
4143  Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
4144  Parser.Lex(); // Eat '^' token.
4145  }
4146 
4147  return false;
4148 }
4149 
4150 // Helper function to parse the lane index for vector lists.
4151 OperandMatchResultTy ARMAsmParser::
4152 parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
4153  MCAsmParser &Parser = getParser();
4154  Index = 0; // Always return a defined index value.
4155  if (Parser.getTok().is(AsmToken::LBrac)) {
4156  Parser.Lex(); // Eat the '['.
4157  if (Parser.getTok().is(AsmToken::RBrac)) {
4158  // "Dn[]" is the 'all lanes' syntax.
4159  LaneKind = AllLanes;
4160  EndLoc = Parser.getTok().getEndLoc();
4161  Parser.Lex(); // Eat the ']'.
4162  return MatchOperand_Success;
4163  }
4164 
4165  // There's an optional '#' token here. Normally there wouldn't be, but
4166  // inline assemble puts one in, and it's friendly to accept that.
4167  if (Parser.getTok().is(AsmToken::Hash))
4168  Parser.Lex(); // Eat '#' or '$'.
4169 
4170  const MCExpr *LaneIndex;
4171  SMLoc Loc = Parser.getTok().getLoc();
4172  if (getParser().parseExpression(LaneIndex)) {
4173  Error(Loc, "illegal expression");
4174  return MatchOperand_ParseFail;
4175  }
4176  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
4177  if (!CE) {
4178  Error(Loc, "lane index must be empty or an integer");
4179  return MatchOperand_ParseFail;
4180  }
4181  if (Parser.getTok().isNot(AsmToken::RBrac)) {
4182  Error(Parser.getTok().getLoc(), "']' expected");
4183  return MatchOperand_ParseFail;
4184  }
4185  EndLoc = Parser.getTok().getEndLoc();
4186  Parser.Lex(); // Eat the ']'.
4187  int64_t Val = CE->getValue();
4188 
4189  // FIXME: Make this range check context sensitive for .8, .16, .32.
4190  if (Val < 0 || Val > 7) {
4191  Error(Parser.getTok().getLoc(), "lane index out of range");
4192  return MatchOperand_ParseFail;
4193  }
4194  Index = Val;
4195  LaneKind = IndexedLane;
4196  return MatchOperand_Success;
4197  }
4198  LaneKind = NoLanes;
4199  return MatchOperand_Success;
4200 }
4201 
4202 // parse a vector register list
4204 ARMAsmParser::parseVectorList(OperandVector &Operands) {
4205  MCAsmParser &Parser = getParser();
4206  VectorLaneTy LaneKind;
4207  unsigned LaneIndex;
4208  SMLoc S = Parser.getTok().getLoc();
4209  // As an extension (to match gas), support a plain D register or Q register
4210  // (without encosing curly braces) as a single or double entry list,
4211  // respectively.
4212  if (Parser.getTok().is(AsmToken::Identifier)) {
4213  SMLoc E = Parser.getTok().getEndLoc();
4214  int Reg = tryParseRegister();
4215  if (Reg == -1)
4216  return MatchOperand_NoMatch;
4217  if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
4218  OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
4219  if (Res != MatchOperand_Success)
4220  return Res;
4221  switch (LaneKind) {
4222  case NoLanes:
4223  Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
4224  break;
4225  case AllLanes:
4226  Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
4227  S, E));
4228  break;
4229  case IndexedLane:
4230  Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
4231  LaneIndex,
4232  false, S, E));
4233  break;
4234  }
4235  return MatchOperand_Success;
4236  }
4237  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4238  Reg = getDRegFromQReg(Reg);
4239  OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
4240  if (Res != MatchOperand_Success)
4241  return Res;
4242  switch (LaneKind) {
4243  case NoLanes:
4244  Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4245  &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4246  Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
4247  break;
4248  case AllLanes:
4249  Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4250  &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4251  Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
4252  S, E));
4253  break;
4254  case IndexedLane:
4255  Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
4256  LaneIndex,
4257  false, S, E));
4258  break;
4259  }
4260  return MatchOperand_Success;
4261  }
4262  Error(S, "vector register expected");
4263  return MatchOperand_ParseFail;
4264  }
4265 
4266  if (Parser.getTok().isNot(AsmToken::LCurly))
4267  return MatchOperand_NoMatch;
4268 
4269  Parser.Lex(); // Eat '{' token.
4270  SMLoc RegLoc = Parser.getTok().getLoc();
4271 
4272  int Reg = tryParseRegister();
4273  if (Reg == -1) {
4274  Error(RegLoc, "register expected");
4275  return MatchOperand_ParseFail;
4276  }
4277  unsigned Count = 1;
4278  int Spacing = 0;
4279  unsigned FirstReg = Reg;
4280  // The list is of D registers, but we also allow Q regs and just interpret
4281  // them as the two D sub-registers.
4282  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4283  FirstReg = Reg = getDRegFromQReg(Reg);
4284  Spacing = 1; // double-spacing requires explicit D registers, otherwise
4285  // it's ambiguous with four-register single spaced.
4286  ++Reg;
4287  ++Count;
4288  }
4289 
4290  SMLoc E;
4291  if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
4292  return MatchOperand_ParseFail;
4293 
4294  while (Parser.getTok().is(AsmToken::Comma) ||
4295  Parser.getTok().is(AsmToken::Minus)) {
4296  if (Parser.getTok().is(AsmToken::Minus)) {
4297  if (!Spacing)
4298  Spacing = 1; // Register range implies a single spaced list.
4299  else if (Spacing == 2) {
4300  Error(Parser.getTok().getLoc(),
4301  "sequential registers in double spaced list");
4302  return MatchOperand_ParseFail;
4303  }
4304  Parser.Lex(); // Eat the minus.
4305  SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4306  int EndReg = tryParseRegister();
4307  if (EndReg == -1) {
4308  Error(AfterMinusLoc, "register expected");
4309  return MatchOperand_ParseFail;
4310  }
4311  // Allow Q regs and just interpret them as the two D sub-registers.
4312  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4313  EndReg = getDRegFromQReg(EndReg) + 1;
4314  // If the register is the same as the start reg, there's nothing
4315  // more to do.
4316  if (Reg == EndReg)
4317  continue;
4318  // The register must be in the same register class as the first.
4319  if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
4320  Error(AfterMinusLoc, "invalid register in register list");
4321  return MatchOperand_ParseFail;
4322  }
4323  // Ranges must go from low to high.
4324  if (Reg > EndReg) {
4325  Error(AfterMinusLoc, "bad range in register list");
4326  return MatchOperand_ParseFail;
4327  }
4328  // Parse the lane specifier if present.
4329  VectorLaneTy NextLaneKind;
4330  unsigned NextLaneIndex;
4331  if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
4333  return MatchOperand_ParseFail;
4334  if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4335  Error(AfterMinusLoc, "mismatched lane index in register list");
4336  return MatchOperand_ParseFail;
4337  }
4338 
4339  // Add all the registers in the range to the register list.
4340  Count += EndReg - Reg;
4341  Reg = EndReg;
4342  continue;
4343  }
4344  Parser.Lex(); // Eat the comma.
4345  RegLoc = Parser.getTok().getLoc();
4346  int OldReg = Reg;
4347  Reg = tryParseRegister();
4348  if (Reg == -1) {
4349  Error(RegLoc, "register expected");
4350  return MatchOperand_ParseFail;
4351  }
4352  // vector register lists must be contiguous.
4353  // It's OK to use the enumeration values directly here rather, as the
4354  // VFP register classes have the enum sorted properly.
4355  //
4356  // The list is of D registers, but we also allow Q regs and just interpret
4357  // them as the two D sub-registers.
4358  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4359  if (!Spacing)
4360  Spacing = 1; // Register range implies a single spaced list.
4361  else if (Spacing == 2) {
4362  Error(RegLoc,
4363  "invalid register in double-spaced list (must be 'D' register')");
4364  return MatchOperand_ParseFail;
4365  }
4366  Reg = getDRegFromQReg(Reg);
4367  if (Reg != OldReg + 1) {
4368  Error(RegLoc, "non-contiguous register range");
4369  return MatchOperand_ParseFail;
4370  }
4371  ++Reg;
4372  Count += 2;
4373  // Parse the lane specifier if present.
4374  VectorLaneTy NextLaneKind;
4375  unsigned NextLaneIndex;
4376  SMLoc LaneLoc = Parser.getTok().getLoc();
4377  if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
4379  return MatchOperand_ParseFail;
4380  if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4381  Error(LaneLoc, "mismatched lane index in register list");
4382  return MatchOperand_ParseFail;
4383  }
4384  continue;
4385  }
4386  // Normal D register.
4387  // Figure out the register spacing (single or double) of the list if
4388  // we don't know it already.
4389  if (!Spacing)
4390  Spacing = 1 + (Reg == OldReg + 2);
4391 
4392  // Just check that it's contiguous and keep going.
4393  if (Reg != OldReg + Spacing) {
4394  Error(RegLoc, "non-contiguous register range");
4395  return MatchOperand_ParseFail;
4396  }
4397  ++Count;
4398  // Parse the lane specifier if present.
4399  VectorLaneTy NextLaneKind;
4400  unsigned NextLaneIndex;
4401  SMLoc EndLoc = Parser.getTok().getLoc();
4402  if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
4403  return MatchOperand_ParseFail;
4404  if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4405  Error(EndLoc, "mismatched lane index in register list");
4406  return MatchOperand_ParseFail;
4407  }
4408  }
4409 
4410  if (Parser.getTok().isNot(AsmToken::RCurly)) {
4411  Error(Parser.getTok().getLoc(), "'}' expected");
4412  return MatchOperand_ParseFail;
4413  }
4414  E = Parser.getTok().getEndLoc();
4415  Parser.Lex(); // Eat '}' token.
4416 
4417  switch (LaneKind) {
4418  case NoLanes:
4419  // Two-register operands have been converted to the
4420  // composite register classes.
4421  if (Count == 2) {
4422  const MCRegisterClass *RC = (Spacing == 1) ?
4423  &ARMMCRegisterClasses[ARM::DPairRegClassID] :
4424  &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
4425  FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
4426  }
4427  Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
4428  (Spacing == 2), S, E));
4429  break;
4430  case AllLanes:
4431  // Two-register operands have been converted to the
4432  // composite register classes.
4433  if (Count == 2) {
4434  const MCRegisterClass *RC = (Spacing == 1) ?
4435  &ARMMCRegisterClasses[ARM::DPairRegClassID] :
4436  &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
4437  FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
4438  }
4439  Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
4440  (Spacing == 2),
4441  S, E));
4442  break;
4443  case IndexedLane:
4444  Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
4445  LaneIndex,
4446  (Spacing == 2),
4447  S, E));
4448  break;
4449  }
4450  return MatchOperand_Success;
4451 }
4452 
4453 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
4455 ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
4456  MCAsmParser &Parser = getParser();
4457  SMLoc S = Parser.getTok().getLoc();
4458  const AsmToken &Tok = Parser.getTok();
4459  unsigned Opt;
4460 
4461  if (Tok.is(AsmToken::Identifier)) {
4462  StringRef OptStr = Tok.getString();
4463 
4464  Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
4465  .Case("sy", ARM_MB::SY)
4466  .Case("st", ARM_MB::ST)
4467  .Case("ld", ARM_MB::LD)
4468  .Case("sh", ARM_MB::ISH)
4469  .Case("ish", ARM_MB::ISH)
4470  .Case("shst", ARM_MB::ISHST)
4471  .Case("ishst", ARM_MB::ISHST)
4472  .Case("ishld", ARM_MB::ISHLD)
4473  .Case("nsh", ARM_MB::NSH)
4474  .Case("un", ARM_MB::NSH)
4475  .Case("nshst", ARM_MB::NSHST)
4476  .Case("nshld", ARM_MB::NSHLD)
4477  .Case("unst", ARM_MB::NSHST)
4478  .Case("osh", ARM_MB::OSH)
4479  .Case("oshst", ARM_MB::OSHST)
4480  .Case("oshld", ARM_MB::OSHLD)
4481  .Default(~0U);
4482 
4483  // ishld, oshld, nshld and ld are only available from ARMv8.
4484  if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
4485  Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
4486  Opt = ~0U;
4487 
4488  if (Opt == ~0U)
4489  return MatchOperand_NoMatch;
4490 
4491  Parser.Lex(); // Eat identifier token.
4492  } else if (Tok.is(AsmToken::Hash) ||
4493  Tok.is(AsmToken::Dollar) ||
4494  Tok.is(AsmToken::Integer)) {
4495  if (Parser.getTok().isNot(AsmToken::Integer))
4496  Parser.Lex(); // Eat '#' or '$'.
4497  SMLoc Loc = Parser.getTok().getLoc();
4498 
4499  const MCExpr *MemBarrierID;
4500  if (getParser().parseExpression(MemBarrierID)) {
4501  Error(Loc, "illegal expression");
4502  return MatchOperand_ParseFail;
4503  }
4504 
4505  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
4506  if (!CE) {
4507  Error(Loc, "constant expression expected");
4508  return MatchOperand_ParseFail;
4509  }
4510 
4511  int Val = CE->getValue();
4512  if (Val & ~0xf) {
4513  Error(Loc, "immediate value out of range");
4514  return MatchOperand_ParseFail;
4515  }
4516 
4517  Opt = ARM_MB::RESERVED_0 + Val;
4518  } else
4519  return MatchOperand_ParseFail;
4520 
4521  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
4522  return MatchOperand_Success;
4523 }
4524 
4526 ARMAsmParser::parseTraceSyncBarrierOptOperand(OperandVector &Operands) {
4527  MCAsmParser &Parser = getParser();
4528  SMLoc S = Parser.getTok().getLoc();
4529  const AsmToken &Tok = Parser.getTok();
4530 
4531  if (Tok.isNot(AsmToken::Identifier))
4532  return MatchOperand_NoMatch;
4533 
4534  if (!Tok.getString().equals_lower("csync"))
4535  return MatchOperand_NoMatch;
4536 
4537  Parser.Lex(); // Eat identifier token.
4538 
4539  Operands.push_back(ARMOperand::CreateTraceSyncBarrierOpt(ARM_TSB::CSYNC, S));
4540  return MatchOperand_Success;
4541 }
4542 
4543 /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
4545 ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
4546  MCAsmParser &Parser = getParser();
4547  SMLoc S = Parser.getTok().getLoc();
4548  const AsmToken &Tok = Parser.getTok();
4549  unsigned Opt;
4550 
4551  if (Tok.is(AsmToken::Identifier)) {
4552  StringRef OptStr = Tok.getString();
4553 
4554  if (OptStr.equals_lower("sy"))
4555  Opt = ARM_ISB::SY;
4556  else
4557  return MatchOperand_NoMatch;
4558 
4559  Parser.Lex(); // Eat identifier token.
4560  } else if (Tok.is(AsmToken::Hash) ||
4561  Tok.is(AsmToken::Dollar) ||
4562  Tok.is(AsmToken::Integer)) {
4563  if (Parser.getTok().isNot(AsmToken::Integer))
4564  Parser.Lex(); // Eat '#' or '$'.
4565  SMLoc Loc = Parser.getTok().getLoc();
4566 
4567  const MCExpr *ISBarrierID;
4568  if (getParser().parseExpression(ISBarrierID)) {
4569  Error(Loc, "illegal expression");
4570  return MatchOperand_ParseFail;
4571  }
4572 
4573  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
4574  if (!CE) {
4575  Error(Loc, "constant expression expected");
4576  return MatchOperand_ParseFail;
4577  }
4578 
4579  int Val = CE->getValue();
4580  if (Val & ~0xf) {
4581  Error(Loc, "immediate value out of range");
4582  return MatchOperand_ParseFail;
4583  }
4584 
4585  Opt = ARM_ISB::RESERVED_0 + Val;
4586  } else
4587  return MatchOperand_ParseFail;
4588 
4589  Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
4590  (ARM_ISB::InstSyncBOpt)Opt, S));
4591  return MatchOperand_Success;
4592 }
4593 
4594 
4595 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
4597 ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
4598  MCAsmParser &Parser = getParser();
4599  SMLoc S = Parser.getTok().getLoc();
4600  const AsmToken &Tok = Parser.getTok();
4601  if (!Tok.is(AsmToken::Identifier))
4602  return MatchOperand_NoMatch;
4603  StringRef IFlagsStr = Tok.getString();
4604 
4605  // An iflags string of "none" is interpreted to mean that none of the AIF
4606  // bits are set. Not a terribly useful instruction, but a valid encoding.
4607  unsigned IFlags = 0;
4608  if (IFlagsStr != "none") {
4609  for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
4610  unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1).lower())
4611  .Case("a", ARM_PROC::A)
4612  .Case("i", ARM_PROC::I)
4613  .Case("f", ARM_PROC::F)
4614  .Default(~0U);
4615 
4616  // If some specific iflag is already set, it means that some letter is
4617  // present more than once, this is not acceptable.
4618  if (Flag == ~0U || (IFlags & Flag))
4619  return MatchOperand_NoMatch;
4620 
4621  IFlags |= Flag;
4622  }
4623  }
4624 
4625  Parser.Lex(); // Eat identifier token.
4626  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
4627  return MatchOperand_Success;
4628 }
4629 
/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
///
/// Accepts, in order of preference:
///  - a bare integer in [0, 255] used directly as the SYSm value;
///  - on M-class targets, a named system register looked up in the
///    generated sysreg table (subject to the subtarget's features);
///  - otherwise an A/R-class "apsr"/"cpsr"/"spsr" spelling with an
///    optional "_<flags>" suffix, encoded as a 5-bit mask value.
ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();

  // A raw integer is taken as the SYSm encoding directly.
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val = Tok.getIntVal();
    if (Val > 255 || Val < 0) {
      return MatchOperand_NoMatch;
    }
    unsigned SYSmvalue = Val & 0xFF;
    Parser.Lex();
    Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
    return MatchOperand_Success;
  }

  if (!Tok.is(AsmToken::Identifier))
    return MatchOperand_NoMatch;
  StringRef Mask = Tok.getString();

  // M-class cores use named special registers; the lookup is
  // case-insensitive and gated on the required subtarget features.
  if (isMClass()) {
    auto TheReg = ARMSysReg::lookupMClassSysRegByName(Mask.lower());
    if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
      return MatchOperand_NoMatch;

    unsigned SYSmvalue = TheReg->Encoding & 0xFFF;

    Parser.Lex(); // Eat identifier token.
    Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
    return MatchOperand_Success;
  }

  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
  size_t Start = 0, Next = Mask.find('_');
  StringRef Flags = "";
  std::string SpecReg = Mask.slice(Start, Next).lower();
  if (Next != StringRef::npos)
    Flags = Mask.slice(Next+1, Mask.size());

  // FlagsVal contains the complete mask:
  // 3-0: Mask
  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  unsigned FlagsVal = 0;

  if (SpecReg == "apsr") {
    // APSR accepts only the fixed combinations below; plain "apsr" (no
    // flags) defaults to the nzcvq (CPSR_f) mask.
    FlagsVal = StringSwitch<unsigned>(Flags)
    .Case("nzcvq", 0x8) // same as CPSR_f
    .Case("g", 0x4) // same as CPSR_s
    .Case("nzcvqg", 0xc) // same as CPSR_fs
    .Default(~0U);

    if (FlagsVal == ~0U) {
      if (!Flags.empty())
        return MatchOperand_NoMatch;
      else
        FlagsVal = 8; // No flag
    }
  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
    // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
    if (Flags == "all" || Flags == "")
      Flags = "fc";
    // Each letter contributes one bit of the 4-bit field mask.
    for (int i = 0, e = Flags.size(); i != e; ++i) {
      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
      .Case("c", 1)
      .Case("x", 2)
      .Case("s", 4)
      .Case("f", 8)
      .Default(~0U);

      // If some specific flag is already set, it means that some letter is
      // present more than once, this is not acceptable.
      if (Flag == ~0U || (FlagsVal & Flag))
        return MatchOperand_NoMatch;
      FlagsVal |= Flag;
    }
  } else // No match for special register.
    return MatchOperand_NoMatch;

  // Special register without flags is NOT equivalent to "fc" flags.
  // NOTE: This is a divergence from gas' behavior. Uncommenting the following
  // two lines would enable gas compatibility at the expense of breaking
  // round-tripping.
  //
  // if (!FlagsVal)
  //  FlagsVal = 0x9;

  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  if (SpecReg == "spsr")
    FlagsVal |= 16;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
  return MatchOperand_Success;
}
4726 
4727 /// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
4728 /// use in the MRS/MSR instructions added to support virtualization.
4730 ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
4731  MCAsmParser &Parser = getParser();
4732  SMLoc S = Parser.getTok().getLoc();
4733  const AsmToken &Tok = Parser.getTok();
4734  if (!Tok.is(AsmToken::Identifier))
4735  return MatchOperand_NoMatch;
4736  StringRef RegName = Tok.getString();
4737 
4738  auto TheReg = ARMBankedReg::lookupBankedRegByName(RegName.lower());
4739  if (!TheReg)
4740  return MatchOperand_NoMatch;
4741  unsigned Encoding = TheReg->Encoding;
4742 
4743  Parser.Lex(); // Eat identifier token.
4744  Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
4745  return MatchOperand_Success;
4746 }
4747 
4749 ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low,
4750  int High) {
4751  MCAsmParser &Parser = getParser();
4752  const AsmToken &Tok = Parser.getTok();
4753  if (Tok.isNot(AsmToken::Identifier)) {
4754  Error(Parser.getTok().getLoc(), Op + " operand expected.");
4755  return MatchOperand_ParseFail;
4756  }
4757  StringRef ShiftName = Tok.getString();
4758  std::string LowerOp = Op.lower();
4759  std::string UpperOp = Op.upper();
4760  if (ShiftName != LowerOp && ShiftName != UpperOp) {
4761  Error(Parser.getTok().getLoc(), Op + " operand expected.");
4762  return MatchOperand_ParseFail;
4763  }
4764  Parser.Lex(); // Eat shift type token.
4765 
4766  // There must be a '#' and a shift amount.
4767  if (Parser.getTok().isNot(AsmToken::Hash) &&
4768  Parser.getTok().isNot(AsmToken::Dollar)) {
4769  Error(Parser.getTok().getLoc(), "'#' expected");
4770  return MatchOperand_ParseFail;
4771  }
4772  Parser.Lex(); // Eat hash token.
4773 
4774  const MCExpr *ShiftAmount;
4775  SMLoc Loc = Parser.getTok().getLoc();
4776  SMLoc EndLoc;
4777  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4778  Error(Loc, "illegal expression");
4779  return MatchOperand_ParseFail;
4780  }
4781  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4782  if (!CE) {
4783  Error(Loc, "constant expression expected");
4784  return MatchOperand_ParseFail;
4785  }
4786  int Val = CE->getValue();
4787  if (Val < Low || Val > High) {
4788  Error(Loc, "immediate value out of range");
4789  return MatchOperand_ParseFail;
4790  }
4791 
4792  Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
4793 
4794  return MatchOperand_Success;
4795 }
4796 
4798 ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
4799  MCAsmParser &Parser = getParser();
4800  const AsmToken &Tok = Parser.getTok();
4801  SMLoc S = Tok.getLoc();
4802  if (Tok.isNot(AsmToken::Identifier)) {
4803  Error(S, "'be' or 'le' operand expected");
4804  return MatchOperand_ParseFail;
4805  }
4806  int Val = StringSwitch<int>(Tok.getString().lower())
4807  .Case("be", 1)
4808  .Case("le", 0)
4809  .Default(-1);
4810  Parser.Lex(); // Eat the token.
4811 
4812  if (Val == -1) {
4813  Error(S, "'be' or 'le' operand expected");
4814  return MatchOperand_ParseFail;
4815  }
4816  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val,
4817  getContext()),
4818  S, Tok.getEndLoc()));
4819  return MatchOperand_Success;
4820 }
4821 
4822 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
4823 /// instructions. Legal values are:
4824 /// lsl #n 'n' in [0,31]
4825 /// asr #n 'n' in [1,32]
4826 /// n == 32 encoded as n == 0.
4828 ARMAsmParser::parseShifterImm(OperandVector &Operands) {
4829  MCAsmParser &Parser = getParser();
4830  const AsmToken &Tok = Parser.getTok();
4831  SMLoc S = Tok.getLoc();
4832  if (Tok.isNot(AsmToken::Identifier)) {
4833  Error(S, "shift operator 'asr' or 'lsl' expected");
4834  return MatchOperand_ParseFail;
4835  }
4836  StringRef ShiftName = Tok.getString();
4837  bool isASR;
4838  if (ShiftName == "lsl" || ShiftName == "LSL")
4839  isASR = false;
4840  else if (ShiftName == "asr" || ShiftName == "ASR")
4841  isASR = true;
4842  else {
4843  Error(S, "shift operator 'asr' or 'lsl' expected");
4844  return MatchOperand_ParseFail;
4845  }
4846  Parser.Lex(); // Eat the operator.
4847 
4848  // A '#' and a shift amount.
4849  if (Parser.getTok().isNot(AsmToken::Hash) &&
4850  Parser.getTok().isNot(AsmToken::Dollar)) {
4851  Error(Parser.getTok().getLoc(), "'#' expected");
4852  return MatchOperand_ParseFail;
4853  }
4854  Parser.Lex(); // Eat hash token.
4855  SMLoc ExLoc = Parser.getTok().getLoc();
4856 
4857  const MCExpr *ShiftAmount;
4858  SMLoc EndLoc;
4859  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4860  Error(ExLoc, "malformed shift expression");
4861  return MatchOperand_ParseFail;
4862  }
4863  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4864  if (!CE) {
4865  Error(ExLoc, "shift amount must be an immediate");
4866  return MatchOperand_ParseFail;
4867  }
4868 
4869  int64_t Val = CE->getValue();
4870  if (isASR) {
4871  // Shift amount must be in [1,32]
4872  if (Val < 1 || Val > 32) {
4873  Error(ExLoc, "'asr' shift amount must be in range [1,32]");
4874  return MatchOperand_ParseFail;
4875  }
4876  // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
4877  if (isThumb() && Val == 32) {
4878  Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
4879  return MatchOperand_ParseFail;
4880  }
4881  if (Val == 32) Val = 0;
4882  } else {
4883  // Shift amount must be in [1,32]
4884  if (Val < 0 || Val > 31) {
4885  Error(ExLoc, "'lsr' shift amount must be in range [0,31]");
4886  return MatchOperand_ParseFail;
4887  }
4888  }
4889 
4890  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
4891 
4892  return MatchOperand_Success;
4893 }
4894 
4895 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
4896 /// of instructions. Legal values are:
4897 /// ror #n 'n' in {0, 8, 16, 24}
4899 ARMAsmParser::parseRotImm(OperandVector &Operands) {
4900  MCAsmParser &Parser = getParser();
4901  const AsmToken &Tok = Parser.getTok();
4902  SMLoc S = Tok.getLoc();
4903  if (Tok.isNot(AsmToken::Identifier))
4904  return MatchOperand_NoMatch;
4905  StringRef ShiftName = Tok.getString();
4906  if (ShiftName != "ror" && ShiftName != "ROR")
4907  return MatchOperand_NoMatch;
4908  Parser.Lex(); // Eat the operator.
4909 
4910  // A '#' and a rotate amount.
4911  if (Parser.getTok().isNot(AsmToken::Hash) &&
4912  Parser.getTok().isNot(AsmToken::Dollar)) {
4913  Error(Parser.getTok().getLoc(), "'#' expected");
4914  return MatchOperand_ParseFail;
4915  }
4916  Parser.Lex(); // Eat hash token.
4917  SMLoc ExLoc = Parser.getTok().getLoc();
4918 
4919  const MCExpr *ShiftAmount;
4920  SMLoc EndLoc;
4921  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4922  Error(ExLoc, "malformed rotate expression");
4923  return MatchOperand_ParseFail;
4924  }
4925  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4926  if (!CE) {
4927  Error(ExLoc, "rotate amount must be an immediate");
4928  return MatchOperand_ParseFail;
4929  }
4930 
4931  int64_t Val = CE->getValue();
4932  // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
4933  // normally, zero is represented in asm by omitting the rotate operand
4934  // entirely.
4935  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
4936  Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
4937  return MatchOperand_ParseFail;
4938  }
4939 
4940  Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
4941 
4942  return MatchOperand_Success;
4943 }
4944 
4946 ARMAsmParser::parseModImm(OperandVector &Operands) {
4947  MCAsmParser &Parser = getParser();
4948  MCAsmLexer &Lexer = getLexer();
4949  int64_t Imm1, Imm2;
4950 
4951  SMLoc S = Parser.getTok().getLoc();
4952 
4953  // 1) A mod_imm operand can appear in the place of a register name:
4954  // add r0, #mod_imm
4955  // add r0, r0, #mod_imm
4956  // to correctly handle the latter, we bail out as soon as we see an
4957  // identifier.
4958  //
4959  // 2) Similarly, we do not want to parse into complex operands:
4960  // mov r0, #mod_imm
4961  // mov r0, :lower16:(_foo)
4962  if (Parser.getTok().is(AsmToken::Identifier) ||
4963  Parser.getTok().is(AsmToken::Colon))
4964  return MatchOperand_NoMatch;
4965 
4966  // Hash (dollar) is optional as per the ARMARM
4967  if (Parser.getTok().is(AsmToken::Hash) ||
4968  Parser.getTok().is(AsmToken::Dollar)) {
4969  // Avoid parsing into complex operands (#:)
4970  if (Lexer.peekTok().is(AsmToken::Colon))
4971  return MatchOperand_NoMatch;
4972 
4973  // Eat the hash (dollar)
4974  Parser.Lex();
4975  }
4976 
4977  SMLoc Sx1, Ex1;
4978  Sx1 = Parser.getTok().getLoc();
4979  const MCExpr *Imm1Exp;
4980  if (getParser().parseExpression(Imm1Exp, Ex1)) {
4981  Error(Sx1, "malformed expression");
4982  return MatchOperand_ParseFail;
4983  }
4984 
4985  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
4986 
4987  if (CE) {
4988  // Immediate must fit within 32-bits
4989  Imm1 = CE->getValue();
4990  int Enc = ARM_AM::getSOImmVal(Imm1);
4991  if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
4992  // We have a match!
4993  Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
4994  (Enc & 0xF00) >> 7,
4995  Sx1, Ex1));
4996  return MatchOperand_Success;
4997  }
4998 
4999  // We have parsed an immediate which is not for us, fallback to a plain
5000  // immediate. This can happen for instruction aliases. For an example,
5001  // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
5002  // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
5003  // instruction with a mod_imm operand. The alias is defined such that the
5004  // parser method is shared, that's why we have to do this here.
5005  if (Parser.getTok().is(AsmToken::EndOfStatement)) {
5006  Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5007  return MatchOperand_Success;
5008  }
5009  } else {
5010  // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
5011  // MCFixup). Fallback to a plain immediate.
5012  Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5013  return MatchOperand_Success;
5014  }
5015 
5016  // From this point onward, we expect the input to be a (#bits, #rot) pair
5017  if (Parser.getTok().isNot(AsmToken::Comma)) {
5018  Error(Sx1, "expected modified immediate operand: #[0, 255], #even[0-30]");
5019  return MatchOperand_ParseFail;
5020  }
5021 
5022  if (Imm1 & ~0xFF) {
5023  Error(Sx1, "immediate operand must a number in the range [0, 255]");
5024  return MatchOperand_ParseFail;
5025  }
5026 
5027  // Eat the comma
5028  Parser.Lex();
5029 
5030  // Repeat for #rot
5031  SMLoc Sx2, Ex2;
5032  Sx2 = Parser.getTok().getLoc();
5033 
5034  // Eat the optional hash (dollar)
5035  if (Parser.getTok().is(AsmToken::Hash) ||
5036  Parser.getTok().is(AsmToken::Dollar))
5037  Parser.Lex();
5038 
5039  const MCExpr *Imm2Exp;
5040  if (getParser().parseExpression(Imm2Exp, Ex2)) {
5041  Error(Sx2, "malformed expression");
5042  return MatchOperand_ParseFail;
5043  }
5044 
5045  CE = dyn_cast<MCConstantExpr>(Imm2Exp);
5046 
5047  if (CE) {
5048  Imm2 = CE->getValue();
5049  if (!(Imm2 & ~0x1E)) {
5050  // We have a match!
5051  Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
5052  return MatchOperand_Success;
5053  }
5054  Error(Sx2, "immediate operand must an even number in the range [0, 30]");
5055  return MatchOperand_ParseFail;
5056  } else {
5057  Error(Sx2, "constant expression expected");
5058  return MatchOperand_ParseFail;
5059  }
5060 }
5061 
/// Parse a bitfield descriptor for BFC/BFI/SBFX/UBFX-style instructions:
/// "#<lsb>, #<width>" with lsb in [0,31] and width in [1, 32-lsb]. Pushes
/// a single bitfield operand carrying both values.
ARMAsmParser::parseBitfield(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  // The bitfield descriptor is really two operands, the LSB and the width.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *LSBExpr;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().parseExpression(LSBExpr)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
  if (!CE) {
    Error(E, "'lsb' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t LSB = CE->getValue();
  // The LSB must be in the range [0,31]
  if (LSB < 0 || LSB > 31) {
    Error(E, "'lsb' operand must be in the range [0,31]");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getLoc();

  // Expect another immediate operand.
  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "too few operands");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat comma token.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *WidthExpr;
  SMLoc EndLoc;
  if (getParser().parseExpression(WidthExpr, EndLoc)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  CE = dyn_cast<MCConstantExpr>(WidthExpr);
  if (!CE) {
    Error(E, "'width' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t Width = CE->getValue();
  // The width must be in the range [1,32-lsb]
  if (Width < 1 || Width > 32 - LSB) {
    Error(E, "'width' operand must be in the range [1,32-lsb]");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));

  return MatchOperand_Success;
}
5130 
5132 ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
5133  // Check for a post-index addressing register operand. Specifically:
5134  // postidx_reg := '+' register {, shift}
5135  // | '-' register {, shift}
5136  // | register {, shift}
5137 
5138  // This method must return MatchOperand_NoMatch without consuming any tokens
5139  // in the case where there is no match, as other alternatives take other
5140  // parse methods.
5141  MCAsmParser &Parser = getParser();
5142  AsmToken Tok = Parser.getTok();
5143  SMLoc S = Tok.getLoc();
5144  bool haveEaten = false;
5145  bool isAdd = true;
5146  if (Tok.is(AsmToken::Plus)) {
5147  Parser.Lex(); // Eat the '+' token.
5148  haveEaten = true;
5149  } else if (Tok.is(AsmToken::Minus)) {
5150  Parser.Lex(); // Eat the '-' token.
5151  isAdd = false;
5152  haveEaten = true;
5153  }
5154 
5155  SMLoc E = Parser.getTok().getEndLoc();
5156  int Reg = tryParseRegister();
5157  if (Reg == -1) {
5158  if (!haveEaten)
5159  return MatchOperand_NoMatch;
5160  Error(Parser.getTok().getLoc(), "register expected");
5161  return MatchOperand_ParseFail;
5162  }
5163 
5165  unsigned ShiftImm = 0;
5166  if (Parser.getTok().is(AsmToken::Comma)) {
5167  Parser.Lex(); // Eat the ','.
5168  if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
5169  return MatchOperand_ParseFail;
5170 
5171  // FIXME: Only approximates end...may include intervening whitespace.
5172  E = Parser.getTok().getLoc();
5173  }
5174 
5175  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
5176  ShiftImm, S, E));
5177 
5178  return MatchOperand_Success;
5179 }
5180 
5182 ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
5183  // Check for a post-index addressing register operand. Specifically:
5184  // am3offset := '+' register
5185  // | '-' register
5186  // | register
5187  // | # imm
5188  // | # + imm
5189  // | # - imm
5190 
5191  // This method must return MatchOperand_NoMatch without consuming any tokens
5192  // in the case where there is no match, as other alternatives take other
5193  // parse methods.
5194  MCAsmParser &Parser = getParser();
5195  AsmToken Tok = Parser.getTok();
5196  SMLoc S = Tok.getLoc();
5197 
5198  // Do immediates first, as we always parse those if we have a '#'.
5199  if (Parser.getTok().is(AsmToken::Hash) ||
5200  Parser.getTok().is(AsmToken::Dollar)) {
5201  Parser.Lex(); // Eat '#' or '$'.
5202  // Explicitly look for a '-', as we need to encode negative zero
5203  // differently.
5204  bool isNegative = Parser.getTok().is(AsmToken::Minus);
5205  const MCExpr *Offset;
5206  SMLoc E;
5207  if (getParser().parseExpression(Offset, E))
5208  return MatchOperand_ParseFail;
5210  if (!CE) {
5211  Error(S, "constant expression expected");
5212  return MatchOperand_ParseFail;
5213  }
5214  // Negative zero is encoded as the flag value
5215  // std::numeric_limits<int32_t>::min().
5216  int32_t Val = CE->getValue();
5217  if (isNegative && Val == 0)
5218  Val = std::numeric_limits<int32_t>::min();
5219 
5220  Operands.push_back(
5221  ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));
5222 
5223  return MatchOperand_Success;
5224  }
5225 
5226  bool haveEaten = false;
5227  bool isAdd = true;
5228  if (Tok.is(AsmToken::Plus)) {
5229  Parser.Lex(); // Eat the '+' token.
5230  haveEaten = true;
5231  } else if (Tok.is(AsmToken::Minus)) {
5232  Parser.Lex(); // Eat the '-' token.
5233  isAdd = false;
5234  haveEaten = true;
5235  }
5236 
5237  Tok = Parser.getTok();
5238  int Reg = tryParseRegister();
5239  if (Reg == -1) {
5240  if (!haveEaten)
5241  return MatchOperand_NoMatch;
5242  Error(Tok.getLoc(), "register expected");
5243  return MatchOperand_ParseFail;
5244  }
5245 
5246  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
5247  0, S, Tok.getEndLoc()));
5248 
5249  return MatchOperand_Success;
5250 }
5251 
/// Convert parsed operands to MCInst. Needed here because this instruction
/// only has two register operands, but multiplication is commutative so
/// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
                                    const OperandVector &Operands) {
  // Operands[3] is Rd, Operands[1] the optional CC-out, Operands[2] the
  // condition code (see the add*Operands calls below); Operands[4]/[5] are
  // the source registers.
  ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
  ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
  // If we have a three-operand form, make sure to set Rn to be the operand
  // that isn't the same as Rd.
  unsigned RegOp = 4;
  if (Operands.size() == 6 &&
      ((ARMOperand &)*Operands[4]).getReg() ==
          ((ARMOperand &)*Operands[3]).getReg())
    RegOp = 5;
  ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
  // Duplicate the destination register (operand 0 of the MCInst) as the
  // final source register.
  Inst.addOperand(Inst.getOperand(0));
  ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
}
5270 
/// Convert a parsed Thumb branch into the correct opcode: pick the
/// conditional vs. unconditional form based on IT-block placement and the
/// condition code, then widen tB/tBcc to their 32-bit t2 equivalents when
/// the target offset does not fit the 16-bit encoding.
void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
                                    const OperandVector &Operands) {
  // Locate the condition-code and immediate operands for this opcode.
  int CondOp = -1, ImmOp = -1;
  switch(Inst.getOpcode()) {
    case ARM::tB:
    case ARM::tBcc: CondOp = 1; ImmOp = 2; break;

    case ARM::t2B:
    case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;

    default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
  }
  // first decide whether or not the branch should be conditional
  // by looking at it's location relative to an IT block
  if(inITBlock()) {
    // inside an IT block we cannot have any conditional branches. any
    // such instructions needs to be converted to unconditional form
    switch(Inst.getOpcode()) {
      case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
      case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
    }
  } else {
    // outside IT blocks we can only have unconditional branches with AL
    // condition code or conditional branches with non-AL condition code
    unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
    switch(Inst.getOpcode()) {
      case ARM::tB:
      case ARM::tBcc:
        Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
        break;
      case ARM::t2B:
      case ARM::t2Bcc:
        Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
        break;
    }
  }

  // now decide on encoding size based on branch target range
  switch(Inst.getOpcode()) {
    // classify tB as either t2B or t1B based on range of immediate operand
    case ARM::tB: {
      ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
      // Widening requires Thumb2 support (v8-M baseline or later).
      if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
        Inst.setOpcode(ARM::t2B);
      break;
    }
    // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
    case ARM::tBcc: {
      ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
      if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
        Inst.setOpcode(ARM::t2Bcc);
      break;
    }
  }
  // Finally append the immediate and condition-code operands to the MCInst.
  ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
  ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
}
5328 
5329 /// Parse an ARM memory expression, return false if successful else return true
5330 /// or an error. The first token must be a '[' when called.
5331 bool ARMAsmParser::parseMemory(OperandVector &Operands) {
5332  MCAsmParser &Parser = getParser();
5333  SMLoc S, E;
5334  if (Parser.getTok().