1 //===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 
10 #include "ARMFeatures.h"
11 #include "Utils/ARMBaseInfo.h"
14 #include "MCTargetDesc/ARMMCExpr.h"
16 #include "llvm/ADT/APFloat.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/StringMap.h"
22 #include "llvm/ADT/StringRef.h"
23 #include "llvm/ADT/StringSwitch.h"
24 #include "llvm/ADT/Triple.h"
25 #include "llvm/ADT/Twine.h"
26 #include "llvm/MC/MCContext.h"
27 #include "llvm/MC/MCExpr.h"
28 #include "llvm/MC/MCInst.h"
29 #include "llvm/MC/MCInstrDesc.h"
30 #include "llvm/MC/MCInstrInfo.h"
38 #include "llvm/MC/MCRegisterInfo.h"
39 #include "llvm/MC/MCSection.h"
40 #include "llvm/MC/MCStreamer.h"
42 #include "llvm/MC/MCSymbol.h"
45 #include "llvm/Support/ARMEHABI.h"
46 #include "llvm/Support/Casting.h"
48 #include "llvm/Support/Compiler.h"
51 #include "llvm/Support/SMLoc.h"
55 #include <algorithm>
56 #include <cassert>
57 #include <cstddef>
58 #include <cstdint>
59 #include <iterator>
60 #include <limits>
61 #include <memory>
62 #include <string>
63 #include <utility>
64 #include <vector>
65 
66 using namespace llvm;
67 
68 namespace {
69 
70 enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
71 
72 static cl::opt<ImplicitItModeTy> ImplicitItMode(
73  "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
74  cl::desc("Allow conditional instructions outside of an IT block"),
75  cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
76  "Accept in both ISAs, emit implicit ITs in Thumb"),
77  clEnumValN(ImplicitItModeTy::Never, "never",
78  "Warn in ARM, reject in Thumb"),
79  clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
80  "Accept in ARM, reject in Thumb"),
81  clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
82  "Warn in ARM, emit implicit ITs in Thumb")));
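// As a cl::opt, this mode is normally selected on the command line of a tool
// that links this parser (for instance "llvm-mc ... -arm-implicit-it=thumb");
// front ends may expose it under a different driver spelling.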
83 
84 static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
85  cl::init(false));
86 
87 enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
88 
89 class UnwindContext {
90  using Locs = SmallVector<SMLoc, 4>;
91 
92  MCAsmParser &Parser;
93  Locs FnStartLocs;
94  Locs CantUnwindLocs;
95  Locs PersonalityLocs;
96  Locs PersonalityIndexLocs;
97  Locs HandlerDataLocs;
98  int FPReg;
99 
100 public:
101  UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
102 
103  bool hasFnStart() const { return !FnStartLocs.empty(); }
104  bool cantUnwind() const { return !CantUnwindLocs.empty(); }
105  bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
106 
107  bool hasPersonality() const {
108  return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
109  }
110 
111  void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
112  void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
113  void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
114  void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
115  void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
116 
117  void saveFPReg(int Reg) { FPReg = Reg; }
118  int getFPReg() const { return FPReg; }
119 
120  void emitFnStartLocNotes() const {
121  for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end();
122  FI != FE; ++FI)
123  Parser.Note(*FI, ".fnstart was specified here");
124  }
125 
126  void emitCantUnwindLocNotes() const {
127  for (Locs::const_iterator UI = CantUnwindLocs.begin(),
128  UE = CantUnwindLocs.end(); UI != UE; ++UI)
129  Parser.Note(*UI, ".cantunwind was specified here");
130  }
131 
132  void emitHandlerDataLocNotes() const {
133  for (Locs::const_iterator HI = HandlerDataLocs.begin(),
134  HE = HandlerDataLocs.end(); HI != HE; ++HI)
135  Parser.Note(*HI, ".handlerdata was specified here");
136  }
137 
138  void emitPersonalityLocNotes() const {
139  for (Locs::const_iterator PI = PersonalityLocs.begin(),
140  PE = PersonalityLocs.end(),
141  PII = PersonalityIndexLocs.begin(),
142  PIE = PersonalityIndexLocs.end();
143  PI != PE || PII != PIE;) {
144  if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
145  Parser.Note(*PI++, ".personality was specified here");
146  else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
147  Parser.Note(*PII++, ".personalityindex was specified here");
148  else
149  llvm_unreachable(".personality and .personalityindex cannot be "
150  "at the same location");
151  }
152  }
153 
154  void reset() {
155  FnStartLocs = Locs();
156  CantUnwindLocs = Locs();
157  PersonalityLocs = Locs();
158  HandlerDataLocs = Locs();
159  PersonalityIndexLocs = Locs();
160  FPReg = ARM::SP;
161  }
162 };
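// Illustrative use: for an EHABI-annotated function such as
//   .fnstart
//   .personality __gxx_personality_v0
//   .handlerdata
//   .fnend
// the directive parsers below call recordFnStart/recordPersonality/
// recordHandlerData as each directive is seen, so later diagnostics (for
// example a stray second .fnstart) can point back at the earlier location
// through the emit*LocNotes helpers.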
163 
164 class ARMAsmParser : public MCTargetAsmParser {
165  const MCInstrInfo &MII;
166  const MCRegisterInfo *MRI;
167  UnwindContext UC;
168 
169  ARMTargetStreamer &getTargetStreamer() {
170  assert(getParser().getStreamer().getTargetStreamer() &&
171  "do not have a target streamer");
172  MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
173  return static_cast<ARMTargetStreamer &>(TS);
174  }
175 
176  // Map of register aliases registered via the .req directive.
177  StringMap<unsigned> RegisterReqs;
178 
179  bool NextSymbolIsThumb;
180 
181  bool useImplicitITThumb() const {
182  return ImplicitItMode == ImplicitItModeTy::Always ||
183  ImplicitItMode == ImplicitItModeTy::ThumbOnly;
184  }
185 
186  bool useImplicitITARM() const {
187  return ImplicitItMode == ImplicitItModeTy::Always ||
188  ImplicitItMode == ImplicitItModeTy::ARMOnly;
189  }
190 
191  struct {
192  ARMCC::CondCodes Cond; // Condition for IT block.
193  unsigned Mask:4; // Condition mask for instructions.
194  // Starting at first 1 (from lsb).
195  // '1' condition as indicated in IT.
196  // '0' inverse of condition (else).
197  // Count of instructions in IT block is
198  // 4 - trailingzeroes(mask)
199  // Note that this does not have the same encoding
200  // as in the IT instruction, which also depends
201  // on the low bit of the condition code.
202 
203  unsigned CurPosition; // Current position in parsing of IT
204  // block. In range [0,4], with 0 being the IT
205  // instruction itself. Initialized according to
206  // count of instructions in block. ~0U if no
207  // active IT block.
208 
209  bool IsExplicit; // true - The IT instruction was present in the
210  // input, we should not modify it.
211  // false - The IT instruction was added
212  // implicitly, we can extend it if that
213  // would be legal.
214  } ITState;
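// For example, an explicit "itte eq" block is tracked with Cond == ARMCC::EQ
// and Mask == 0b1010: bit 3 ('1') is the second slot (then), bit 2 ('0') is
// the third slot (else), and the trailing 1 at bit 1 terminates the mask,
// giving 4 - 1 == 3 instructions in the block.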
215 
216  SmallVector<MCInst, 4> PendingConditionalInsts;
217 
218  void flushPendingInstructions(MCStreamer &Out) override {
219  if (!inImplicitITBlock()) {
220  assert(PendingConditionalInsts.size() == 0);
221  return;
222  }
223 
224  // Emit the IT instruction
225  unsigned Mask = getITMaskEncoding();
226  MCInst ITInst;
227  ITInst.setOpcode(ARM::t2IT);
228  ITInst.addOperand(MCOperand::createImm(ITState.Cond));
229  ITInst.addOperand(MCOperand::createImm(Mask));
230  Out.EmitInstruction(ITInst, getSTI());
231 
232  // Emit the conditional instructions
233  assert(PendingConditionalInsts.size() <= 4);
234  for (const MCInst &Inst : PendingConditionalInsts) {
235  Out.EmitInstruction(Inst, getSTI());
236  }
237  PendingConditionalInsts.clear();
238 
239  // Clear the IT state
240  ITState.Mask = 0;
241  ITState.CurPosition = ~0U;
242  }
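// E.g. with implicit IT enabled in Thumb mode, "addeq r0, r0, r1" followed by
// "subne r2, r2, r3" is buffered in PendingConditionalInsts and flushed here
// as an "ite eq" followed by the two conditional instructions.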
243 
244  bool inITBlock() { return ITState.CurPosition != ~0U; }
245  bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
246  bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }
247 
248  bool lastInITBlock() {
249  return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
250  }
251 
252  void forwardITPosition() {
253  if (!inITBlock()) return;
254  // Move to the next instruction in the IT block, if there is one. If not,
255  // mark the block as done, except for implicit IT blocks, which we leave
256  // open until we find an instruction that can't be added to it.
257  unsigned TZ = countTrailingZeros(ITState.Mask);
258  if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
259  ITState.CurPosition = ~0U; // Done with the IT block after this.
260  }
261 
262  // Rewind the state of the current IT block, removing the last slot from it.
263  void rewindImplicitITPosition() {
264  assert(inImplicitITBlock());
265  assert(ITState.CurPosition > 1);
266  ITState.CurPosition--;
267  unsigned TZ = countTrailingZeros(ITState.Mask);
268  unsigned NewMask = 0;
269  NewMask |= ITState.Mask & (0xC << TZ);
270  NewMask |= 0x2 << TZ;
271  ITState.Mask = NewMask;
272  }
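// E.g. a three-slot implicit block with Mask == 0b1010 rewinds to
// Mask == 0b1100: the surviving slot bit (bit 3) is kept and the trailing 1
// moves up from bit 1 to bit 2, leaving a two-slot block.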
273 
274  // Rewind the state of the current IT block, removing the last slot from it.
275  // If we were at the first slot, this closes the IT block.
276  void discardImplicitITBlock() {
277  assert(inImplicitITBlock());
278  assert(ITState.CurPosition == 1);
279  ITState.CurPosition = ~0U;
280  }
281 
282  // Return the low-subreg of a given Q register.
283  unsigned getDRegFromQReg(unsigned QReg) const {
284  return MRI->getSubReg(QReg, ARM::dsub_0);
285  }
286 
287  // Get the encoding of the IT mask, as it will appear in an IT instruction.
288  unsigned getITMaskEncoding() {
289  assert(inITBlock());
290  unsigned Mask = ITState.Mask;
291  unsigned TZ = countTrailingZeros(Mask);
292  if ((ITState.Cond & 1) == 0) {
293  assert(Mask && TZ <= 3 && "illegal IT mask value!");
294  Mask ^= (0xE << TZ) & 0xF;
295  }
296  return Mask;
297  }
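// E.g. for "itte eq" the parsed mask is 0b1010; since EQ has a zero low bit,
// the bits above the trailing 1 are inverted ((0xE << 1) & 0xF == 0b1100),
// giving the architectural mask encoding 0b0110.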
298 
299  // Get the condition code corresponding to the current IT block slot.
300  ARMCC::CondCodes currentITCond() {
301  unsigned MaskBit;
302  if (ITState.CurPosition == 1)
303  MaskBit = 1;
304  else
305  MaskBit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
306 
307  return MaskBit ? ITState.Cond : ARMCC::getOppositeCondition(ITState.Cond);
308  }
309 
310  // Invert the condition of the current IT block slot without changing any
311  // other slots in the same block.
312  void invertCurrentITCondition() {
313  if (ITState.CurPosition == 1) {
314  ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
315  } else {
316  ITState.Mask ^= 1 << (5 - ITState.CurPosition);
317  }
318  }
319 
320  // Returns true if the current IT block is full (all 4 slots used).
321  bool isITBlockFull() {
322  return inITBlock() && (ITState.Mask & 1);
323  }
324 
325  // Extend the current implicit IT block to have one more slot with the given
326  // condition code.
327  void extendImplicitITBlock(ARMCC::CondCodes Cond) {
328  assert(inImplicitITBlock());
329  assert(!isITBlockFull());
330  assert(Cond == ITState.Cond ||
331  Cond == ARMCC::getOppositeCondition(ITState.Cond));
332  unsigned TZ = countTrailingZeros(ITState.Mask);
333  unsigned NewMask = 0;
334  // Keep any existing condition bits.
335  NewMask |= ITState.Mask & (0xE << TZ);
336  // Insert the new condition bit.
337  NewMask |= (Cond == ITState.Cond) << TZ;
338  // Move the trailing 1 down one bit.
339  NewMask |= 1 << (TZ - 1);
340  ITState.Mask = NewMask;
341  }
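// E.g. extending a one-slot implicit block (Mask == 0b1000, TZ == 3) with the
// opposite condition yields Mask == 0b0100: no '1' is inserted at bit 3 for
// the new "else" slot and the trailing 1 moves down to bit 2.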
342 
343  // Create a new implicit IT block with a dummy condition code.
344  void startImplicitITBlock() {
345  assert(!inITBlock());
346  ITState.Cond = ARMCC::AL;
347  ITState.Mask = 8;
348  ITState.CurPosition = 1;
349  ITState.IsExplicit = false;
350  }
351 
352  // Create a new explicit IT block with the given condition and mask. The mask
353  // should be in the parsed format, with a 1 implying 't', regardless of the
354  // low bit of the condition.
355  void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
356  assert(!inITBlock());
357  ITState.Cond = Cond;
358  ITState.Mask = Mask;
359  ITState.CurPosition = 0;
360  ITState.IsExplicit = true;
361  }
362 
363  void Note(SMLoc L, const Twine &Msg, SMRange Range = None) {
364  return getParser().Note(L, Msg, Range);
365  }
366 
367  bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) {
368  return getParser().Warning(L, Msg, Range);
369  }
370 
371  bool Error(SMLoc L, const Twine &Msg, SMRange Range = None) {
372  return getParser().Error(L, Msg, Range);
373  }
374 
375  bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
376  unsigned ListNo, bool IsARPop = false);
377  bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
378  unsigned ListNo);
379 
380  int tryParseRegister();
381  bool tryParseRegisterWithWriteBack(OperandVector &);
382  int tryParseShiftRegister(OperandVector &);
383  bool parseRegisterList(OperandVector &);
384  bool parseMemory(OperandVector &);
385  bool parseOperand(OperandVector &, StringRef Mnemonic);
386  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
387  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
388  unsigned &ShiftAmount);
389  bool parseLiteralValues(unsigned Size, SMLoc L);
390  bool parseDirectiveThumb(SMLoc L);
391  bool parseDirectiveARM(SMLoc L);
392  bool parseDirectiveThumbFunc(SMLoc L);
393  bool parseDirectiveCode(SMLoc L);
394  bool parseDirectiveSyntax(SMLoc L);
395  bool parseDirectiveReq(StringRef Name, SMLoc L);
396  bool parseDirectiveUnreq(SMLoc L);
397  bool parseDirectiveArch(SMLoc L);
398  bool parseDirectiveEabiAttr(SMLoc L);
399  bool parseDirectiveCPU(SMLoc L);
400  bool parseDirectiveFPU(SMLoc L);
401  bool parseDirectiveFnStart(SMLoc L);
402  bool parseDirectiveFnEnd(SMLoc L);
403  bool parseDirectiveCantUnwind(SMLoc L);
404  bool parseDirectivePersonality(SMLoc L);
405  bool parseDirectiveHandlerData(SMLoc L);
406  bool parseDirectiveSetFP(SMLoc L);
407  bool parseDirectivePad(SMLoc L);
408  bool parseDirectiveRegSave(SMLoc L, bool IsVector);
409  bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
410  bool parseDirectiveLtorg(SMLoc L);
411  bool parseDirectiveEven(SMLoc L);
412  bool parseDirectivePersonalityIndex(SMLoc L);
413  bool parseDirectiveUnwindRaw(SMLoc L);
414  bool parseDirectiveTLSDescSeq(SMLoc L);
415  bool parseDirectiveMovSP(SMLoc L);
416  bool parseDirectiveObjectArch(SMLoc L);
417  bool parseDirectiveArchExtension(SMLoc L);
418  bool parseDirectiveAlign(SMLoc L);
419  bool parseDirectiveThumbSet(SMLoc L);
420 
421  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
422  bool &CarrySetting, unsigned &ProcessorIMod,
423  StringRef &ITMask);
424  void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
425  bool &CanAcceptCarrySet,
426  bool &CanAcceptPredicationCode);
427 
428  void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
429  OperandVector &Operands);
430  bool isThumb() const {
431  // FIXME: Can tablegen auto-generate this?
432  return getSTI().getFeatureBits()[ARM::ModeThumb];
433  }
434 
435  bool isThumbOne() const {
436  return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
437  }
438 
439  bool isThumbTwo() const {
440  return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
441  }
442 
443  bool hasThumb() const {
444  return getSTI().getFeatureBits()[ARM::HasV4TOps];
445  }
446 
447  bool hasThumb2() const {
448  return getSTI().getFeatureBits()[ARM::FeatureThumb2];
449  }
450 
451  bool hasV6Ops() const {
452  return getSTI().getFeatureBits()[ARM::HasV6Ops];
453  }
454 
455  bool hasV6T2Ops() const {
456  return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
457  }
458 
459  bool hasV6MOps() const {
460  return getSTI().getFeatureBits()[ARM::HasV6MOps];
461  }
462 
463  bool hasV7Ops() const {
464  return getSTI().getFeatureBits()[ARM::HasV7Ops];
465  }
466 
467  bool hasV8Ops() const {
468  return getSTI().getFeatureBits()[ARM::HasV8Ops];
469  }
470 
471  bool hasV8MBaseline() const {
472  return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
473  }
474 
475  bool hasV8MMainline() const {
476  return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
477  }
478 
479  bool has8MSecExt() const {
480  return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
481  }
482 
483  bool hasARM() const {
484  return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
485  }
486 
487  bool hasDSP() const {
488  return getSTI().getFeatureBits()[ARM::FeatureDSP];
489  }
490 
491  bool hasD16() const {
492  return getSTI().getFeatureBits()[ARM::FeatureD16];
493  }
494 
495  bool hasV8_1aOps() const {
496  return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
497  }
498 
499  bool hasRAS() const {
500  return getSTI().getFeatureBits()[ARM::FeatureRAS];
501  }
502 
503  void SwitchMode() {
504  MCSubtargetInfo &STI = copySTI();
505  uint64_t FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
506  setAvailableFeatures(FB);
507  }
508 
509  void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
510 
511  bool isMClass() const {
512  return getSTI().getFeatureBits()[ARM::FeatureMClass];
513  }
514 
515  /// @name Auto-generated Match Functions
516  /// {
517 
518 #define GET_ASSEMBLER_HEADER
519 #include "ARMGenAsmMatcher.inc"
520 
521  /// }
522 
523  OperandMatchResultTy parseITCondCode(OperandVector &);
524  OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
525  OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
526  OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
527  OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
528  OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
529  OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
530  OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
531  OperandMatchResultTy parseBankedRegOperand(OperandVector &);
532  OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
533  int High);
534  OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
535  return parsePKHImm(O, "lsl", 0, 31);
536  }
537  OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
538  return parsePKHImm(O, "asr", 1, 32);
539  }
540  OperandMatchResultTy parseSetEndImm(OperandVector &);
541  OperandMatchResultTy parseShifterImm(OperandVector &);
542  OperandMatchResultTy parseRotImm(OperandVector &);
543  OperandMatchResultTy parseModImm(OperandVector &);
544  OperandMatchResultTy parseBitfield(OperandVector &);
545  OperandMatchResultTy parsePostIdxReg(OperandVector &);
546  OperandMatchResultTy parseAM3Offset(OperandVector &);
547  OperandMatchResultTy parseFPImm(OperandVector &);
548  OperandMatchResultTy parseVectorList(OperandVector &);
549  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
550  SMLoc &EndLoc);
551 
552  // Asm Match Converter Methods
553  void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
554  void cvtThumbBranches(MCInst &Inst, const OperandVector &);
555 
556  bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
557  bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
558  bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
559  bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
560  bool isITBlockTerminator(MCInst &Inst) const;
561 
562 public:
563  enum ARMMatchResultTy {
564  Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
565  Match_RequiresNotITBlock,
566  Match_RequiresV6,
567  Match_RequiresThumb2,
568  Match_RequiresV8,
569  Match_RequiresFlagSetting,
570 #define GET_OPERAND_DIAGNOSTIC_TYPES
571 #include "ARMGenAsmMatcher.inc"
572 
573  };
574 
575  ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
576  const MCInstrInfo &MII, const MCTargetOptions &Options)
577  : MCTargetAsmParser(Options, STI), MII(MII), UC(Parser) {
579 
580  // Cache the MCRegisterInfo.
581  MRI = getContext().getRegisterInfo();
582 
583  // Initialize the set of available features.
584  setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
585 
586  // Add build attributes based on the selected target.
587  if (AddBuildAttributes)
588  getTargetStreamer().emitTargetAttributes(STI);
589 
590  // Not in an ITBlock to start with.
591  ITState.CurPosition = ~0U;
592 
593  NextSymbolIsThumb = false;
594  }
595 
596  // Implementation of the MCTargetAsmParser interface:
597  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
598  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
599  SMLoc NameLoc, OperandVector &Operands) override;
600  bool ParseDirective(AsmToken DirectiveID) override;
601 
602  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
603  unsigned Kind) override;
604  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
605 
606  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
607  OperandVector &Operands, MCStreamer &Out,
608  uint64_t &ErrorInfo,
609  bool MatchingInlineAsm) override;
610  unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
611  uint64_t &ErrorInfo, bool MatchingInlineAsm,
612  bool &EmitInITBlock, MCStreamer &Out);
613  void onLabelParsed(MCSymbol *Symbol) override;
614 };
615 
616 /// ARMOperand - Instances of this class represent a parsed ARM machine
617 /// operand.
618 class ARMOperand : public MCParsedAsmOperand {
619  enum KindTy {
620  k_CondCode,
621  k_CCOut,
622  k_ITCondMask,
623  k_CoprocNum,
624  k_CoprocReg,
625  k_CoprocOption,
626  k_Immediate,
627  k_MemBarrierOpt,
628  k_InstSyncBarrierOpt,
629  k_Memory,
630  k_PostIndexRegister,
631  k_MSRMask,
632  k_BankedReg,
633  k_ProcIFlags,
634  k_VectorIndex,
635  k_Register,
636  k_RegisterList,
637  k_DPRRegisterList,
638  k_SPRRegisterList,
639  k_VectorList,
640  k_VectorListAllLanes,
641  k_VectorListIndexed,
642  k_ShiftedRegister,
643  k_ShiftedImmediate,
644  k_ShifterImmediate,
645  k_RotateImmediate,
646  k_ModifiedImmediate,
647  k_ConstantPoolImmediate,
648  k_BitfieldDescriptor,
649  k_Token,
650  } Kind;
651 
652  SMLoc StartLoc, EndLoc, AlignmentLoc;
653  SmallVector<unsigned, 8> Registers;
654 
655  struct CCOp {
656  ARMCC::CondCodes Val;
657  };
658 
659  struct CopOp {
660  unsigned Val;
661  };
662 
663  struct CoprocOptionOp {
664  unsigned Val;
665  };
666 
667  struct ITMaskOp {
668  unsigned Mask:4;
669  };
670 
671  struct MBOptOp {
672  ARM_MB::MemBOpt Val;
673  };
674 
675  struct ISBOptOp {
676  ARM_ISB::InstSyncBOpt Val;
677  };
678 
679  struct IFlagsOp {
680  ARM_PROC::IFlags Val;
681  };
682 
683  struct MMaskOp {
684  unsigned Val;
685  };
686 
687  struct BankedRegOp {
688  unsigned Val;
689  };
690 
691  struct TokOp {
692  const char *Data;
693  unsigned Length;
694  };
695 
696  struct RegOp {
697  unsigned RegNum;
698  };
699 
700  // A vector register list is a sequential list of 1 to 4 registers.
701  struct VectorListOp {
702  unsigned RegNum;
703  unsigned Count;
704  unsigned LaneIndex;
705  bool isDoubleSpaced;
706  };
707 
708  struct VectorIndexOp {
709  unsigned Val;
710  };
711 
712  struct ImmOp {
713  const MCExpr *Val;
714  };
715 
716  /// Combined record for all forms of ARM address expressions.
717  struct MemoryOp {
718  unsigned BaseRegNum;
719  // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
720  // was specified.
721  const MCConstantExpr *OffsetImm; // Offset immediate value
722  unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL
723  ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
724  unsigned ShiftImm; // shift for OffsetReg.
725  unsigned Alignment; // 0 = no alignment specified
726  // n = alignment in bytes (2, 4, 8, 16, or 32)
727  unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit)
728  };
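// E.g. "[r0, r1, lsl #2]" is stored with BaseRegNum == R0, OffsetRegNum == R1,
// ShiftType == ARM_AM::lsl and ShiftImm == 2 (OffsetImm stays null), while
// "[r2, #-4]" keeps the sign inside OffsetImm; isNegative only describes a
// subtracted offset register.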
729 
730  struct PostIdxRegOp {
731  unsigned RegNum;
732  bool isAdd;
733  ARM_AM::ShiftOpc ShiftTy;
734  unsigned ShiftImm;
735  };
736 
737  struct ShifterImmOp {
738  bool isASR;
739  unsigned Imm;
740  };
741 
742  struct RegShiftedRegOp {
743  ARM_AM::ShiftOpc ShiftTy;
744  unsigned SrcReg;
745  unsigned ShiftReg;
746  unsigned ShiftImm;
747  };
748 
749  struct RegShiftedImmOp {
750  ARM_AM::ShiftOpc ShiftTy;
751  unsigned SrcReg;
752  unsigned ShiftImm;
753  };
754 
755  struct RotImmOp {
756  unsigned Imm;
757  };
758 
759  struct ModImmOp {
760  unsigned Bits;
761  unsigned Rot;
762  };
763 
764  struct BitfieldOp {
765  unsigned LSB;
766  unsigned Width;
767  };
768 
769  union {
770  struct CCOp CC;
771  struct CopOp Cop;
772  struct CoprocOptionOp CoprocOption;
773  struct MBOptOp MBOpt;
774  struct ISBOptOp ISBOpt;
775  struct ITMaskOp ITMask;
776  struct IFlagsOp IFlags;
777  struct MMaskOp MMask;
778  struct BankedRegOp BankedReg;
779  struct TokOp Tok;
780  struct RegOp Reg;
781  struct VectorListOp VectorList;
782  struct VectorIndexOp VectorIndex;
783  struct ImmOp Imm;
784  struct MemoryOp Memory;
785  struct PostIdxRegOp PostIdxReg;
786  struct ShifterImmOp ShifterImm;
787  struct RegShiftedRegOp RegShiftedReg;
788  struct RegShiftedImmOp RegShiftedImm;
789  struct RotImmOp RotImm;
790  struct ModImmOp ModImm;
791  struct BitfieldOp Bitfield;
792  };
793 
794 public:
795  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
796 
797  /// getStartLoc - Get the location of the first token of this operand.
798  SMLoc getStartLoc() const override { return StartLoc; }
799 
800  /// getEndLoc - Get the location of the last token of this operand.
801  SMLoc getEndLoc() const override { return EndLoc; }
802 
803  /// getLocRange - Get the range between the first and last token of this
804  /// operand.
805  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
806 
807  /// getAlignmentLoc - Get the location of the Alignment token of this operand.
808  SMLoc getAlignmentLoc() const {
809  assert(Kind == k_Memory && "Invalid access!");
810  return AlignmentLoc;
811  }
812 
813  ARMCC::CondCodes getCondCode() const {
814  assert(Kind == k_CondCode && "Invalid access!");
815  return CC.Val;
816  }
817 
818  unsigned getCoproc() const {
819  assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
820  return Cop.Val;
821  }
822 
823  StringRef getToken() const {
824  assert(Kind == k_Token && "Invalid access!");
825  return StringRef(Tok.Data, Tok.Length);
826  }
827 
828  unsigned getReg() const override {
829  assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
830  return Reg.RegNum;
831  }
832 
833  const SmallVectorImpl<unsigned> &getRegList() const {
834  assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
835  Kind == k_SPRRegisterList) && "Invalid access!");
836  return Registers;
837  }
838 
839  const MCExpr *getImm() const {
840  assert(isImm() && "Invalid access!");
841  return Imm.Val;
842  }
843 
844  const MCExpr *getConstantPoolImm() const {
845  assert(isConstantPoolImm() && "Invalid access!");
846  return Imm.Val;
847  }
848 
849  unsigned getVectorIndex() const {
850  assert(Kind == k_VectorIndex && "Invalid access!");
851  return VectorIndex.Val;
852  }
853 
854  ARM_MB::MemBOpt getMemBarrierOpt() const {
855  assert(Kind == k_MemBarrierOpt && "Invalid access!");
856  return MBOpt.Val;
857  }
858 
859  ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
860  assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
861  return ISBOpt.Val;
862  }
863 
864  ARM_PROC::IFlags getProcIFlags() const {
865  assert(Kind == k_ProcIFlags && "Invalid access!");
866  return IFlags.Val;
867  }
868 
869  unsigned getMSRMask() const {
870  assert(Kind == k_MSRMask && "Invalid access!");
871  return MMask.Val;
872  }
873 
874  unsigned getBankedReg() const {
875  assert(Kind == k_BankedReg && "Invalid access!");
876  return BankedReg.Val;
877  }
878 
879  bool isCoprocNum() const { return Kind == k_CoprocNum; }
880  bool isCoprocReg() const { return Kind == k_CoprocReg; }
881  bool isCoprocOption() const { return Kind == k_CoprocOption; }
882  bool isCondCode() const { return Kind == k_CondCode; }
883  bool isCCOut() const { return Kind == k_CCOut; }
884  bool isITMask() const { return Kind == k_ITCondMask; }
885  bool isITCondCode() const { return Kind == k_CondCode; }
886  bool isImm() const override {
887  return Kind == k_Immediate;
888  }
889 
890  bool isARMBranchTarget() const {
891  if (!isImm()) return false;
892 
893  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
894  return CE->getValue() % 4 == 0;
895  return true;
896  }
897 
898 
899  bool isThumbBranchTarget() const {
900  if (!isImm()) return false;
901 
902  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
903  return CE->getValue() % 2 == 0;
904  return true;
905  }
906 
907  // checks whether this operand is an unsigned offset which fits in a field
908  // of specified width and is scaled by a specific number of bits
909  template<unsigned width, unsigned scale>
910  bool isUnsignedOffset() const {
911  if (!isImm()) return false;
912  if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
913  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
914  int64_t Val = CE->getValue();
915  int64_t Align = 1LL << scale;
916  int64_t Max = Align * ((1LL << width) - 1);
917  return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
918  }
919  return false;
920  }
921 
922  // checks whether this operand is a signed offset which fits in a field
923  // of specified width and is scaled by a specific number of bits
924  template<unsigned width, unsigned scale>
925  bool isSignedOffset() const {
926  if (!isImm()) return false;
927  if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
928  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
929  int64_t Val = CE->getValue();
930  int64_t Align = 1LL << scale;
931  int64_t Max = Align * ((1LL << (width-1)) - 1);
932  int64_t Min = -Align * (1LL << (width-1));
933  return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
934  }
935  return false;
936  }
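// E.g. isUnsignedOffset<8, 2>() accepts constants that are multiples of 4 in
// [0, 1020], and isSignedOffset<8, 2>() accepts multiples of 4 in [-512, 508];
// both let non-constant (symbolic) expressions through for a later fixup.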
937 
938  // checks whether this operand is a memory operand computed as an offset
939  // applied to PC. The offset may have 8 bits of magnitude and is represented
940  // with two bits of shift. Textually it may be either [pc, #imm], #imm or a
941  // relocatable expression...
942  bool isThumbMemPC() const {
943  int64_t Val = 0;
944  if (isImm()) {
945  if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
946  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
947  if (!CE) return false;
948  Val = CE->getValue();
949  }
950  else if (isMem()) {
951  if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
952  if(Memory.BaseRegNum != ARM::PC) return false;
953  Val = Memory.OffsetImm->getValue();
954  }
955  else return false;
956  return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
957  }
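// E.g. "ldr r0, [pc, #1020]" and a bare "#64" both satisfy this predicate, as
// does an unresolved label reference; an offset such as #1022 is rejected
// because it is not a multiple of 4 within [0, 1020].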
958 
959  bool isFPImm() const {
960  if (!isImm()) return false;
961  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
962  if (!CE) return false;
963  int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
964  return Val != -1;
965  }
966 
967  template<int64_t N, int64_t M>
968  bool isImmediate() const {
969  if (!isImm()) return false;
970  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
971  if (!CE) return false;
972  int64_t Value = CE->getValue();
973  return Value >= N && Value <= M;
974  }
975 
976  template<int64_t N, int64_t M>
977  bool isImmediateS4() const {
978  if (!isImm()) return false;
979  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
980  if (!CE) return false;
981  int64_t Value = CE->getValue();
982  return ((Value & 3) == 0) && Value >= N && Value <= M;
983  }
984 
985  bool isFBits16() const {
986  return isImmediate<0, 17>();
987  }
988  bool isFBits32() const {
989  return isImmediate<1, 33>();
990  }
991  bool isImm8s4() const {
992  return isImmediateS4<-1020, 1020>();
993  }
994  bool isImm0_1020s4() const {
995  return isImmediateS4<0, 1020>();
996  }
997  bool isImm0_508s4() const {
998  return isImmediateS4<0, 508>();
999  }
1000  bool isImm0_508s4Neg() const {
1001  if (!isImm()) return false;
1002  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1003  if (!CE) return false;
1004  int64_t Value = -CE->getValue();
1005  // explicitly exclude zero. we want that to use the normal 0_508 version.
1006  return ((Value & 3) == 0) && Value > 0 && Value <= 508;
1007  }
1008 
1009  bool isImm0_4095Neg() const {
1010  if (!isImm()) return false;
1011  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1012  if (!CE) return false;
1013  int64_t Value = -CE->getValue();
1014  return Value > 0 && Value < 4096;
1015  }
1016 
1017  bool isImm0_7() const {
1018  return isImmediate<0, 7>();
1019  }
1020 
1021  bool isImm1_16() const {
1022  return isImmediate<1, 16>();
1023  }
1024 
1025  bool isImm1_32() const {
1026  return isImmediate<1, 32>();
1027  }
1028 
1029  bool isImm8_255() const {
1030  return isImmediate<8, 255>();
1031  }
1032 
1033  bool isImm256_65535Expr() const {
1034  if (!isImm()) return false;
1035  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1036  // If it's not a constant expression, it'll generate a fixup and be
1037  // handled later.
1038  if (!CE) return true;
1039  int64_t Value = CE->getValue();
1040  return Value >= 256 && Value < 65536;
1041  }
1042 
1043  bool isImm0_65535Expr() const {
1044  if (!isImm()) return false;
1045  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1046  // If it's not a constant expression, it'll generate a fixup and be
1047  // handled later.
1048  if (!CE) return true;
1049  int64_t Value = CE->getValue();
1050  return Value >= 0 && Value < 65536;
1051  }
1052 
1053  bool isImm24bit() const {
1054  return isImmediate<0, 0xffffff + 1>();
1055  }
1056 
1057  bool isImmThumbSR() const {
1058  return isImmediate<1, 33>();
1059  }
1060 
1061  bool isPKHLSLImm() const {
1062  return isImmediate<0, 32>();
1063  }
1064 
1065  bool isPKHASRImm() const {
1066  return isImmediate<0, 33>();
1067  }
1068 
1069  bool isAdrLabel() const {
1070  // If we have an immediate that's not a constant, treat it as a label
1071  // reference needing a fixup.
1072  if (isImm() && !isa<MCConstantExpr>(getImm()))
1073  return true;
1074 
1075  // If it is a constant, it must fit into a modified immediate encoding.
1076  if (!isImm()) return false;
1077  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1078  if (!CE) return false;
1079  int64_t Value = CE->getValue();
1080  return (ARM_AM::getSOImmVal(Value) != -1 ||
1081  ARM_AM::getSOImmVal(-Value) != -1);
1082  }
1083 
1084  bool isT2SOImm() const {
1085  // If we have an immediate that's not a constant, treat it as an expression
1086  // needing a fixup.
1087  if (isImm() && !isa<MCConstantExpr>(getImm())) {
1088  // We want to avoid matching :upper16: and :lower16: as we want these
1089  // expressions to match in isImm0_65535Expr()
1090  const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
1091  return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
1092  ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
1093  }
1094  if (!isImm()) return false;
1095  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1096  if (!CE) return false;
1097  int64_t Value = CE->getValue();
1098  return ARM_AM::getT2SOImmVal(Value) != -1;
1099  }
1100 
1101  bool isT2SOImmNot() const {
1102  if (!isImm()) return false;
1103  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1104  if (!CE) return false;
1105  int64_t Value = CE->getValue();
1106  return ARM_AM::getT2SOImmVal(Value) == -1 &&
1107  ARM_AM::getT2SOImmVal(~Value) != -1;
1108  }
1109 
1110  bool isT2SOImmNeg() const {
1111  if (!isImm()) return false;
1112  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1113  if (!CE) return false;
1114  int64_t Value = CE->getValue();
1115  // Only use this when not representable as a plain so_imm.
1116  return ARM_AM::getT2SOImmVal(Value) == -1 &&
1117  ARM_AM::getT2SOImmVal(-Value) != -1;
1118  }
1119 
1120  bool isSetEndImm() const {
1121  if (!isImm()) return false;
1122  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1123  if (!CE) return false;
1124  int64_t Value = CE->getValue();
1125  return Value == 1 || Value == 0;
1126  }
1127 
1128  bool isReg() const override { return Kind == k_Register; }
1129  bool isRegList() const { return Kind == k_RegisterList; }
1130  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
1131  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
1132  bool isToken() const override { return Kind == k_Token; }
1133  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
1134  bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
1135  bool isMem() const override { return Kind == k_Memory; }
1136  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
1137  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
1138  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
1139  bool isRotImm() const { return Kind == k_RotateImmediate; }
1140  bool isModImm() const { return Kind == k_ModifiedImmediate; }
1141 
1142  bool isModImmNot() const {
1143  if (!isImm()) return false;
1144  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1145  if (!CE) return false;
1146  int64_t Value = CE->getValue();
1147  return ARM_AM::getSOImmVal(~Value) != -1;
1148  }
1149 
1150  bool isModImmNeg() const {
1151  if (!isImm()) return false;
1152  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1153  if (!CE) return false;
1154  int64_t Value = CE->getValue();
1155  return ARM_AM::getSOImmVal(Value) == -1 &&
1156  ARM_AM::getSOImmVal(-Value) != -1;
1157  }
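// E.g. 0xffffff0f is not itself encodable as a modified immediate, but its
// complement 0xf0 is, so isModImmNot() holds and the assembler may switch to
// the complementary instruction form (roughly mov <-> mvn); isModImmNeg()
// plays the same role for constants whose negation is encodable.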
1158 
1159  bool isThumbModImmNeg1_7() const {
1160  if (!isImm()) return false;
1161  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1162  if (!CE) return false;
1163  int32_t Value = -(int32_t)CE->getValue();
1164  return 0 < Value && Value < 8;
1165  }
1166 
1167  bool isThumbModImmNeg8_255() const {
1168  if (!isImm()) return false;
1169  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1170  if (!CE) return false;
1171  int32_t Value = -(int32_t)CE->getValue();
1172  return 7 < Value && Value < 256;
1173  }
1174 
1175  bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
1176  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
1177  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
1178  bool isPostIdxReg() const {
1179  return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
1180  }
1181  bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
1182  if (!isMem())
1183  return false;
1184  // No offset of any kind.
1185  return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1186  (alignOK || Memory.Alignment == Alignment);
1187  }
1188  bool isMemPCRelImm12() const {
1189  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1190  return false;
1191  // Base register must be PC.
1192  if (Memory.BaseRegNum != ARM::PC)
1193  return false;
1194  // Immediate offset in range [-4095, 4095].
1195  if (!Memory.OffsetImm) return true;
1196  int64_t Val = Memory.OffsetImm->getValue();
1197  return (Val > -4096 && Val < 4096) ||
1198  (Val == std::numeric_limits<int32_t>::min());
1199  }
1200 
1201  bool isAlignedMemory() const {
1202  return isMemNoOffset(true);
1203  }
1204 
1205  bool isAlignedMemoryNone() const {
1206  return isMemNoOffset(false, 0);
1207  }
1208 
1209  bool isDupAlignedMemoryNone() const {
1210  return isMemNoOffset(false, 0);
1211  }
1212 
1213  bool isAlignedMemory16() const {
1214  if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1215  return true;
1216  return isMemNoOffset(false, 0);
1217  }
1218 
1219  bool isDupAlignedMemory16() const {
1220  if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1221  return true;
1222  return isMemNoOffset(false, 0);
1223  }
1224 
1225  bool isAlignedMemory32() const {
1226  if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1227  return true;
1228  return isMemNoOffset(false, 0);
1229  }
1230 
1231  bool isDupAlignedMemory32() const {
1232  if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1233  return true;
1234  return isMemNoOffset(false, 0);
1235  }
1236 
1237  bool isAlignedMemory64() const {
1238  if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1239  return true;
1240  return isMemNoOffset(false, 0);
1241  }
1242 
1243  bool isDupAlignedMemory64() const {
1244  if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1245  return true;
1246  return isMemNoOffset(false, 0);
1247  }
1248 
1249  bool isAlignedMemory64or128() const {
1250  if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1251  return true;
1252  if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1253  return true;
1254  return isMemNoOffset(false, 0);
1255  }
1256 
1257  bool isDupAlignedMemory64or128() const {
1258  if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1259  return true;
1260  if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1261  return true;
1262  return isMemNoOffset(false, 0);
1263  }
1264 
1265  bool isAlignedMemory64or128or256() const {
1266  if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1267  return true;
1268  if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1269  return true;
1270  if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
1271  return true;
1272  return isMemNoOffset(false, 0);
1273  }
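// E.g. "vld1.32 {d0, d1}, [r0:64]" parses to a memory operand with
// Alignment == 8 bytes, which isAlignedMemory64or128() accepts; a plain
// "[r0]" (Alignment == 0) is accepted by every variant above.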
1274 
1275  bool isAddrMode2() const {
1276  if (!isMem() || Memory.Alignment != 0) return false;
1277  // Check for register offset.
1278  if (Memory.OffsetRegNum) return true;
1279  // Immediate offset in range [-4095, 4095].
1280  if (!Memory.OffsetImm) return true;
1281  int64_t Val = Memory.OffsetImm->getValue();
1282  return Val > -4096 && Val < 4096;
1283  }
1284 
1285  bool isAM2OffsetImm() const {
1286  if (!isImm()) return false;
1287  // Immediate offset in range [-4095, 4095].
1288  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1289  if (!CE) return false;
1290  int64_t Val = CE->getValue();
1291  return (Val == std::numeric_limits<int32_t>::min()) ||
1292  (Val > -4096 && Val < 4096);
1293  }
1294 
1295  bool isAddrMode3() const {
1296  // If we have an immediate that's not a constant, treat it as a label
1297  // reference needing a fixup. If it is a constant, it's something else
1298  // and we reject it.
1299  if (isImm() && !isa<MCConstantExpr>(getImm()))
1300  return true;
1301  if (!isMem() || Memory.Alignment != 0) return false;
1302  // No shifts are legal for AM3.
1303  if (Memory.ShiftType != ARM_AM::no_shift) return false;
1304  // Check for register offset.
1305  if (Memory.OffsetRegNum) return true;
1306  // Immediate offset in range [-255, 255].
1307  if (!Memory.OffsetImm) return true;
1308  int64_t Val = Memory.OffsetImm->getValue();
1309  // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and we
1310  // have to check for this too.
1311  return (Val > -256 && Val < 256) ||
1312  Val == std::numeric_limits<int32_t>::min();
1313  }
1314 
1315  bool isAM3Offset() const {
1316  if (Kind != k_Immediate && Kind != k_PostIndexRegister)
1317  return false;
1318  if (Kind == k_PostIndexRegister)
1319  return PostIdxReg.ShiftTy == ARM_AM::no_shift;
1320  // Immediate offset in range [-255, 255].
1321  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1322  if (!CE) return false;
1323  int64_t Val = CE->getValue();
1324  // Special case, #-0 is std::numeric_limits<int32_t>::min().
1325  return (Val > -256 && Val < 256) ||
1326  Val == std::numeric_limits<int32_t>::min();
1327  }
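// E.g. "ldrd r0, r1, [r2, #-0]" records its offset as
// std::numeric_limits<int32_t>::min() so that a zero offset with the U bit
// clear can be distinguished from a plain "#0".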
1328 
1329  bool isAddrMode5() const {
1330  // If we have an immediate that's not a constant, treat it as a label
1331  // reference needing a fixup. If it is a constant, it's something else
1332  // and we reject it.
1333  if (isImm() && !isa<MCConstantExpr>(getImm()))
1334  return true;
1335  if (!isMem() || Memory.Alignment != 0) return false;
1336  // Check for register offset.
1337  if (Memory.OffsetRegNum) return false;
1338  // Immediate offset in range [-1020, 1020] and a multiple of 4.
1339  if (!Memory.OffsetImm) return true;
1340  int64_t Val = Memory.OffsetImm->getValue();
1341  return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1342  Val == std::numeric_limits<int32_t>::min();
1343  }
1344 
1345  bool isAddrMode5FP16() const {
1346  // If we have an immediate that's not a constant, treat it as a label
1347  // reference needing a fixup. If it is a constant, it's something else
1348  // and we reject it.
1349  if (isImm() && !isa<MCConstantExpr>(getImm()))
1350  return true;
1351  if (!isMem() || Memory.Alignment != 0) return false;
1352  // Check for register offset.
1353  if (Memory.OffsetRegNum) return false;
1354  // Immediate offset in range [-510, 510] and a multiple of 2.
1355  if (!Memory.OffsetImm) return true;
1356  int64_t Val = Memory.OffsetImm->getValue();
1357  return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1358  Val == std::numeric_limits<int32_t>::min();
1359  }
1360 
1361  bool isMemTBB() const {
1362  if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1363  Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1364  return false;
1365  return true;
1366  }
1367 
1368  bool isMemTBH() const {
1369  if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1370  Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1371  Memory.Alignment != 0 )
1372  return false;
1373  return true;
1374  }
1375 
1376  bool isMemRegOffset() const {
1377  if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1378  return false;
1379  return true;
1380  }
1381 
1382  bool isT2MemRegOffset() const {
1383  if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1384  Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
1385  return false;
1386  // Only lsl #{0, 1, 2, 3} allowed.
1387  if (Memory.ShiftType == ARM_AM::no_shift)
1388  return true;
1389  if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1390  return false;
1391  return true;
1392  }
1393 
1394  bool isMemThumbRR() const {
1395  // Thumb reg+reg addressing is simple. Just two registers, a base and
1396  // an offset. No shifts, negations or any other complicating factors.
1397  if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1398  Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1399  return false;
1400  return isARMLowRegister(Memory.BaseRegNum) &&
1401  (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1402  }
1403 
1404  bool isMemThumbRIs4() const {
1405  if (!isMem() || Memory.OffsetRegNum != 0 ||
1406  !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1407  return false;
1408  // Immediate offset, multiple of 4 in range [0, 124].
1409  if (!Memory.OffsetImm) return true;
1410  int64_t Val = Memory.OffsetImm->getValue();
1411  return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1412  }
1413 
1414  bool isMemThumbRIs2() const {
1415  if (!isMem() || Memory.OffsetRegNum != 0 ||
1416  !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1417  return false;
1418  // Immediate offset, multiple of 2 in range [0, 62].
1419  if (!Memory.OffsetImm) return true;
1420  int64_t Val = Memory.OffsetImm->getValue();
1421  return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1422  }
1423 
1424  bool isMemThumbRIs1() const {
1425  if (!isMem() || Memory.OffsetRegNum != 0 ||
1426  !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1427  return false;
1428  // Immediate offset in range [0, 31].
1429  if (!Memory.OffsetImm) return true;
1430  int64_t Val = Memory.OffsetImm->getValue();
1431  return Val >= 0 && Val <= 31;
1432  }
1433 
1434  bool isMemThumbSPI() const {
1435  if (!isMem() || Memory.OffsetRegNum != 0 ||
1436  Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1437  return false;
1438  // Immediate offset, multiple of 4 in range [0, 1020].
1439  if (!Memory.OffsetImm) return true;
1440  int64_t Val = Memory.OffsetImm->getValue();
1441  return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1442  }
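// E.g. "ldr r0, [r1, #124]" satisfies isMemThumbRIs4(), "ldrh r0, [r1, #62]"
// satisfies isMemThumbRIs2(), and "ldr r0, [sp, #1020]" satisfies
// isMemThumbSPI(); each offset is at the top of its scaled range.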
1443 
1444  bool isMemImm8s4Offset() const {
1445  // If we have an immediate that's not a constant, treat it as a label
1446  // reference needing a fixup. If it is a constant, it's something else
1447  // and we reject it.
1448  if (isImm() && !isa<MCConstantExpr>(getImm()))
1449  return true;
1450  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1451  return false;
1452  // Immediate offset a multiple of 4 in range [-1020, 1020].
1453  if (!Memory.OffsetImm) return true;
1454  int64_t Val = Memory.OffsetImm->getValue();
1455  // Special case, #-0 is std::numeric_limits<int32_t>::min().
1456  return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1457  Val == std::numeric_limits<int32_t>::min();
1458  }
1459 
1460  bool isMemImm0_1020s4Offset() const {
1461  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1462  return false;
1463  // Immediate offset a multiple of 4 in range [0, 1020].
1464  if (!Memory.OffsetImm) return true;
1465  int64_t Val = Memory.OffsetImm->getValue();
1466  return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1467  }
1468 
1469  bool isMemImm8Offset() const {
1470  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1471  return false;
1472  // Base reg of PC isn't allowed for these encodings.
1473  if (Memory.BaseRegNum == ARM::PC) return false;
1474  // Immediate offset in range [-255, 255].
1475  if (!Memory.OffsetImm) return true;
1476  int64_t Val = Memory.OffsetImm->getValue();
1477  return (Val == std::numeric_limits<int32_t>::min()) ||
1478  (Val > -256 && Val < 256);
1479  }
1480 
1481  bool isMemPosImm8Offset() const {
1482  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1483  return false;
1484  // Immediate offset in range [0, 255].
1485  if (!Memory.OffsetImm) return true;
1486  int64_t Val = Memory.OffsetImm->getValue();
1487  return Val >= 0 && Val < 256;
1488  }
1489 
1490  bool isMemNegImm8Offset() const {
1491  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1492  return false;
1493  // Base reg of PC isn't allowed for these encodings.
1494  if (Memory.BaseRegNum == ARM::PC) return false;
1495  // Immediate offset in range [-255, -1].
1496  if (!Memory.OffsetImm) return false;
1497  int64_t Val = Memory.OffsetImm->getValue();
1498  return (Val == std::numeric_limits<int32_t>::min()) ||
1499  (Val > -256 && Val < 0);
1500  }
1501 
1502  bool isMemUImm12Offset() const {
1503  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1504  return false;
1505  // Immediate offset in range [0, 4095].
1506  if (!Memory.OffsetImm) return true;
1507  int64_t Val = Memory.OffsetImm->getValue();
1508  return (Val >= 0 && Val < 4096);
1509  }
1510 
1511  bool isMemImm12Offset() const {
1512  // If we have an immediate that's not a constant, treat it as a label
1513  // reference needing a fixup. If it is a constant, it's something else
1514  // and we reject it.
1515 
1516  if (isImm() && !isa<MCConstantExpr>(getImm()))
1517  return true;
1518 
1519  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1520  return false;
1521  // Immediate offset in range [-4095, 4095].
1522  if (!Memory.OffsetImm) return true;
1523  int64_t Val = Memory.OffsetImm->getValue();
1524  return (Val > -4096 && Val < 4096) ||
1525  (Val == std::numeric_limits<int32_t>::min());
1526  }
1527 
1528  bool isConstPoolAsmImm() const {
1529  // Delay processing of the constant pool immediate; this will turn into
1530  // a constant. Match no other operand.
1531  return (isConstantPoolImm());
1532  }
1533 
1534  bool isPostIdxImm8() const {
1535  if (!isImm()) return false;
1536  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1537  if (!CE) return false;
1538  int64_t Val = CE->getValue();
1539  return (Val > -256 && Val < 256) ||
1540  (Val == std::numeric_limits<int32_t>::min());
1541  }
1542 
1543  bool isPostIdxImm8s4() const {
1544  if (!isImm()) return false;
1545  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1546  if (!CE) return false;
1547  int64_t Val = CE->getValue();
1548  return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1549  (Val == std::numeric_limits<int32_t>::min());
1550  }
1551 
1552  bool isMSRMask() const { return Kind == k_MSRMask; }
1553  bool isBankedReg() const { return Kind == k_BankedReg; }
1554  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1555 
1556  // NEON operands.
1557  bool isSingleSpacedVectorList() const {
1558  return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1559  }
1560 
1561  bool isDoubleSpacedVectorList() const {
1562  return Kind == k_VectorList && VectorList.isDoubleSpaced;
1563  }
1564 
1565  bool isVecListOneD() const {
1566  if (!isSingleSpacedVectorList()) return false;
1567  return VectorList.Count == 1;
1568  }
1569 
1570  bool isVecListDPair() const {
1571  if (!isSingleSpacedVectorList()) return false;
1572  return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1573  .contains(VectorList.RegNum));
1574  }
1575 
1576  bool isVecListThreeD() const {
1577  if (!isSingleSpacedVectorList()) return false;
1578  return VectorList.Count == 3;
1579  }
1580 
1581  bool isVecListFourD() const {
1582  if (!isSingleSpacedVectorList()) return false;
1583  return VectorList.Count == 4;
1584  }
1585 
1586  bool isVecListDPairSpaced() const {
1587  if (Kind != k_VectorList) return false;
1588  if (isSingleSpacedVectorList()) return false;
1589  return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1590  .contains(VectorList.RegNum));
1591  }
1592 
1593  bool isVecListThreeQ() const {
1594  if (!isDoubleSpacedVectorList()) return false;
1595  return VectorList.Count == 3;
1596  }
1597 
1598  bool isVecListFourQ() const {
1599  if (!isDoubleSpacedVectorList()) return false;
1600  return VectorList.Count == 4;
1601  }
1602 
1603  bool isSingleSpacedVectorAllLanes() const {
1604  return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1605  }
1606 
1607  bool isDoubleSpacedVectorAllLanes() const {
1608  return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1609  }
1610 
1611  bool isVecListOneDAllLanes() const {
1612  if (!isSingleSpacedVectorAllLanes()) return false;
1613  return VectorList.Count == 1;
1614  }
1615 
1616  bool isVecListDPairAllLanes() const {
1617  if (!isSingleSpacedVectorAllLanes()) return false;
1618  return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1619  .contains(VectorList.RegNum));
1620  }
1621 
1622  bool isVecListDPairSpacedAllLanes() const {
1623  if (!isDoubleSpacedVectorAllLanes()) return false;
1624  return VectorList.Count == 2;
1625  }
1626 
1627  bool isVecListThreeDAllLanes() const {
1628  if (!isSingleSpacedVectorAllLanes()) return false;
1629  return VectorList.Count == 3;
1630  }
1631 
1632  bool isVecListThreeQAllLanes() const {
1633  if (!isDoubleSpacedVectorAllLanes()) return false;
1634  return VectorList.Count == 3;
1635  }
1636 
1637  bool isVecListFourDAllLanes() const {
1638  if (!isSingleSpacedVectorAllLanes()) return false;
1639  return VectorList.Count == 4;
1640  }
1641 
1642  bool isVecListFourQAllLanes() const {
1643  if (!isDoubleSpacedVectorAllLanes()) return false;
1644  return VectorList.Count == 4;
1645  }
1646 
1647  bool isSingleSpacedVectorIndexed() const {
1648  return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1649  }
1650 
1651  bool isDoubleSpacedVectorIndexed() const {
1652  return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1653  }
1654 
1655  bool isVecListOneDByteIndexed() const {
1656  if (!isSingleSpacedVectorIndexed()) return false;
1657  return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1658  }
1659 
1660  bool isVecListOneDHWordIndexed() const {
1661  if (!isSingleSpacedVectorIndexed()) return false;
1662  return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1663  }
1664 
1665  bool isVecListOneDWordIndexed() const {
1666  if (!isSingleSpacedVectorIndexed()) return false;
1667  return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1668  }
1669 
1670  bool isVecListTwoDByteIndexed() const {
1671  if (!isSingleSpacedVectorIndexed()) return false;
1672  return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1673  }
1674 
1675  bool isVecListTwoDHWordIndexed() const {
1676  if (!isSingleSpacedVectorIndexed()) return false;
1677  return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1678  }
1679 
1680  bool isVecListTwoQWordIndexed() const {
1681  if (!isDoubleSpacedVectorIndexed()) return false;
1682  return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1683  }
1684 
1685  bool isVecListTwoQHWordIndexed() const {
1686  if (!isDoubleSpacedVectorIndexed()) return false;
1687  return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1688  }
1689 
1690  bool isVecListTwoDWordIndexed() const {
1691  if (!isSingleSpacedVectorIndexed()) return false;
1692  return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1693  }
1694 
1695  bool isVecListThreeDByteIndexed() const {
1696  if (!isSingleSpacedVectorIndexed()) return false;
1697  return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1698  }
1699 
1700  bool isVecListThreeDHWordIndexed() const {
1701  if (!isSingleSpacedVectorIndexed()) return false;
1702  return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1703  }
1704 
1705  bool isVecListThreeQWordIndexed() const {
1706  if (!isDoubleSpacedVectorIndexed()) return false;
1707  return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1708  }
1709 
1710  bool isVecListThreeQHWordIndexed() const {
1711  if (!isDoubleSpacedVectorIndexed()) return false;
1712  return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1713  }
1714 
1715  bool isVecListThreeDWordIndexed() const {
1716  if (!isSingleSpacedVectorIndexed()) return false;
1717  return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1718  }
1719 
1720  bool isVecListFourDByteIndexed() const {
1721  if (!isSingleSpacedVectorIndexed()) return false;
1722  return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1723  }
1724 
1725  bool isVecListFourDHWordIndexed() const {
1726  if (!isSingleSpacedVectorIndexed()) return false;
1727  return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1728  }
1729 
1730  bool isVecListFourQWordIndexed() const {
1731  if (!isDoubleSpacedVectorIndexed()) return false;
1732  return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1733  }
1734 
1735  bool isVecListFourQHWordIndexed() const {
1736  if (!isDoubleSpacedVectorIndexed()) return false;
1737  return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1738  }
1739 
1740  bool isVecListFourDWordIndexed() const {
1741  if (!isSingleSpacedVectorIndexed()) return false;
1742  return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1743  }
1744 
1745  bool isVectorIndex8() const {
1746  if (Kind != k_VectorIndex) return false;
1747  return VectorIndex.Val < 8;
1748  }
1749 
1750  bool isVectorIndex16() const {
1751  if (Kind != k_VectorIndex) return false;
1752  return VectorIndex.Val < 4;
1753  }
1754 
1755  bool isVectorIndex32() const {
1756  if (Kind != k_VectorIndex) return false;
1757  return VectorIndex.Val < 2;
1758  }
1759 
1760  bool isNEONi8splat() const {
1761  if (!isImm()) return false;
1762  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1763  // Must be a constant.
1764  if (!CE) return false;
1765  int64_t Value = CE->getValue();
1766  // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1767  // value.
1768  return Value >= 0 && Value < 256;
1769  }
1770 
1771  bool isNEONi16splat() const {
1772  if (isNEONByteReplicate(2))
1773  return false; // Leave this to the byte-replicate patterns; forbid it here.
1774  if (!isImm())
1775  return false;
1776  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1777  // Must be a constant.
1778  if (!CE) return false;
1779  unsigned Value = CE->getValue();
1780  return ARM_AM::isNEONi16splat(Value);
1781  }
1782 
1783  bool isNEONi16splatNot() const {
1784  if (!isImm())
1785  return false;
1786  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1787  // Must be a constant.
1788  if (!CE) return false;
1789  unsigned Value = CE->getValue();
1790  return ARM_AM::isNEONi16splat(~Value & 0xffff);
1791  }
1792 
1793  bool isNEONi32splat() const {
1794  if (isNEONByteReplicate(4))
1795  return false; // Leave this to the byte-replicate patterns; forbid it here.
1796  if (!isImm())
1797  return false;
1798  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1799  // Must be a constant.
1800  if (!CE) return false;
1801  unsigned Value = CE->getValue();
1802  return ARM_AM::isNEONi32splat(Value);
1803  }
1804 
1805  bool isNEONi32splatNot() const {
1806  if (!isImm())
1807  return false;
1808  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1809  // Must be a constant.
1810  if (!CE) return false;
1811  unsigned Value = CE->getValue();
1812  return ARM_AM::isNEONi32splat(~Value);
1813  }
1814 
1815  bool isNEONByteReplicate(unsigned NumBytes) const {
1816  if (!isImm())
1817  return false;
1818  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1819  // Must be a constant.
1820  if (!CE)
1821  return false;
1822  int64_t Value = CE->getValue();
1823  if (!Value)
1824  return false; // Don't bother with zero.
1825 
1826  unsigned char B = Value & 0xff;
1827  for (unsigned i = 1; i < NumBytes; ++i) {
1828  Value >>= 8;
1829  if ((Value & 0xff) != B)
1830  return false;
1831  }
1832  return true;
1833  }
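 // Worked example (illustrative): with NumBytes == 4, a constant such as
 // 0x2a2a2a2a passes this check because every byte equals 0x2a, while
 // 0x2a2a2a00 fails the first comparison (B would be 0x00 but byte 1 is
 // 0x2a). Zero is rejected up front so it can use the plain splat forms.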
1834 
1835  bool isNEONi16ByteReplicate() const { return isNEONByteReplicate(2); }
1836  bool isNEONi32ByteReplicate() const { return isNEONByteReplicate(4); }
1837 
1838  bool isNEONi32vmov() const {
1839  if (isNEONByteReplicate(4))
1840  return false; // Let it be classified as a byte-replicate case.
1841  if (!isImm())
1842  return false;
1843  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1844  // Must be a constant.
1845  if (!CE)
1846  return false;
1847  int64_t Value = CE->getValue();
1848  // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1849  // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1850  // FIXME: This is probably wrong and a copy and paste from previous example
1851  return (Value >= 0 && Value < 256) ||
1852  (Value >= 0x0100 && Value <= 0xff00) ||
1853  (Value >= 0x010000 && Value <= 0xff0000) ||
1854  (Value >= 0x01000000 && Value <= 0xff000000) ||
1855  (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1856  (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1857  }
1858 
1859  bool isNEONi32vmovNeg() const {
1860  if (!isImm()) return false;
1861  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1862  // Must be a constant.
1863  if (!CE) return false;
1864  int64_t Value = ~CE->getValue();
1865  // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1866  // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1867  // FIXME: This is probably wrong and a copy and paste from previous example
1868  return (Value >= 0 && Value < 256) ||
1869  (Value >= 0x0100 && Value <= 0xff00) ||
1870  (Value >= 0x010000 && Value <= 0xff0000) ||
1871  (Value >= 0x01000000 && Value <= 0xff000000) ||
1872  (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1873  (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1874  }
1875 
1876  bool isNEONi64splat() const {
1877  if (!isImm()) return false;
1878  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1879  // Must be a constant.
1880  if (!CE) return false;
1881  uint64_t Value = CE->getValue();
1882  // i64 value with each byte being either 0 or 0xff.
1883  for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1884  if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1885  return true;
1886  }
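 // Worked example (illustrative): 0x00ff00ff00ff00ff is accepted because
 // every byte is either 0x00 or 0xff; 0x00ff00ff00ff00fe is rejected on the
 // first iteration since its low byte is 0xfe.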
1887 
1888  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1889  // Add as immediates when possible. Null MCExpr = 0.
1890  if (!Expr)
1891  Inst.addOperand(MCOperand::createImm(0));
1892  else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1893  Inst.addOperand(MCOperand::createImm(CE->getValue()));
1894  else
1895  Inst.addOperand(MCOperand::createExpr(Expr));
1896  }
1897 
1898  void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
1899  assert(N == 1 && "Invalid number of operands!");
1900  addExpr(Inst, getImm());
1901  }
1902 
1903  void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
1904  assert(N == 1 && "Invalid number of operands!");
1905  addExpr(Inst, getImm());
1906  }
1907 
1908  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1909  assert(N == 2 && "Invalid number of operands!");
1910  Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
1911  unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1912  Inst.addOperand(MCOperand::createReg(RegNum));
1913  }
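 // Illustrative effect: "addeq r0, r0, r1" adds the immediate ARMCC::EQ plus
 // CPSR as the predicate register, whereas an unconditional add (ARMCC::AL)
 // adds ARMCC::AL plus register 0, marking the predicate as unused.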
1914 
1915  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1916  assert(N == 1 && "Invalid number of operands!");
1917  Inst.addOperand(MCOperand::createImm(getCoproc()));
1918  }
1919 
1920  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1921  assert(N == 1 && "Invalid number of operands!");
1922  Inst.addOperand(MCOperand::createImm(getCoproc()));
1923  }
1924 
1925  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1926  assert(N == 1 && "Invalid number of operands!");
1927  Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
1928  }
1929 
1930  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1931  assert(N == 1 && "Invalid number of operands!");
1932  Inst.addOperand(MCOperand::createImm(ITMask.Mask));
1933  }
1934 
1935  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1936  assert(N == 1 && "Invalid number of operands!");
1937  Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
1938  }
1939 
1940  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1941  assert(N == 1 && "Invalid number of operands!");
1942  Inst.addOperand(MCOperand::createReg(getReg()));
1943  }
1944 
1945  void addRegOperands(MCInst &Inst, unsigned N) const {
1946  assert(N == 1 && "Invalid number of operands!");
1947  Inst.addOperand(MCOperand::createReg(getReg()));
1948  }
1949 
1950  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1951  assert(N == 3 && "Invalid number of operands!");
1952  assert(isRegShiftedReg() &&
1953  "addRegShiftedRegOperands() on non-RegShiftedReg!");
1954  Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
1955  Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
1956  Inst.addOperand(MCOperand::createImm(
1957  ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1958  }
1959 
1960  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1961  assert(N == 2 && "Invalid number of operands!");
1962  assert(isRegShiftedImm() &&
1963  "addRegShiftedImmOperands() on non-RegShiftedImm!");
1964  Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
1965  // Shift of #32 is encoded as 0 where permitted
1966  unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
1967  Inst.addOperand(MCOperand::createImm(
1968  ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
1969  }
1970 
1971  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1972  assert(N == 1 && "Invalid number of operands!");
1973  Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
1974  ShifterImm.Imm));
1975  }
1976 
1977  void addRegListOperands(MCInst &Inst, unsigned N) const {
1978  assert(N == 1 && "Invalid number of operands!");
1979  const SmallVectorImpl<unsigned> &RegList = getRegList();
1980  for (SmallVectorImpl<unsigned>::const_iterator
1981  I = RegList.begin(), E = RegList.end(); I != E; ++I)
1982  Inst.addOperand(MCOperand::createReg(*I));
1983  }
1984 
1985  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1986  addRegListOperands(Inst, N);
1987  }
1988 
1989  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1990  addRegListOperands(Inst, N);
1991  }
1992 
1993  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1994  assert(N == 1 && "Invalid number of operands!");
1995  // Encoded as val>>3. The printer handles display as 8, 16, 24.
1996  Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
1997  }
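 // Worked example (illustrative): "sxtb r0, r1, ror #16" stores RotImm.Imm
 // as 16, so the operand added here is 16 >> 3 == 2; per the comment above,
 // the printer displays the encoded value again as 8, 16, or 24.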
1998 
1999  void addModImmOperands(MCInst &Inst, unsigned N) const {
2000  assert(N == 1 && "Invalid number of operands!");
2001 
2002  // Support for fixups (MCFixup)
2003  if (isImm())
2004  return addImmOperands(Inst, N);
2005 
2006  Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
2007  }
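 // Worked example (illustrative, assuming ModImm.Rot holds the rotate
 // amount): the constant 0x003f0000 is 0x3f rotated right by 16, so
 // Bits == 0x3f and Rot == 16, giving 0x3f | (16 << 7) == 0x83f, i.e. the
 // usual rot:imm8 modified-immediate encoding with the 4-bit rotate field
 // equal to 8.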
2008 
2009  void addModImmNotOperands(MCInst &Inst, unsigned N) const {
2010  assert(N == 1 && "Invalid number of operands!");
2011  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2012  uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
2013  Inst.addOperand(MCOperand::createImm(Enc));
2014  }
2015 
2016  void addModImmNegOperands(MCInst &Inst, unsigned N) const {
2017  assert(N == 1 && "Invalid number of operands!");
2018  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2019  uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
2020  Inst.addOperand(MCOperand::createImm(Enc));
2021  }
2022 
2023  void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
2024  assert(N == 1 && "Invalid number of operands!");
2025  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2026  uint32_t Val = -CE->getValue();
2027  Inst.addOperand(MCOperand::createImm(Val));
2028  }
2029 
2030  void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
2031  assert(N == 1 && "Invalid number of operands!");
2032  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2033  uint32_t Val = -CE->getValue();
2034  Inst.addOperand(MCOperand::createImm(Val));
2035  }
2036 
2037  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
2038  assert(N == 1 && "Invalid number of operands!");
2039  // Munge the lsb/width into a bitfield mask.
2040  unsigned lsb = Bitfield.LSB;
2041  unsigned width = Bitfield.Width;
2042  // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
2043  uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
2044  (32 - (lsb + width)));
2045  Inst.addOperand(MCOperand::createImm(Mask));
2046  }
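 // Worked example (illustrative): for "bfc r0, #8, #4", lsb == 8 and
 // width == 4, so the expression above evaluates to
 // ~(((0xffffffff >> 8) << 28) >> 20) == ~0x00000f00 == 0xfffff0ff,
 // a mask with only bits 8..11 clear.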
2047 
2048  void addImmOperands(MCInst &Inst, unsigned N) const {
2049  assert(N == 1 && "Invalid number of operands!");
2050  addExpr(Inst, getImm());
2051  }
2052 
2053  void addFBits16Operands(MCInst &Inst, unsigned N) const {
2054  assert(N == 1 && "Invalid number of operands!");
2055  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2056  Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
2057  }
2058 
2059  void addFBits32Operands(MCInst &Inst, unsigned N) const {
2060  assert(N == 1 && "Invalid number of operands!");
2061  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2062  Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
2063  }
2064 
2065  void addFPImmOperands(MCInst &Inst, unsigned N) const {
2066  assert(N == 1 && "Invalid number of operands!");
2067  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2068  int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
2069  Inst.addOperand(MCOperand::createImm(Val));
2070  }
2071 
2072  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
2073  assert(N == 1 && "Invalid number of operands!");
2074  // FIXME: We really want to scale the value here, but the LDRD/STRD
2075  // instructions don't encode operands that way yet.
2076  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2077  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2078  }
2079 
2080  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
2081  assert(N == 1 && "Invalid number of operands!");
2082  // The immediate is scaled by four in the encoding and is stored
2083  // in the MCInst as such. Lop off the low two bits here.
2084  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2085  Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2086  }
2087 
2088  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
2089  assert(N == 1 && "Invalid number of operands!");
2090  // The immediate is scaled by four in the encoding and is stored
2091  // in the MCInst as such. Lop off the low two bits here.
2092  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2093  Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
2094  }
2095 
2096  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
2097  assert(N == 1 && "Invalid number of operands!");
2098  // The immediate is scaled by four in the encoding and is stored
2099  // in the MCInst as such. Lop off the low two bits here.
2100  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2101  Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2102  }
2103 
2104  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
2105  assert(N == 1 && "Invalid number of operands!");
2106  // The constant encodes as the immediate-1, and we store in the instruction
2107  // the bits as encoded, so subtract off one here.
2108  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2109  Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2110  }
2111 
2112  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
2113  assert(N == 1 && "Invalid number of operands!");
2114  // The constant encodes as the immediate-1, and we store in the instruction
2115  // the bits as encoded, so subtract off one here.
2116  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2117  Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2118  }
2119 
2120  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
2121  assert(N == 1 && "Invalid number of operands!");
2122  // The constant encodes as the immediate, except for 32, which encodes as
2123  // zero.
2124  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2125  unsigned Imm = CE->getValue();
2126  Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
2127  }
2128 
2129  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
2130  assert(N == 1 && "Invalid number of operands!");
2131  // An ASR value of 32 encodes as 0, so that's how we want to add it to
2132  // the instruction as well.
2133  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2134  int Val = CE->getValue();
2135  Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
2136  }
2137 
2138  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
2139  assert(N == 1 && "Invalid number of operands!");
2140  // The operand is actually a t2_so_imm, but we have its bitwise
2141  // negation in the assembly source, so twiddle it here.
2142  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2143  Inst.addOperand(MCOperand::createImm(~CE->getValue()));
2144  }
2145 
2146  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
2147  assert(N == 1 && "Invalid number of operands!");
2148  // The operand is actually a t2_so_imm, but we have its
2149  // negation in the assembly source, so twiddle it here.
2150  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2151  Inst.addOperand(MCOperand::createImm(-CE->getValue()));
2152  }
2153 
2154  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
2155  assert(N == 1 && "Invalid number of operands!");
2156  // The operand is actually an imm0_4095, but we have its
2157  // negation in the assembly source, so twiddle it here.
2158  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2159  Inst.addOperand(MCOperand::createImm(-CE->getValue()));
2160  }
2161 
2162  void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
2163  if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2164  Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2165  return;
2166  }
2167 
2168  const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
2169  assert(SR && "Unknown value type!");
2170  Inst.addOperand(MCOperand::createExpr(SR));
2171  }
2172 
2173  void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
2174  assert(N == 1 && "Invalid number of operands!");
2175  if (isImm()) {
2176  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2177  if (CE) {
2178  Inst.addOperand(MCOperand::createImm(CE->getValue()));
2179  return;
2180  }
2181 
2182  const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
2183 
2184  assert(SR && "Unknown value type!");
2185  Inst.addOperand(MCOperand::createExpr(SR));
2186  return;
2187  }
2188 
2189  assert(isMem() && "Unknown value type!");
2190  assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
2191  Inst.addOperand(MCOperand::createImm(Memory.OffsetImm->getValue()));
2192  }
2193 
2194  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
2195  assert(N == 1 && "Invalid number of operands!");
2196  Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
2197  }
2198 
2199  void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2200  assert(N == 1 && "Invalid number of operands!");
2201  Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
2202  }
2203 
2204  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
2205  assert(N == 1 && "Invalid number of operands!");
2206  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2207  }
2208 
2209  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
2210  assert(N == 1 && "Invalid number of operands!");
2211  int32_t Imm = Memory.OffsetImm->getValue();
2212  Inst.addOperand(MCOperand::createImm(Imm));
2213  }
2214 
2215  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2216  assert(N == 1 && "Invalid number of operands!");
2217  assert(isImm() && "Not an immediate!");
2218 
2219  // If we have an immediate that's not a constant, treat it as a label
2220  // reference needing a fixup.
2221  if (!isa<MCConstantExpr>(getImm())) {
2222  Inst.addOperand(MCOperand::createExpr(getImm()));
2223  return;
2224  }
2225 
2226  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2227  int Val = CE->getValue();
2228  Inst.addOperand(MCOperand::createImm(Val));
2229  }
2230 
2231  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
2232  assert(N == 2 && "Invalid number of operands!");
2233  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2234  Inst.addOperand(MCOperand::createImm(Memory.Alignment));
2235  }
2236 
2237  void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2238  addAlignedMemoryOperands(Inst, N);
2239  }
2240 
2241  void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2242  addAlignedMemoryOperands(Inst, N);
2243  }
2244 
2245  void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2246  addAlignedMemoryOperands(Inst, N);
2247  }
2248 
2249  void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2250  addAlignedMemoryOperands(Inst, N);
2251  }
2252 
2253  void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2254  addAlignedMemoryOperands(Inst, N);
2255  }
2256 
2257  void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2258  addAlignedMemoryOperands(Inst, N);
2259  }
2260 
2261  void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2262  addAlignedMemoryOperands(Inst, N);
2263  }
2264 
2265  void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2266  addAlignedMemoryOperands(Inst, N);
2267  }
2268 
2269  void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2270  addAlignedMemoryOperands(Inst, N);
2271  }
2272 
2273  void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2274  addAlignedMemoryOperands(Inst, N);
2275  }
2276 
2277  void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
2278  addAlignedMemoryOperands(Inst, N);
2279  }
2280 
2281  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
2282  assert(N == 3 && "Invalid number of operands!");
2283  int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2284  if (!Memory.OffsetRegNum) {
2285  ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2286  // Special case for #-0
2287  if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2288  if (Val < 0) Val = -Val;
2289  Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2290  } else {
2291  // For register offset, we encode the shift type and negation flag
2292  // here.
2293  Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2294  Memory.ShiftImm, Memory.ShiftType);
2295  }
2296  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2297  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2298  Inst.addOperand(MCOperand::createImm(Val));
2299  }
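 // Illustrative use: "ldr r0, [r1, #-8]" takes the immediate path (AddSub is
 // sub and Val becomes 8, packed with ARM_AM::no_shift), while
 // "ldr r0, [r1, -r2, lsl #3]" takes the register path, packing the sub flag
 // with ShiftImm == 3 and the lsl shift type; either way the MCInst receives
 // the base register, the offset register (0 when absent), and the packed
 // value.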
2300 
2301  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
2302  assert(N == 2 && "Invalid number of operands!");
2303  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2304  assert(CE && "non-constant AM2OffsetImm operand!");
2305  int32_t Val = CE->getValue();
2306  ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2307  // Special case for #-0
2308  if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2309  if (Val < 0) Val = -Val;
2310  Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2311  Inst.addOperand(MCOperand::createReg(0));
2312  Inst.addOperand(MCOperand::createImm(Val));
2313  }
2314 
2315  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
2316  assert(N == 3 && "Invalid number of operands!");
2317  // If we have an immediate that's not a constant, treat it as a label
2318  // reference needing a fixup. If it is a constant, it's something else
2319  // and we reject it.
2320  if (isImm()) {
2321  Inst.addOperand(MCOperand::createExpr(getImm()));
2322  Inst.addOperand(MCOperand::createReg(0));
2323  Inst.addOperand(MCOperand::createImm(0));
2324  return;
2325  }
2326 
2327  int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2328  if (!Memory.OffsetRegNum) {
2329  ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2330  // Special case for #-0
2331  if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2332  if (Val < 0) Val = -Val;
2333  Val = ARM_AM::getAM3Opc(AddSub, Val);
2334  } else {
2335  // For register offset, we encode the shift type and negation flag
2336  // here.
2337  Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
2338  }
2339  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2340  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2341  Inst.addOperand(MCOperand::createImm(Val));
2342  }
2343 
2344  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
2345  assert(N == 2 && "Invalid number of operands!");
2346  if (Kind == k_PostIndexRegister) {
2347  int32_t Val =
2348  ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
2349  Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2350  Inst.addOperand(MCOperand::createImm(Val));
2351  return;
2352  }
2353 
2354  // Constant offset.
2355  const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
2356  int32_t Val = CE->getValue();
2357  ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2358  // Special case for #-0
2359  if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2360  if (Val < 0) Val = -Val;
2361  Val = ARM_AM::getAM3Opc(AddSub, Val);
2362  Inst.addOperand(MCOperand::createReg(0));
2363  Inst.addOperand(MCOperand::createImm(Val));
2364  }
2365 
2366  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
2367  assert(N == 2 && "Invalid number of operands!");
2368  // If we have an immediate that's not a constant, treat it as a label
2369  // reference needing a fixup. If it is a constant, it's something else
2370  // and we reject it.
2371  if (isImm()) {
2372  Inst.addOperand(MCOperand::createExpr(getImm()));
2373  Inst.addOperand(MCOperand::createImm(0));
2374  return;
2375  }
2376 
2377  // The lower two bits are always zero and as such are not encoded.
2378  int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2379  ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2380  // Special case for #-0
2381  if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2382  if (Val < 0) Val = -Val;
2383  Val = ARM_AM::getAM5Opc(AddSub, Val);
2384  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2385  Inst.addOperand(MCOperand::createImm(Val));
2386  }
2387 
2388  void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
2389  assert(N == 2 && "Invalid number of operands!");
2390  // If we have an immediate that's not a constant, treat it as a label
2391  // reference needing a fixup. If it is a constant, it's something else
2392  // and we reject it.
2393  if (isImm()) {
2394  Inst.addOperand(MCOperand::createExpr(getImm()));
2395  Inst.addOperand(MCOperand::createImm(0));
2396  return;
2397  }
2398 
2399  // The lower bit is always zero and as such is not encoded.
2400  int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 2 : 0;
2401  ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2402  // Special case for #-0
2403  if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2404  if (Val < 0) Val = -Val;
2405  Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
2406  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2407  Inst.addOperand(MCOperand::createImm(Val));
2408  }
2409 
2410  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
2411  assert(N == 2 && "Invalid number of operands!");
2412  // If we have an immediate that's not a constant, treat it as a label
2413  // reference needing a fixup. If it is a constant, it's something else
2414  // and we reject it.
2415  if (isImm()) {
2416  Inst.addOperand(MCOperand::createExpr(getImm()));
2417  Inst.addOperand(MCOperand::createImm(0));
2418  return;
2419  }
2420 
2421  int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2422  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2423  Inst.addOperand(MCOperand::createImm(Val));
2424  }
2425 
2426  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
2427  assert(N == 2 && "Invalid number of operands!");
2428  // The lower two bits are always zero and as such are not encoded.
2429  int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2430  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2431  Inst.addOperand(MCOperand::createImm(Val));
2432  }
2433 
2434  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2435  assert(N == 2 && "Invalid number of operands!");
2436  int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2437  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2438  Inst.addOperand(MCOperand::createImm(Val));
2439  }
2440 
2441  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2442  addMemImm8OffsetOperands(Inst, N);
2443  }
2444 
2445  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2446  addMemImm8OffsetOperands(Inst, N);
2447  }
2448 
2449  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2450  assert(N == 2 && "Invalid number of operands!");
2451  // If this is an immediate, it's a label reference.
2452  if (isImm()) {
2453  addExpr(Inst, getImm());
2454  Inst.addOperand(MCOperand::createImm(0));
2455  return;
2456  }
2457 
2458  // Otherwise, it's a normal memory reg+offset.
2459  int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2460  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2461  Inst.addOperand(MCOperand::createImm(Val));
2462  }
2463 
2464  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2465  assert(N == 2 && "Invalid number of operands!");
2466  // If this is an immediate, it's a label reference.
2467  if (isImm()) {
2468  addExpr(Inst, getImm());
2469  Inst.addOperand(MCOperand::createImm(0));
2470  return;
2471  }
2472 
2473  // Otherwise, it's a normal memory reg+offset.
2474  int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2475  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2476  Inst.addOperand(MCOperand::createImm(Val));
2477  }
2478 
2479  void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
2480  assert(N == 1 && "Invalid number of operands!");
2481  // This is a container for the immediate from which we will create the
2482  // constant pool.
2483  addExpr(Inst, getConstantPoolImm());
2484  return;
2485  }
2486 
2487  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
2488  assert(N == 2 && "Invalid number of operands!");
2489  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2490  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2491  }
2492 
2493  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
2494  assert(N == 2 && "Invalid number of operands!");
2495  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2496  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2497  }
2498 
2499  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2500  assert(N == 3 && "Invalid number of operands!");
2501  unsigned Val =
2502  ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2503  Memory.ShiftImm, Memory.ShiftType);
2504  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2505  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2506  Inst.addOperand(MCOperand::createImm(Val));
2507  }
2508 
2509  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2510  assert(N == 3 && "Invalid number of operands!");
2511  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2512  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2513  Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
2514  }
2515 
2516  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
2517  assert(N == 2 && "Invalid number of operands!");
2518  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2519  Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2520  }
2521 
2522  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
2523  assert(N == 2 && "Invalid number of operands!");
2524  int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2525  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2526  Inst.addOperand(MCOperand::createImm(Val));
2527  }
2528 
2529  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
2530  assert(N == 2 && "Invalid number of operands!");
2531  int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
2532  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2533  Inst.addOperand(MCOperand::createImm(Val));
2534  }
2535 
2536  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
2537  assert(N == 2 && "Invalid number of operands!");
2538  int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
2539  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2540  Inst.addOperand(MCOperand::createImm(Val));
2541  }
2542 
2543  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
2544  assert(N == 2 && "Invalid number of operands!");
2545  int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2546  Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2547  Inst.addOperand(MCOperand::createImm(Val));
2548  }
2549 
2550  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
2551  assert(N == 1 && "Invalid number of operands!");
2552  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2553  assert(CE && "non-constant post-idx-imm8 operand!");
2554  int Imm = CE->getValue();
2555  bool isAdd = Imm >= 0;
2556  if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
2557  Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
2558  Inst.addOperand(MCOperand::createImm(Imm));
2559  }
2560 
2561  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
2562  assert(N == 1 && "Invalid number of operands!");
2563  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2564  assert(CE && "non-constant post-idx-imm8s4 operand!");
2565  int Imm = CE->getValue();
2566  bool isAdd = Imm >= 0;
2567  if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
2568  // Immediate is scaled by 4.
2569  Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
2570  Inst.addOperand(MCOperand::createImm(Imm));
2571  }
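 // Worked example (illustrative): a post-indexed offset of #-8 gives
 // isAdd == false and (8 / 4) == 2, so the operand is 2; #+8 instead yields
 // 2 | (1 << 8) == 0x102, with bit 8 acting as the add/subtract flag.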
2572 
2573  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
2574  assert(N == 2 && "Invalid number of operands!");
2575  Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2576  Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
2577  }
2578 
2579  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
2580  assert(N == 2 && "Invalid number of operands!");
2581  Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2582  // The sign, shift type, and shift amount are encoded in a single operand
2583  // using the AM2 encoding helpers.
2584  ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
2585  unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
2586  PostIdxReg.ShiftTy);
2587  Inst.addOperand(MCOperand::createImm(Imm));
2588  }
2589 
2590  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
2591  assert(N == 1 && "Invalid number of operands!");
2592  Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
2593  }
2594 
2595  void addBankedRegOperands(MCInst &Inst, unsigned N) const {
2596  assert(N == 1 && "Invalid number of operands!");
2597  Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
2598  }
2599 
2600  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
2601  assert(N == 1 && "Invalid number of operands!");
2602  Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
2603  }
2604 
2605  void addVecListOperands(MCInst &Inst, unsigned N) const {
2606  assert(N == 1 && "Invalid number of operands!");
2607  Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
2608  }
2609 
2610  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
2611  assert(N == 2 && "Invalid number of operands!");
2612  Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
2613  Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
2614  }
2615 
2616  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
2617  assert(N == 1 && "Invalid number of operands!");
2618  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2619  }
2620 
2621  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
2622  assert(N == 1 && "Invalid number of operands!");
2623  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2624  }
2625 
2626  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
2627  assert(N == 1 && "Invalid number of operands!");
2628  Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2629  }
2630 
2631  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
2632  assert(N == 1 && "Invalid number of operands!");
2633  // The immediate encodes the type of constant as well as the value.
2634  // Mask in that this is an i8 splat.
2635  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2636  Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
2637  }
2638 
2639  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
2640  assert(N == 1 && "Invalid number of operands!");
2641  // The immediate encodes the type of constant as well as the value.
2642  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2643  unsigned Value = CE->getValue();
2644  Value = ARM_AM::encodeNEONi16splat(Value);
2645  Inst.addOperand(MCOperand::createImm(Value));
2646  }
2647 
2648  void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
2649  assert(N == 1 && "Invalid number of operands!");
2650  // The immediate encodes the type of constant as well as the value.
2651  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2652  unsigned Value = CE->getValue();
2653  Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
2654  Inst.addOperand(MCOperand::createImm(Value));
2655  }
2656 
2657  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
2658  assert(N == 1 && "Invalid number of operands!");
2659  // The immediate encodes the type of constant as well as the value.
2660  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2661  unsigned Value = CE->getValue();
2662  Value = ARM_AM::encodeNEONi32splat(Value);
2663  Inst.addOperand(MCOperand::createImm(Value));
2664  }
2665 
2666  void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
2667  assert(N == 1 && "Invalid number of operands!");
2668  // The immediate encodes the type of constant as well as the value.
2669  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2670  unsigned Value = CE->getValue();
2671  Value = ARM_AM::encodeNEONi32splat(~Value);
2672  Inst.addOperand(MCOperand::createImm(Value));
2673  }
2674 
2675  void addNEONinvByteReplicateOperands(MCInst &Inst, unsigned N) const {
2676  assert(N == 1 && "Invalid number of operands!");
2677  // The immediate encodes the type of constant as well as the value.
2678  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2679  unsigned Value = CE->getValue();
2680  assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
2681  Inst.getOpcode() == ARM::VMOVv16i8) &&
2682  "All vmvn instructions that want to replicate a non-zero byte "
2683  "must have been replaced with VMOVv8i8 or VMOVv16i8.");
2684  unsigned B = ((~Value) & 0xff);
2685  B |= 0xe00; // cmode = 0b1110
2686  Inst.addOperand(MCOperand::createImm(B));
2687  }
2688 
2689  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
2690  assert(N == 1 && "Invalid number of operands!");
2691  // The immediate encodes the type of constant as well as the value.
2692  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2693  unsigned Value = CE->getValue();
2694  if (Value >= 256 && Value <= 0xffff)
2695  Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2696  else if (Value > 0xffff && Value <= 0xffffff)
2697  Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2698  else if (Value > 0xffffff)
2699  Value = (Value >> 24) | 0x600;
2700  Inst.addOperand(MCOperand::createImm(Value));
2701  }
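 // Worked examples (illustrative): 0x00ab0000 hits the second branch with a
 // zero low byte, so the operand becomes (0x00ab0000 >> 16) | 0x400 == 0x4ab,
 // while 0x000012ff hits the first branch with a non-zero low byte, giving
 // (0x12ff >> 8) | 0xc00 == 0xc12; the high bits select the NEON cmode that
 // matches where the significant byte sits.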
2702 
2703  void addNEONvmovByteReplicateOperands(MCInst &Inst, unsigned N) const {
2704  assert(N == 1 && "Invalid number of operands!");
2705  // The immediate encodes the type of constant as well as the value.
2706  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2707  unsigned Value = CE->getValue();
2708  assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
2709  Inst.getOpcode() == ARM::VMOVv16i8) &&
2710  "All instructions that want to replicate a non-zero byte "
2711  "must have been replaced with VMOVv8i8 or VMOVv16i8.");
2712  unsigned B = Value & 0xff;
2713  B |= 0xe00; // cmode = 0b1110
2714  Inst.addOperand(MCOperand::createImm(B));
2715  }
2716 
2717  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
2718  assert(N == 1 && "Invalid number of operands!");
2719  // The immediate encodes the type of constant as well as the value.
2720  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2721  unsigned Value = ~CE->getValue();
2722  if (Value >= 256 && Value <= 0xffff)
2723  Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2724  else if (Value > 0xffff && Value <= 0xffffff)
2725  Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2726  else if (Value > 0xffffff)
2727  Value = (Value >> 24) | 0x600;
2728  Inst.addOperand(MCOperand::createImm(Value));
2729  }
2730 
2731  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
2732  assert(N == 1 && "Invalid number of operands!");
2733  // The immediate encodes the type of constant as well as the value.
2734  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2735  uint64_t Value = CE->getValue();
2736  unsigned Imm = 0;
2737  for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
2738  Imm |= (Value & 1) << i;
2739  }
2740  Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
2741  }
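 // Worked example (illustrative): for 0x00ff00ff00ff00ff each 0xff byte
 // contributes a 1 and each 0x00 byte a 0, so Imm collapses to 0b01010101
 // (0x55) and the operand becomes 0x55 | 0x1e00 == 0x1e55.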
2742 
2743  void print(raw_ostream &OS) const override;
2744 
2745  static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
2746  auto Op = make_unique<ARMOperand>(k_ITCondMask);
2747  Op->ITMask.Mask = Mask;
2748  Op->StartLoc = S;
2749  Op->EndLoc = S;
2750  return Op;
2751  }
2752 
2753  static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
2754  SMLoc S) {
2755  auto Op = make_unique<ARMOperand>(k_CondCode);
2756  Op->CC.Val = CC;
2757  Op->StartLoc = S;
2758  Op->EndLoc = S;
2759  return Op;
2760  }
2761 
2762  static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
2763  auto Op = make_unique<ARMOperand>(k_CoprocNum);
2764  Op->Cop.Val = CopVal;
2765  Op->StartLoc = S;
2766  Op->EndLoc = S;
2767  return Op;
2768  }
2769 
2770  static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
2771  auto Op = make_unique<ARMOperand>(k_CoprocReg);
2772  Op->Cop.Val = CopVal;
2773  Op->StartLoc = S;
2774  Op->EndLoc = S;
2775  return Op;
2776  }
2777 
2778  static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
2779  SMLoc E) {
2780  auto Op = make_unique<ARMOperand>(k_CoprocOption);
2781  Op->Cop.Val = Val;
2782  Op->StartLoc = S;
2783  Op->EndLoc = E;
2784  return Op;
2785  }
2786 
2787  static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
2788  auto Op = make_unique<ARMOperand>(k_CCOut);
2789  Op->Reg.RegNum = RegNum;
2790  Op->StartLoc = S;
2791  Op->EndLoc = S;
2792  return Op;
2793  }
2794 
2795  static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
2796  auto Op = make_unique<ARMOperand>(k_Token);
2797  Op->Tok.Data = Str.data();
2798  Op->Tok.Length = Str.size();
2799  Op->StartLoc = S;
2800  Op->EndLoc = S;
2801  return Op;
2802  }
2803 
2804  static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
2805  SMLoc E) {
2806  auto Op = make_unique<ARMOperand>(k_Register);
2807  Op->Reg.RegNum = RegNum;
2808  Op->StartLoc = S;
2809  Op->EndLoc = E;
2810  return Op;
2811  }
2812 
2813  static std::unique_ptr<ARMOperand>
2814  CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
2815  unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
2816  SMLoc E) {
2817  auto Op = make_unique<ARMOperand>(k_ShiftedRegister);
2818  Op->RegShiftedReg.ShiftTy = ShTy;
2819  Op->RegShiftedReg.SrcReg = SrcReg;
2820  Op->RegShiftedReg.ShiftReg = ShiftReg;
2821  Op->RegShiftedReg.ShiftImm = ShiftImm;
2822  Op->StartLoc = S;
2823  Op->EndLoc = E;
2824  return Op;
2825  }
2826 
2827  static std::unique_ptr<ARMOperand>
2828  CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
2829  unsigned ShiftImm, SMLoc S, SMLoc E) {
2830  auto Op = make_unique<ARMOperand>(k_ShiftedImmediate);
2831  Op->RegShiftedImm.ShiftTy = ShTy;
2832  Op->RegShiftedImm.SrcReg = SrcReg;
2833  Op->RegShiftedImm.ShiftImm = ShiftImm;
2834  Op->StartLoc = S;
2835  Op->EndLoc = E;
2836  return Op;
2837  }
2838 
2839  static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
2840  SMLoc S, SMLoc E) {
2841  auto Op = make_unique<ARMOperand>(k_ShifterImmediate);
2842  Op->ShifterImm.isASR = isASR;
2843  Op->ShifterImm.Imm = Imm;
2844  Op->StartLoc = S;
2845  Op->EndLoc = E;
2846  return Op;
2847  }
2848 
2849  static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
2850  SMLoc E) {
2851  auto Op = make_unique<ARMOperand>(k_RotateImmediate);
2852  Op->RotImm.Imm = Imm;
2853  Op->StartLoc = S;
2854  Op->EndLoc = E;
2855  return Op;
2856  }
2857 
2858  static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
2859  SMLoc S, SMLoc E) {
2860  auto Op = make_unique<ARMOperand>(k_ModifiedImmediate);
2861  Op->ModImm.Bits = Bits;
2862  Op->ModImm.Rot = Rot;
2863  Op->StartLoc = S;
2864  Op->EndLoc = E;
2865  return Op;
2866  }
2867 
2868  static std::unique_ptr<ARMOperand>
2869  CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2870  auto Op = make_unique<ARMOperand>(k_ConstantPoolImmediate);
2871  Op->Imm.Val = Val;
2872  Op->StartLoc = S;
2873  Op->EndLoc = E;
2874  return Op;
2875  }
2876 
2877  static std::unique_ptr<ARMOperand>
2878  CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
2879  auto Op = make_unique<ARMOperand>(k_BitfieldDescriptor);
2880  Op->Bitfield.LSB = LSB;
2881  Op->Bitfield.Width = Width;
2882  Op->StartLoc = S;
2883  Op->EndLoc = E;
2884  return Op;
2885  }
2886 
2887  static std::unique_ptr<ARMOperand>
2888  CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
2889  SMLoc StartLoc, SMLoc EndLoc) {
2890  assert(Regs.size() > 0 && "RegList contains no registers?");
2891  KindTy Kind = k_RegisterList;
2892 
2893  if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().second))
2894  Kind = k_DPRRegisterList;
2895  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2896  contains(Regs.front().second))
2897  Kind = k_SPRRegisterList;
2898 
2899  // Sort based on the register encoding values.
2900  array_pod_sort(Regs.begin(), Regs.end());
2901 
2902  auto Op = make_unique<ARMOperand>(Kind);
2903  for (SmallVectorImpl<std::pair<unsigned, unsigned>>::const_iterator
2904  I = Regs.begin(), E = Regs.end(); I != E; ++I)
2905  Op->Registers.push_back(I->second);
2906  Op->StartLoc = StartLoc;
2907  Op->EndLoc = EndLoc;
2908  return Op;
2909  }
2910 
2911  static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
2912  unsigned Count,
2913  bool isDoubleSpaced,
2914  SMLoc S, SMLoc E) {
2915  auto Op = make_unique<ARMOperand>(k_VectorList);
2916  Op->VectorList.RegNum = RegNum;
2917  Op->VectorList.Count = Count;
2918  Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2919  Op->StartLoc = S;
2920  Op->EndLoc = E;
2921  return Op;
2922  }
2923 
2924  static std::unique_ptr<ARMOperand>
2925  CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
2926  SMLoc S, SMLoc E) {
2927  auto Op = make_unique<ARMOperand>(k_VectorListAllLanes);
2928  Op->VectorList.RegNum = RegNum;
2929  Op->VectorList.Count = Count;
2930  Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2931  Op->StartLoc = S;
2932  Op->EndLoc = E;
2933  return Op;
2934  }
2935 
2936  static std::unique_ptr<ARMOperand>
2937  CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
2938  bool isDoubleSpaced, SMLoc S, SMLoc E) {
2939  auto Op = make_unique<ARMOperand>(k_VectorListIndexed);
2940  Op->VectorList.RegNum = RegNum;
2941  Op->VectorList.Count = Count;
2942  Op->VectorList.LaneIndex = Index;
2943  Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2944  Op->StartLoc = S;
2945  Op->EndLoc = E;
2946  return Op;
2947  }
2948 
2949  static std::unique_ptr<ARMOperand>
2950  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2951  auto Op = make_unique<ARMOperand>(k_VectorIndex);
2952  Op->VectorIndex.Val = Idx;
2953  Op->StartLoc = S;
2954  Op->EndLoc = E;
2955  return Op;
2956  }
2957 
2958  static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
2959  SMLoc E) {
2960  auto Op = make_unique<ARMOperand>(k_Immediate);
2961  Op->Imm.Val = Val;
2962  Op->StartLoc = S;
2963  Op->EndLoc = E;
2964  return Op;
2965  }
2966 
2967  static std::unique_ptr<ARMOperand>
2968  CreateMem(unsigned BaseRegNum, const MCConstantExpr *OffsetImm,
2969  unsigned OffsetRegNum, ARM_AM::ShiftOpc ShiftType,
2970  unsigned ShiftImm, unsigned Alignment, bool isNegative, SMLoc S,
2971  SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
2972  auto Op = make_unique<ARMOperand>(k_Memory);
2973  Op->Memory.BaseRegNum = BaseRegNum;
2974  Op->Memory.OffsetImm = OffsetImm;
2975  Op->Memory.OffsetRegNum = OffsetRegNum;
2976  Op->Memory.ShiftType = ShiftType;
2977  Op->Memory.ShiftImm = ShiftImm;
2978  Op->Memory.Alignment = Alignment;
2979  Op->Memory.isNegative = isNegative;
2980  Op->StartLoc = S;
2981  Op->EndLoc = E;
2982  Op->AlignmentLoc = AlignmentLoc;
2983  return Op;
2984  }
2985 
2986  static std::unique_ptr<ARMOperand>
2987  CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
2988  unsigned ShiftImm, SMLoc S, SMLoc E) {
2989  auto Op = make_unique<ARMOperand>(k_PostIndexRegister);
2990  Op->PostIdxReg.RegNum = RegNum;
2991  Op->PostIdxReg.isAdd = isAdd;
2992  Op->PostIdxReg.ShiftTy = ShiftTy;
2993  Op->PostIdxReg.ShiftImm = ShiftImm;
2994  Op->StartLoc = S;
2995  Op->EndLoc = E;
2996  return Op;
2997  }
2998 
2999  static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
3000  SMLoc S) {
3001  auto Op = make_unique<ARMOperand>(k_MemBarrierOpt);
3002  Op->MBOpt.Val = Opt;
3003  Op->StartLoc = S;
3004  Op->EndLoc = S;
3005  return Op;
3006  }
3007 
3008  static std::unique_ptr<ARMOperand>
3009  CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
3010  auto Op = make_unique<ARMOperand>(k_InstSyncBarrierOpt);
3011  Op->ISBOpt.Val = Opt;
3012  Op->StartLoc = S;
3013  Op->EndLoc = S;
3014  return Op;
3015  }
3016 
3017  static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
3018  SMLoc S) {
3019  auto Op = make_unique<ARMOperand>(k_ProcIFlags);
3020  Op->IFlags.Val = IFlags;
3021  Op->StartLoc = S;
3022  Op->EndLoc = S;
3023  return Op;
3024  }
3025 
3026  static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
3027  auto Op = make_unique<ARMOperand>(k_MSRMask);
3028  Op->MMask.Val = MMask;
3029  Op->StartLoc = S;
3030  Op->EndLoc = S;
3031  return Op;
3032  }
3033 
3034  static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
3035  auto Op = make_unique<ARMOperand>(k_BankedReg);
3036  Op->BankedReg.Val = Reg;
3037  Op->StartLoc = S;
3038  Op->EndLoc = S;
3039  return Op;
3040  }
3041 };
3042 
3043 } // end anonymous namespace.
3044 
3045 void ARMOperand::print(raw_ostream &OS) const {
3046  switch (Kind) {
3047  case k_CondCode:
3048  OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
3049  break;
3050  case k_CCOut:
3051  OS << "<ccout " << getReg() << ">";
3052  break;
3053  case k_ITCondMask: {
3054  static const char *const MaskStr[] = {
3055  "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
3056  "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
3057  };
3058  assert((ITMask.Mask & 0xf) == ITMask.Mask);
3059  OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
3060  break;
3061  }
3062  case k_CoprocNum:
3063  OS << "<coprocessor number: " << getCoproc() << ">";
3064  break;
3065  case k_CoprocReg:
3066  OS << "<coprocessor register: " << getCoproc() << ">";
3067  break;
3068  case k_CoprocOption:
3069  OS << "<coprocessor option: " << CoprocOption.Val << ">";
3070  break;
3071  case k_MSRMask:
3072  OS << "<mask: " << getMSRMask() << ">";
3073  break;
3074  case k_BankedReg:
3075  OS << "<banked reg: " << getBankedReg() << ">";
3076  break;
3077  case k_Immediate:
3078  OS << *getImm();
3079  break;
3080  case k_MemBarrierOpt:
3081  OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
3082  break;
3083  case k_InstSyncBarrierOpt:
3084  OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
3085  break;
3086  case k_Memory:
3087  OS << "<memory "
3088  << " base:" << Memory.BaseRegNum;
3089  OS << ">";
3090  break;
3091  case k_PostIndexRegister:
3092  OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
3093  << PostIdxReg.RegNum;
3094  if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
3095  OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
3096  << PostIdxReg.ShiftImm;
3097  OS << ">";
3098  break;
3099  case k_ProcIFlags: {
3100  OS << "<ARM_PROC::";
3101  unsigned IFlags = getProcIFlags();
3102  for (int i=2; i >= 0; --i)
3103  if (IFlags & (1 << i))
3104  OS << ARM_PROC::IFlagsToString(1 << i);
3105  OS << ">";
3106  break;
3107  }
3108  case k_Register:
3109  OS << "<register " << getReg() << ">";
3110  break;
3111  case k_ShifterImmediate:
3112  OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
3113  << " #" << ShifterImm.Imm << ">";
3114  break;
3115  case k_ShiftedRegister:
3116  OS << "<so_reg_reg "
3117  << RegShiftedReg.SrcReg << " "
3118  << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
3119  << " " << RegShiftedReg.ShiftReg << ">";
3120  break;
3121  case k_ShiftedImmediate:
3122  OS << "<so_reg_imm "
3123  << RegShiftedImm.SrcReg << " "
3124  << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
3125  << " #" << RegShiftedImm.ShiftImm << ">";
3126  break;
3127  case k_RotateImmediate:
3128  OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
3129  break;
3130  case k_ModifiedImmediate:
3131  OS << "<mod_imm #" << ModImm.Bits << ", #"
3132  << ModImm.Rot << ">";
3133  break;
3134  case k_ConstantPoolImmediate:
3135  OS << "<constant_pool_imm #" << *getConstantPoolImm() << ">";
3136  break;
3137  case k_BitfieldDescriptor:
3138  OS << "<bitfield " << "lsb: " << Bitfield.LSB
3139  << ", width: " << Bitfield.Width << ">";
3140  break;
3141  case k_RegisterList:
3142  case k_DPRRegisterList:
3143  case k_SPRRegisterList: {
3144  OS << "<register_list ";
3145 
3146  const SmallVectorImpl<unsigned> &RegList = getRegList();
3147  for (SmallVectorImpl<unsigned>::const_iterator
3148  I = RegList.begin(), E = RegList.end(); I != E; ) {
3149  OS << *I;
3150  if (++I < E) OS << ", ";
3151  }
3152 
3153  OS << ">";
3154  break;
3155  }
3156  case k_VectorList:
3157  OS << "<vector_list " << VectorList.Count << " * "
3158  << VectorList.RegNum << ">";
3159  break;
3160  case k_VectorListAllLanes:
3161  OS << "<vector_list(all lanes) " << VectorList.Count << " * "
3162  << VectorList.RegNum << ">";
3163  break;
3164  case k_VectorListIndexed:
3165  OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
3166  << VectorList.Count << " * " << VectorList.RegNum << ">";
3167  break;
3168  case k_Token:
3169  OS << "'" << getToken() << "'";
3170  break;
3171  case k_VectorIndex:
3172  OS << "<vectorindex " << getVectorIndex() << ">";
3173  break;
3174  }
3175 }
3176 
3177 /// @name Auto-generated Match Functions
3178 /// {
3179 
3180 static unsigned MatchRegisterName(StringRef Name);
3181 
3182 /// }
3183 
3184 bool ARMAsmParser::ParseRegister(unsigned &RegNo,
3185  SMLoc &StartLoc, SMLoc &EndLoc) {
3186  const AsmToken &Tok = getParser().getTok();
3187  StartLoc = Tok.getLoc();
3188  EndLoc = Tok.getEndLoc();
3189  RegNo = tryParseRegister();
3190 
3191  return (RegNo == (unsigned)-1);
3192 }
3193 
3194 /// Try to parse a register name. The token must be an Identifier when called,
3195 /// and if it is a register name the token is eaten and the register number is
3196 /// returned. Otherwise return -1.
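 // For illustration, the gas-compatible aliases handled below mean that,
 // e.g., "mov a1, v1" parses the same as "mov r0, r4", and "fp"/"sl"/"sb"
 // map to r11/r10/r9 respectively.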
3197 int ARMAsmParser::tryParseRegister() {
3198  MCAsmParser &Parser = getParser();
3199  const AsmToken &Tok = Parser.getTok();
3200  if (Tok.isNot(AsmToken::Identifier)) return -1;
3201 
3202  std::string lowerCase = Tok.getString().lower();
3203  unsigned RegNum = MatchRegisterName(lowerCase);
3204  if (!RegNum) {
3205  RegNum = StringSwitch<unsigned>(lowerCase)
3206  .Case("r13", ARM::SP)
3207  .Case("r14", ARM::LR)
3208  .Case("r15", ARM::PC)
3209  .Case("ip", ARM::R12)
3210  // Additional register name aliases for 'gas' compatibility.
3211  .Case("a1", ARM::R0)
3212  .Case("a2", ARM::R1)
3213  .Case("a3", ARM::R2)
3214  .Case("a4", ARM::R3)
3215  .Case("v1", ARM::R4)
3216  .Case("v2", ARM::R5)
3217  .Case("v3", ARM::R6)
3218  .Case("v4", ARM::R7)
3219  .Case("v5", ARM::R8)
3220  .Case("v6", ARM::R9)
3221  .Case("v7", ARM::R10)
3222  .Case("v8", ARM::R11)
3223  .Case("sb", ARM::R9)
3224  .Case("sl", ARM::R10)
3225  .Case("fp", ARM::R11)
3226  .Default(0);
3227  }
3228  if (!RegNum) {
3229  // Check for aliases registered via .req. Canonicalize to lower case.
3230  // That's more consistent since register names are case insensitive, and
3231  // it's how the original entry was passed in from MC/MCParser/AsmParser.
3232  StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
3233  // If no match, return failure.
3234  if (Entry == RegisterReqs.end())
3235  return -1;
3236  Parser.Lex(); // Eat identifier token.
3237  return Entry->getValue();
3238  }
3239 
3240  // Some FPUs only have 16 D registers, so D16-D31 are invalid
3241  if (hasD16() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
3242  return -1;
3243 
3244  Parser.Lex(); // Eat identifier token.
3245 
3246  return RegNum;
3247 }
3248 
3249 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
3250 // If a recoverable error occurs, return 1. If an irrecoverable error
3251 // occurs, return -1. An irrecoverable error is one where tokens have been
3252 // consumed in the process of trying to parse the shifter (i.e., when it is
3253 // indeed a shifter operand, but malformed).
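 // For illustration, the shifted-register operands handled here look like,
 // e.g. (standard ARM/UAL syntax):
 //   add r0, r1, r2, lsl #3   @ shift by immediate -> so_reg_imm
 //   add r0, r1, r2, lsl r3   @ shift by register  -> so_reg_reg
 //   mov r0, r1, rrx          @ rrx takes no shift amount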
3254 int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
3255  MCAsmParser &Parser = getParser();
3256  SMLoc S = Parser.getTok().getLoc();
3257  const AsmToken &Tok = Parser.getTok();
3258  if (Tok.isNot(AsmToken::Identifier))
3259  return -1;
3260 
3261  std::string lowerCase = Tok.getString().lower();
3263  .Case("asl", ARM_AM::lsl)
3264  .Case("lsl", ARM_AM::lsl)
3265  .Case("lsr", ARM_AM::lsr)
3266  .Case("asr", ARM_AM::asr)
3267  .Case("ror", ARM_AM::ror)
3268  .Case("rrx", ARM_AM::rrx)
3269  .Default(ARM_AM::no_shift);
3270 
3271  if (ShiftTy == ARM_AM::no_shift)
3272  return 1;
3273 
3274  Parser.Lex(); // Eat the operator.
3275 
3276  // The source register for the shift has already been added to the
3277  // operand list, so we need to pop it off and combine it into the shifted
3278  // register operand instead.
3279  std::unique_ptr<ARMOperand> PrevOp(
3280  (ARMOperand *)Operands.pop_back_val().release());
3281  if (!PrevOp->isReg())
3282  return Error(PrevOp->getStartLoc(), "shift must be of a register");
3283  int SrcReg = PrevOp->getReg();
3284 
3285  SMLoc EndLoc;
3286  int64_t Imm = 0;
3287  int ShiftReg = 0;
3288  if (ShiftTy == ARM_AM::rrx) {
3289  // RRX doesn't have an explicit shift amount. The encoder expects
3290  // the shift register to be the same as the source register. Seems odd,
3291  // but OK.
3292  ShiftReg = SrcReg;
3293  } else {
3294  // Figure out if this is shifted by a constant or a register (for non-RRX).
3295  if (Parser.getTok().is(AsmToken::Hash) ||
3296  Parser.getTok().is(AsmToken::Dollar)) {
3297  Parser.Lex(); // Eat hash.
3298  SMLoc ImmLoc = Parser.getTok().getLoc();
3299  const MCExpr *ShiftExpr = nullptr;
3300  if (getParser().parseExpression(ShiftExpr, EndLoc)) {
3301  Error(ImmLoc, "invalid immediate shift value");
3302  return -1;
3303  }
3304  // The expression must be evaluatable as an immediate.
3305  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
3306  if (!CE) {
3307  Error(ImmLoc, "invalid immediate shift value");
3308  return -1;
3309  }
3310  // Range check the immediate.
3311  // lsl, ror: 0 <= imm <= 31
3312  // lsr, asr: 0 <= imm <= 32
3313  Imm = CE->getValue();
3314  if (Imm < 0 ||
3315  ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
3316  ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
3317  Error(ImmLoc, "immediate shift value out of range");
3318  return -1;
3319  }
3320  // shift by zero is a nop. Always send it through as lsl.
3321  // ('as' compatibility)
3322  if (Imm == 0)
3323  ShiftTy = ARM_AM::lsl;
3324  } else if (Parser.getTok().is(AsmToken::Identifier)) {
3325  SMLoc L = Parser.getTok().getLoc();
3326  EndLoc = Parser.getTok().getEndLoc();
3327  ShiftReg = tryParseRegister();
3328  if (ShiftReg == -1) {
3329  Error(L, "expected immediate or register in shift operand");
3330  return -1;
3331  }
3332  } else {
3333  Error(Parser.getTok().getLoc(),
3334  "expected immediate or register in shift operand");
3335  return -1;
3336  }
3337  }
3338 
3339  if (ShiftReg && ShiftTy != ARM_AM::rrx)
3340  Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
3341  ShiftReg, Imm,
3342  S, EndLoc));
3343  else
3344  Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
3345  S, EndLoc));
3346 
3347  return 0;
3348 }
3349 
3350 /// Try to parse a register name. The token must be an Identifier when called.
3351 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
3352 /// if there is a "writeback". Returns 'true' if it's not a register.
3353 ///
3354 /// TODO this is likely to change to allow different register types and or to
3355 /// parse for a specific register type.
3356 bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
3357  MCAsmParser &Parser = getParser();
3358  const AsmToken &RegTok = Parser.getTok();
3359  int RegNo = tryParseRegister();
3360  if (RegNo == -1)
3361  return true;
3362 
3363  Operands.push_back(ARMOperand::CreateReg(RegNo, RegTok.getLoc(),
3364  RegTok.getEndLoc()));
3365 
3366  const AsmToken &ExclaimTok = Parser.getTok();
3367  if (ExclaimTok.is(AsmToken::Exclaim)) {
3368  Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
3369  ExclaimTok.getLoc()));
3370  Parser.Lex(); // Eat exclaim token
3371  return false;
3372  }
3373 
3374  // Also check for an index operand. This is only legal for vector registers,
3375  // but that'll get caught OK in operand matching, so we don't need to
3376  // explicitly filter everything else out here.
3377  if (Parser.getTok().is(AsmToken::LBrac)) {
3378  SMLoc SIdx = Parser.getTok().getLoc();
3379  Parser.Lex(); // Eat left bracket token.
3380 
3381  const MCExpr *ImmVal;
3382  if (getParser().parseExpression(ImmVal))
3383  return true;
3384  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3385  if (!MCE)
3386  return TokError("immediate value expected for vector index");
3387 
3388  if (Parser.getTok().isNot(AsmToken::RBrac))
3389  return Error(Parser.getTok().getLoc(), "']' expected");
3390 
3391  SMLoc E = Parser.getTok().getEndLoc();
3392  Parser.Lex(); // Eat right bracket token.
3393 
3394  Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
3395  SIdx, E,
3396  getContext()));
3397  }
3398 
3399  return false;
3400 }
3401 
3402 /// MatchCoprocessorOperandName - Try to parse a coprocessor-related
3403 /// instruction with a symbolic operand name.
3404 /// We accept "crN" syntax for GAS compatibility.
3405 /// <operand-name> ::= <prefix><number>
3406 /// If CoprocOp is 'c', then:
3407 /// <prefix> ::= c | cr
3408 /// If CoprocOp is 'p', then :
3409 /// <prefix> ::= p
3410 /// <number> ::= integer in range [0, 15]
3411 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
3412  // Use the same layout as the tablegen'erated register name matcher. Ugly,
3413  // but efficient.
3414  if (Name.size() < 2 || Name[0] != CoprocOp)
3415  return -1;
3416  Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
3417 
3418  switch (Name.size()) {
3419  default: return -1;
3420  case 1:
3421  switch (Name[0]) {
3422  default: return -1;
3423  case '0': return 0;
3424  case '1': return 1;
3425  case '2': return 2;
3426  case '3': return 3;
3427  case '4': return 4;
3428  case '5': return 5;
3429  case '6': return 6;
3430  case '7': return 7;
3431  case '8': return 8;
3432  case '9': return 9;
3433  }
3434  case 2:
3435  if (Name[0] != '1')
3436  return -1;
3437  switch (Name[1]) {
3438  default: return -1;
3439  // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
3440  // However, old cores (v5/v6) did use them in that way.
3441  case '0': return 10;
3442  case '1': return 11;
3443  case '2': return 12;
3444  case '3': return 13;
3445  case '4': return 14;
3446  case '5': return 15;
3447  }
3448  }
3449 }
3450 
3451 /// parseITCondCode - Try to parse a condition code for an IT instruction.
3452 OperandMatchResultTy
3453 ARMAsmParser::parseITCondCode(OperandVector &Operands) {
3454  MCAsmParser &Parser = getParser();
3455  SMLoc S = Parser.getTok().getLoc();
3456  const AsmToken &Tok = Parser.getTok();
3457  if (!Tok.is(AsmToken::Identifier))
3458  return MatchOperand_NoMatch;
3459  unsigned CC = ARMCondCodeFromString(Tok.getString());
3460  if (CC == ~0U)
3461  return MatchOperand_NoMatch;
3462  Parser.Lex(); // Eat the token.
3463 
3464  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
3465 
3466  return MatchOperand_Success;
3467 }
3468 
3469 /// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
3470 /// token must be an Identifier when called, and if it is a coprocessor
3471 /// number, the token is eaten and the operand is added to the operand list.
3472 OperandMatchResultTy
3473 ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
3474  MCAsmParser &Parser = getParser();
3475  SMLoc S = Parser.getTok().getLoc();
3476  const AsmToken &Tok = Parser.getTok();
3477  if (Tok.isNot(AsmToken::Identifier))
3478  return MatchOperand_NoMatch;
3479 
3480  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
3481  if (Num == -1)
3482  return MatchOperand_NoMatch;
3483  // ARMv7 and v8 don't allow cp10/cp11 due to VFP/NEON specific instructions
3484  if ((hasV7Ops() || hasV8Ops()) && (Num == 10 || Num == 11))
3485  return MatchOperand_NoMatch;
3486 
3487  Parser.Lex(); // Eat identifier token.
3488  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
3489  return MatchOperand_Success;
3490 }
3491 
3492 /// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
3493 /// token must be an Identifier when called, and if it is a coprocessor
3494 /// register, the token is eaten and the operand is added to the operand list.
3495 OperandMatchResultTy
3496 ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
3497  MCAsmParser &Parser = getParser();
3498  SMLoc S = Parser.getTok().getLoc();
3499  const AsmToken &Tok = Parser.getTok();
3500  if (Tok.isNot(AsmToken::Identifier))
3501  return MatchOperand_NoMatch;
3502 
3503  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
3504  if (Reg == -1)
3505  return MatchOperand_NoMatch;
3506 
3507  Parser.Lex(); // Eat identifier token.
3508  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
3509  return MatchOperand_Success;
3510 }
3511 
3512 /// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
3513 /// coproc_option : '{' imm0_255 '}'
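 // For illustration, this is the trailing "{imm}" of the unindexed LDC/STC
 // form, e.g. "ldc p14, c5, [r1], {4}" (assuming standard UAL syntax).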
3514 OperandMatchResultTy
3515 ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
3516  MCAsmParser &Parser = getParser();
3517  SMLoc S = Parser.getTok().getLoc();
3518 
3519  // If this isn't a '{', this isn't a coprocessor immediate operand.
3520  if (Parser.getTok().isNot(AsmToken::LCurly))
3521  return MatchOperand_NoMatch;
3522  Parser.Lex(); // Eat the '{'
3523 
3524  const MCExpr *Expr;
3525  SMLoc Loc = Parser.getTok().getLoc();
3526  if (getParser().parseExpression(Expr)) {
3527  Error(Loc, "illegal expression");
3528  return MatchOperand_ParseFail;
3529  }
3530  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3531  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
3532  Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
3533  return MatchOperand_ParseFail;
3534  }
3535  int Val = CE->getValue();
3536 
3537  // Check for and consume the closing '}'
3538  if (Parser.getTok().isNot(AsmToken::RCurly))
3539  return MatchOperand_ParseFail;
3540  SMLoc E = Parser.getTok().getEndLoc();
3541  Parser.Lex(); // Eat the '}'
3542 
3543  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
3544  return MatchOperand_Success;
3545 }
3546 
3547 // For register list parsing, we need to map from raw GPR register numbering
3548 // to the enumeration values. The enumeration values aren't sorted by
3549 // register number due to our using "sp", "lr" and "pc" as canonical names.
3550 static unsigned getNextRegister(unsigned Reg) {
3551  // If this is a GPR, we need to do it manually, otherwise we can rely
3552  // on the sort ordering of the enumeration since the other reg-classes
3553  // are sane.
3554  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3555  return Reg + 1;
3556  switch(Reg) {
3557  default: llvm_unreachable("Invalid GPR number!");
3558  case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2;
3559  case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4;
3560  case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6;
3561  case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8;
3562  case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10;
3563  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
3564  case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR;
3565  case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0;
3566  }
3567 }
3568 
3569 /// Parse a register list.
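 // For illustration, this handles lists such as "{r4-r7, lr}" in
 // "push {r4-r7, lr}" or "{d8-d15}" in "vpush {d8-d15}"; Q registers are
 // accepted and interpreted as their two D sub-registers, e.g. "{q0, q1}"
 // is treated like "{d0-d3}".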
3570 bool ARMAsmParser::parseRegisterList(OperandVector &Operands) {
3571  MCAsmParser &Parser = getParser();
3572  if (Parser.getTok().isNot(AsmToken::LCurly))
3573  return TokError("Token is not a Left Curly Brace");
3574  SMLoc S = Parser.getTok().getLoc();
3575  Parser.Lex(); // Eat '{' token.
3576  SMLoc RegLoc = Parser.getTok().getLoc();
3577 
3578  // Check the first register in the list to see what register class
3579  // this is a list of.
3580  int Reg = tryParseRegister();
3581  if (Reg == -1)
3582  return Error(RegLoc, "register expected");
3583 
3584  // The reglist instructions have at most 16 registers, so reserve
3585  // space for that many.
3586  int EReg = 0;
3587  SmallVector<std::pair<unsigned, unsigned>, 16> Registers;
3588 
3589  // Allow Q regs and just interpret them as the two D sub-registers.
3590  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3591  Reg = getDRegFromQReg(Reg);
3592  EReg = MRI->getEncodingValue(Reg);
3593  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3594  ++Reg;
3595  }
3596  const MCRegisterClass *RC;
3597  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3598  RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
3599  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
3600  RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
3601  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
3602  RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
3603  else
3604  return Error(RegLoc, "invalid register in register list");
3605 
3606  // Store the register.
3607  EReg = MRI->getEncodingValue(Reg);
3608  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3609 
3610  // This starts immediately after the first register token in the list,
3611  // so we can see either a comma or a minus (range separator) as a legal
3612  // next token.
3613  while (Parser.getTok().is(AsmToken::Comma) ||
3614  Parser.getTok().is(AsmToken::Minus)) {
3615  if (Parser.getTok().is(AsmToken::Minus)) {
3616  Parser.Lex(); // Eat the minus.
3617  SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3618  int EndReg = tryParseRegister();
3619  if (EndReg == -1)
3620  return Error(AfterMinusLoc, "register expected");
3621  // Allow Q regs and just interpret them as the two D sub-registers.
3622  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3623  EndReg = getDRegFromQReg(EndReg) + 1;
3624  // If the register is the same as the start reg, there's nothing
3625  // more to do.
3626  if (Reg == EndReg)
3627  continue;
3628  // The register must be in the same register class as the first.
3629  if (!RC->contains(EndReg))
3630  return Error(AfterMinusLoc, "invalid register in register list");
3631  // Ranges must go from low to high.
3632  if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
3633  return Error(AfterMinusLoc, "bad range in register list");
3634 
3635  // Add all the registers in the range to the register list.
3636  while (Reg != EndReg) {
3637  Reg = getNextRegister(Reg);
3638  EReg = MRI->getEncodingValue(Reg);
3639  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3640  }
3641  continue;
3642  }
3643  Parser.Lex(); // Eat the comma.
3644  RegLoc = Parser.getTok().getLoc();
3645  int OldReg = Reg;
3646  const AsmToken RegTok = Parser.getTok();
3647  Reg = tryParseRegister();
3648  if (Reg == -1)
3649  return Error(RegLoc, "register expected");
3650  // Allow Q regs and just interpret them as the two D sub-registers.
3651  bool isQReg = false;
3652  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3653  Reg = getDRegFromQReg(Reg);
3654  isQReg = true;
3655  }
3656  // The register must be in the same register class as the first.
3657  if (!RC->contains(Reg))
3658  return Error(RegLoc, "invalid register in register list");
3659  // List must be monotonically increasing.
3660  if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
3661  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3662  Warning(RegLoc, "register list not in ascending order");
3663  else
3664  return Error(RegLoc, "register list not in ascending order");
3665  }
3666  if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
3667  Warning(RegLoc, "duplicated register (" + RegTok.getString() +
3668  ") in register list");
3669  continue;
3670  }
3671  // VFP register lists must also be contiguous.
3672  if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
3673  Reg != OldReg + 1)
3674  return Error(RegLoc, "non-contiguous register range");
3675  EReg = MRI->getEncodingValue(Reg);
3676  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3677  if (isQReg) {
3678  EReg = MRI->getEncodingValue(++Reg);
3679  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3680  }
3681  }
3682 
3683  if (Parser.getTok().isNot(AsmToken::RCurly))
3684  return Error(Parser.getTok().getLoc(), "'}' expected");
3685  SMLoc E = Parser.getTok().getEndLoc();
3686  Parser.Lex(); // Eat '}' token.
3687 
3688  // Push the register list operand.
3689  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
3690 
3691  // The ARM system instruction variants for LDM/STM have a '^' token here.
3692  if (Parser.getTok().is(AsmToken::Caret)) {
3693  Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
3694  Parser.Lex(); // Eat '^' token.
3695  }
3696 
3697  return false;
3698 }
3699 
3700 // Helper function to parse the lane index for vector lists.
3701 OperandMatchResultTy ARMAsmParser::
3702 parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
3703  MCAsmParser &Parser = getParser();
3704  Index = 0; // Always return a defined index value.
3705  if (Parser.getTok().is(AsmToken::LBrac)) {
3706  Parser.Lex(); // Eat the '['.
3707  if (Parser.getTok().is(AsmToken::RBrac)) {
3708  // "Dn[]" is the 'all lanes' syntax.
3709  LaneKind = AllLanes;
3710  EndLoc = Parser.getTok().getEndLoc();
3711  Parser.Lex(); // Eat the ']'.
3712  return MatchOperand_Success;
3713  }
3714 
3715  // There's an optional '#' token here. Normally there wouldn't be, but
3716  // inline assembly puts one in, and it's friendly to accept that.
3717  if (Parser.getTok().is(AsmToken::Hash))
3718  Parser.Lex(); // Eat '#' or '$'.
3719 
3720  const MCExpr *LaneIndex;
3721  SMLoc Loc = Parser.getTok().getLoc();
3722  if (getParser().parseExpression(LaneIndex)) {
3723  Error(Loc, "illegal expression");
3724  return MatchOperand_ParseFail;
3725  }
3726  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
3727  if (!CE) {
3728  Error(Loc, "lane index must be empty or an integer");
3729  return MatchOperand_ParseFail;
3730  }
3731  if (Parser.getTok().isNot(AsmToken::RBrac)) {
3732  Error(Parser.getTok().getLoc(), "']' expected");
3733  return MatchOperand_ParseFail;
3734  }
3735  EndLoc = Parser.getTok().getEndLoc();
3736  Parser.Lex(); // Eat the ']'.
3737  int64_t Val = CE->getValue();
3738 
3739  // FIXME: Make this range check context sensitive for .8, .16, .32.
3740  if (Val < 0 || Val > 7) {
3741  Error(Parser.getTok().getLoc(), "lane index out of range");
3742  return MatchOperand_ParseFail;
3743  }
3744  Index = Val;
3745  LaneKind = IndexedLane;
3746  return MatchOperand_Success;
3747  }
3748  LaneKind = NoLanes;
3749  return MatchOperand_Success;
3750 }
3751 
3752 // parse a vector register list
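 // For illustration, NEON vector lists parsed here include, e.g.:
 //   vld1.32 {d0, d1}, [r0]     @ plain list
 //   vld1.8  {d0[]}, [r0]       @ all-lanes form
 //   vld1.8  {d0[2]}, [r0]      @ indexed-lane form
 //   vld2.16 {d0, d2}, [r0]     @ double-spaced list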
3753 OperandMatchResultTy
3754 ARMAsmParser::parseVectorList(OperandVector &Operands) {
3755  MCAsmParser &Parser = getParser();
3756  VectorLaneTy LaneKind;
3757  unsigned LaneIndex;
3758  SMLoc S = Parser.getTok().getLoc();
3759  // As an extension (to match gas), support a plain D register or Q register
3760  // (without enclosing curly braces) as a single or double entry list,
3761  // respectively.
3762  if (Parser.getTok().is(AsmToken::Identifier)) {
3763  SMLoc E = Parser.getTok().getEndLoc();
3764  int Reg = tryParseRegister();
3765  if (Reg == -1)
3766  return MatchOperand_NoMatch;
3767  if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
3768  OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3769  if (Res != MatchOperand_Success)
3770  return Res;
3771  switch (LaneKind) {
3772  case NoLanes:
3773  Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
3774  break;
3775  case AllLanes:
3776  Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
3777  S, E));
3778  break;
3779  case IndexedLane:
3780  Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
3781  LaneIndex,
3782  false, S, E));
3783  break;
3784  }
3785  return MatchOperand_Success;
3786  }
3787  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3788  Reg = getDRegFromQReg(Reg);
3789  OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3790  if (Res != MatchOperand_Success)
3791  return Res;
3792  switch (LaneKind) {
3793  case NoLanes:
3794  Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3795  &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3796  Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
3797  break;
3798  case AllLanes:
3799  Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3800  &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3801  Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3802  S, E));
3803  break;
3804  case IndexedLane:
3805  Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3806  LaneIndex,
3807  false, S, E));
3808  break;
3809  }
3810  return MatchOperand_Success;
3811  }
3812  Error(S, "vector register expected");
3813  return MatchOperand_ParseFail;
3814  }
3815 
3816  if (Parser.getTok().isNot(AsmToken::LCurly))
3817  return MatchOperand_NoMatch;
3818 
3819  Parser.Lex(); // Eat '{' token.
3820  SMLoc RegLoc = Parser.getTok().getLoc();
3821 
3822  int Reg = tryParseRegister();
3823  if (Reg == -1) {
3824  Error(RegLoc, "register expected");
3825  return MatchOperand_ParseFail;
3826  }
3827  unsigned Count = 1;
3828  int Spacing = 0;
3829  unsigned FirstReg = Reg;
3830  // The list is of D registers, but we also allow Q regs and just interpret
3831  // them as the two D sub-registers.
3832  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3833  FirstReg = Reg = getDRegFromQReg(Reg);
3834  Spacing = 1; // double-spacing requires explicit D registers, otherwise
3835  // it's ambiguous with four-register single spaced.
3836  ++Reg;
3837  ++Count;
3838  }
3839 
3840  SMLoc E;
3841  if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
3842  return MatchOperand_ParseFail;
3843 
3844  while (Parser.getTok().is(AsmToken::Comma) ||
3845  Parser.getTok().is(AsmToken::Minus)) {
3846  if (Parser.getTok().is(AsmToken::Minus)) {
3847  if (!Spacing)
3848  Spacing = 1; // Register range implies a single spaced list.
3849  else if (Spacing == 2) {
3850  Error(Parser.getTok().getLoc(),
3851  "sequential registers in double spaced list");
3852  return MatchOperand_ParseFail;
3853  }
3854  Parser.Lex(); // Eat the minus.
3855  SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3856  int EndReg = tryParseRegister();
3857  if (EndReg == -1) {
3858  Error(AfterMinusLoc, "register expected");
3859  return MatchOperand_ParseFail;
3860  }
3861  // Allow Q regs and just interpret them as the two D sub-registers.
3862  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3863  EndReg = getDRegFromQReg(EndReg) + 1;
3864  // If the register is the same as the start reg, there's nothing
3865  // more to do.
3866  if (Reg == EndReg)
3867  continue;
3868  // The register must be in the same register class as the first.
3869  if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3870  Error(AfterMinusLoc, "invalid register in register list");
3871  return MatchOperand_ParseFail;
3872  }
3873  // Ranges must go from low to high.
3874  if (Reg > EndReg) {
3875  Error(AfterMinusLoc, "bad range in register list");
3876  return MatchOperand_ParseFail;
3877  }
3878  // Parse the lane specifier if present.
3879  VectorLaneTy NextLaneKind;
3880  unsigned NextLaneIndex;
3881  if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3882  MatchOperand_Success)
3883  return MatchOperand_ParseFail;
3884  if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3885  Error(AfterMinusLoc, "mismatched lane index in register list");
3886  return MatchOperand_ParseFail;
3887  }
3888 
3889  // Add all the registers in the range to the register list.
3890  Count += EndReg - Reg;
3891  Reg = EndReg;
3892  continue;
3893  }
3894  Parser.Lex(); // Eat the comma.
3895  RegLoc = Parser.getTok().getLoc();
3896  int OldReg = Reg;
3897  Reg = tryParseRegister();
3898  if (Reg == -1) {
3899  Error(RegLoc, "register expected");
3900  return MatchOperand_ParseFail;
3901  }
3902  // vector register lists must be contiguous.
3903  // It's OK to use the enumeration values directly here, as the
3904  // VFP register classes have the enum sorted properly.
3905  //
3906  // The list is of D registers, but we also allow Q regs and just interpret
3907  // them as the two D sub-registers.
3908  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3909  if (!Spacing)
3910  Spacing = 1; // Register range implies a single spaced list.
3911  else if (Spacing == 2) {
3912  Error(RegLoc,
3913  "invalid register in double-spaced list (must be 'D' register')");
3914  return MatchOperand_ParseFail;
3915  }
3916  Reg = getDRegFromQReg(Reg);
3917  if (Reg != OldReg + 1) {
3918  Error(RegLoc, "non-contiguous register range");
3919  return MatchOperand_ParseFail;
3920  }
3921  ++Reg;
3922  Count += 2;
3923  // Parse the lane specifier if present.
3924  VectorLaneTy NextLaneKind;
3925  unsigned NextLaneIndex;
3926  SMLoc LaneLoc = Parser.getTok().getLoc();
3927  if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3928  MatchOperand_Success)
3929  return MatchOperand_ParseFail;
3930  if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3931  Error(LaneLoc, "mismatched lane index in register list");
3932  return MatchOperand_ParseFail;
3933  }
3934  continue;
3935  }
3936  // Normal D register.
3937  // Figure out the register spacing (single or double) of the list if
3938  // we don't know it already.
3939  if (!Spacing)
3940  Spacing = 1 + (Reg == OldReg + 2);
3941 
3942  // Just check that it's contiguous and keep going.
3943  if (Reg != OldReg + Spacing) {
3944  Error(RegLoc, "non-contiguous register range");
3945  return MatchOperand_ParseFail;
3946  }
3947  ++Count;
3948  // Parse the lane specifier if present.
3949  VectorLaneTy NextLaneKind;
3950  unsigned NextLaneIndex;
3951  SMLoc EndLoc = Parser.getTok().getLoc();
3952  if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
3953  return MatchOperand_ParseFail;
3954  if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3955  Error(EndLoc, "mismatched lane index in register list");
3956  return MatchOperand_ParseFail;
3957  }
3958  }
3959 
3960  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3961  Error(Parser.getTok().getLoc(), "'}' expected");
3962  return MatchOperand_ParseFail;
3963  }
3964  E = Parser.getTok().getEndLoc();
3965  Parser.Lex(); // Eat '}' token.
3966 
3967  switch (LaneKind) {
3968  case NoLanes:
3969  // Two-register operands have been converted to the
3970  // composite register classes.
3971  if (Count == 2) {
3972  const MCRegisterClass *RC = (Spacing == 1) ?
3973  &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3974  &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3975  FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3976  }
3977  Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3978  (Spacing == 2), S, E));
3979  break;
3980  case AllLanes:
3981  // Two-register operands have been converted to the
3982  // composite register classes.
3983  if (Count == 2) {
3984  const MCRegisterClass *RC = (Spacing == 1) ?
3985  &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3986  &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3987  FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3988  }
3989  Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3990  (Spacing == 2),
3991  S, E));
3992  break;
3993  case IndexedLane:
3994  Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3995  LaneIndex,
3996  (Spacing == 2),
3997  S, E));
3998  break;
3999  }
4000  return MatchOperand_Success;
4001 }
4002 
4003 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
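 // For illustration, this accepts forms such as "dmb ish", "dsb sy",
 // "dmb nshst", or a raw immediate in [0, 15] such as "dmb #11".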
4004 OperandMatchResultTy
4005 ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
4006  MCAsmParser &Parser = getParser();
4007  SMLoc S = Parser.getTok().getLoc();
4008  const AsmToken &Tok = Parser.getTok();
4009  unsigned Opt;
4010 
4011  if (Tok.is(AsmToken::Identifier)) {
4012  StringRef OptStr = Tok.getString();
4013 
4014  Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
4015  .Case("sy", ARM_MB::SY)
4016  .Case("st", ARM_MB::ST)
4017  .Case("ld", ARM_MB::LD)
4018  .Case("sh", ARM_MB::ISH)
4019  .Case("ish", ARM_MB::ISH)
4020  .Case("shst", ARM_MB::ISHST)
4021  .Case("ishst", ARM_MB::ISHST)
4022  .Case("ishld", ARM_MB::ISHLD)
4023  .Case("nsh", ARM_MB::NSH)
4024  .Case("un", ARM_MB::NSH)
4025  .Case("nshst", ARM_MB::NSHST)
4026  .Case("nshld", ARM_MB::NSHLD)
4027  .Case("unst", ARM_MB::NSHST)
4028  .Case("osh", ARM_MB::OSH)
4029  .Case("oshst", ARM_MB::OSHST)
4030  .Case("oshld", ARM_MB::OSHLD)
4031  .Default(~0U);
4032 
4033  // ishld, oshld, nshld and ld are only available from ARMv8.
4034  if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
4035  Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
4036  Opt = ~0U;
4037 
4038  if (Opt == ~0U)
4039  return MatchOperand_NoMatch;
4040 
4041  Parser.Lex(); // Eat identifier token.
4042  } else if (Tok.is(AsmToken::Hash) ||
4043  Tok.is(AsmToken::Dollar) ||
4044  Tok.is(AsmToken::Integer)) {
4045  if (Parser.getTok().isNot(AsmToken::Integer))
4046  Parser.Lex(); // Eat '#' or '$'.
4047  SMLoc Loc = Parser.getTok().getLoc();
4048 
4049  const MCExpr *MemBarrierID;
4050  if (getParser().parseExpression(MemBarrierID)) {
4051  Error(Loc, "illegal expression");
4052  return MatchOperand_ParseFail;
4053  }
4054 
4055  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
4056  if (!CE) {
4057  Error(Loc, "constant expression expected");
4058  return MatchOperand_ParseFail;
4059  }
4060 
4061  int Val = CE->getValue();
4062  if (Val & ~0xf) {
4063  Error(Loc, "immediate value out of range");
4064  return MatchOperand_ParseFail;
4065  }
4066 
4067  Opt = ARM_MB::RESERVED_0 + Val;
4068  } else
4069  return MatchOperand_ParseFail;
4070 
4071  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
4072  return MatchOperand_Success;
4073 }
4074 
4075 /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
4076 OperandMatchResultTy
4077 ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
4078  MCAsmParser &Parser = getParser();
4079  SMLoc S = Parser.getTok().getLoc();
4080  const AsmToken &Tok = Parser.getTok();
4081  unsigned Opt;
4082 
4083  if (Tok.is(AsmToken::Identifier)) {
4084  StringRef OptStr = Tok.getString();
4085 
4086  if (OptStr.equals_lower("sy"))
4087  Opt = ARM_ISB::SY;
4088  else
4089  return MatchOperand_NoMatch;
4090 
4091  Parser.Lex(); // Eat identifier token.
4092  } else if (Tok.is(AsmToken::Hash) ||
4093  Tok.is(AsmToken::Dollar) ||
4094  Tok.is(AsmToken::Integer)) {
4095  if (Parser.getTok().isNot(AsmToken::Integer))
4096  Parser.Lex(); // Eat '#' or '$'.
4097  SMLoc Loc = Parser.getTok().getLoc();
4098 
4099  const MCExpr *ISBarrierID;
4100  if (getParser().parseExpression(ISBarrierID)) {
4101  Error(Loc, "illegal expression");
4102  return MatchOperand_ParseFail;
4103  }
4104 
4105  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
4106  if (!CE) {
4107  Error(Loc, "constant expression expected");
4108  return MatchOperand_ParseFail;
4109  }
4110 
4111  int Val = CE->getValue();
4112  if (Val & ~0xf) {
4113  Error(Loc, "immediate value out of range");
4114  return MatchOperand_ParseFail;
4115  }
4116 
4117  Opt = ARM_ISB::RESERVED_0 + Val;
4118  } else
4119  return MatchOperand_ParseFail;
4120 
4121  Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
4122  (ARM_ISB::InstSyncBOpt)Opt, S));
4123  return MatchOperand_Success;
4124 }
4125 
4126 
4127 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
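 // For illustration, this parses the "aif" flags of CPS, e.g. "cpsie if" or
 // "cpsid aif"; the string "none" is also accepted and means no flags set.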
4128 OperandMatchResultTy
4129 ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
4130  MCAsmParser &Parser = getParser();
4131  SMLoc S = Parser.getTok().getLoc();
4132  const AsmToken &Tok = Parser.getTok();
4133  if (!Tok.is(AsmToken::Identifier))
4134  return MatchOperand_NoMatch;
4135  StringRef IFlagsStr = Tok.getString();
4136 
4137  // An iflags string of "none" is interpreted to mean that none of the AIF
4138  // bits are set. Not a terribly useful instruction, but a valid encoding.
4139  unsigned IFlags = 0;
4140  if (IFlagsStr != "none") {
4141  for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
4142  unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1).lower())
4143  .Case("a", ARM_PROC::A)
4144  .Case("i", ARM_PROC::I)
4145  .Case("f", ARM_PROC::F)
4146  .Default(~0U);
4147 
4148  // If some specific iflag is already set, it means that some letter is
4149  // present more than once; this is not acceptable.
4150  if (Flag == ~0U || (IFlags & Flag))
4151  return MatchOperand_NoMatch;
4152 
4153  IFlags |= Flag;
4154  }
4155  }
4156 
4157  Parser.Lex(); // Eat identifier token.
4158  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
4159  return MatchOperand_Success;
4160 }
4161 
4162 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
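 // For illustration, this handles operands such as "cpsr_fc", "apsr_nzcvq"
 // or "spsr_fsxc" in e.g. "msr apsr_nzcvq, r0", as well as M-class system
 // registers such as "primask", which are looked up by name below.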
4163 OperandMatchResultTy
4164 ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
4165  MCAsmParser &Parser = getParser();
4166  SMLoc S = Parser.getTok().getLoc();
4167  const AsmToken &Tok = Parser.getTok();
4168  if (!Tok.is(AsmToken::Identifier))
4169  return MatchOperand_NoMatch;
4170  StringRef Mask = Tok.getString();
4171 
4172  if (isMClass()) {
4173  auto TheReg = ARMSysReg::lookupMClassSysRegByName(Mask.lower());
4174  if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
4175  return MatchOperand_NoMatch;
4176 
4177  unsigned SYSmvalue = TheReg->Encoding & 0xFFF;
4178 
4179  Parser.Lex(); // Eat identifier token.
4180  Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
4181  return MatchOperand_Success;
4182  }
4183 
4184  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
4185  size_t Start = 0, Next = Mask.find('_');
4186  StringRef Flags = "";
4187  std::string SpecReg = Mask.slice(Start, Next).lower();
4188  if (Next != StringRef::npos)
4189  Flags = Mask.slice(Next+1, Mask.size());
4190 
4191  // FlagsVal contains the complete mask:
4192  // 3-0: Mask
4193  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
4194  unsigned FlagsVal = 0;
4195 
4196  if (SpecReg == "apsr") {
4197  FlagsVal = StringSwitch<unsigned>(Flags)
4198  .Case("nzcvq", 0x8) // same as CPSR_f
4199  .Case("g", 0x4) // same as CPSR_s
4200  .Case("nzcvqg", 0xc) // same as CPSR_fs
4201  .Default(~0U);
4202 
4203  if (FlagsVal == ~0U) {
4204  if (!Flags.empty())
4205  return MatchOperand_NoMatch;
4206  else
4207  FlagsVal = 8; // No flag
4208  }
4209  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
4210  // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
4211  if (Flags == "all" || Flags == "")
4212  Flags = "fc";
4213  for (int i = 0, e = Flags.size(); i != e; ++i) {
4214  unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
4215  .Case("c", 1)
4216  .Case("x", 2)
4217  .Case("s", 4)
4218  .Case("f", 8)
4219  .Default(~0U);
4220 
4221  // If some specific flag is already set, it means that some letter is
4222  // present more than once; this is not acceptable.
4223  if (Flag == ~0U || (FlagsVal & Flag))
4224  return MatchOperand_NoMatch;
4225  FlagsVal |= Flag;
4226  }
4227  } else // No match for special register.
4228  return MatchOperand_NoMatch;
4229 
4230  // Special register without flags is NOT equivalent to "fc" flags.
4231  // NOTE: This is a divergence from gas' behavior. Uncommenting the following
4232  // two lines would enable gas compatibility at the expense of breaking
4233  // round-tripping.
4234  //
4235  // if (!FlagsVal)
4236  // FlagsVal = 0x9;
4237 
4238  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
4239  if (SpecReg == "spsr")
4240  FlagsVal |= 16;
4241 
4242  Parser.Lex(); // Eat identifier token.
4243  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
4244  return MatchOperand_Success;
4245 }
4246 
4247 /// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
4248 /// use in the MRS/MSR instructions added to support virtualization.
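 // For illustration, e.g. "mrs r0, lr_irq" or "msr sp_usr, r1" use the
 // banked register names accepted here.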
4249 OperandMatchResultTy
4250 ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
4251  MCAsmParser &Parser = getParser();
4252  SMLoc S = Parser.getTok().getLoc();
4253  const AsmToken &Tok = Parser.getTok();
4254  if (!Tok.is(AsmToken::Identifier))
4255  return MatchOperand_NoMatch;
4256  StringRef RegName = Tok.getString();
4257 
4258  auto TheReg = ARMBankedReg::lookupBankedRegByName(RegName.lower());
4259  if (!TheReg)
4260  return MatchOperand_NoMatch;
4261  unsigned Encoding = TheReg->Encoding;
4262 
4263  Parser.Lex(); // Eat identifier token.
4264  Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
4265  return MatchOperand_Success;
4266 }
4267 
4268 OperandMatchResultTy
4269 ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low,
4270  int High) {
4271  MCAsmParser &Parser = getParser();
4272  const AsmToken &Tok = Parser.getTok();
4273  if (Tok.isNot(AsmToken::Identifier)) {
4274  Error(Parser.getTok().getLoc(), Op + " operand expected.");
4275  return MatchOperand_ParseFail;
4276  }
4277  StringRef ShiftName = Tok.getString();
4278  std::string LowerOp = Op.lower();
4279  std::string UpperOp = Op.upper();
4280  if (ShiftName != LowerOp && ShiftName != UpperOp) {
4281  Error(Parser.getTok().getLoc(), Op + " operand expected.");
4282  return MatchOperand_ParseFail;
4283  }
4284  Parser.Lex(); // Eat shift type token.
4285 
4286  // There must be a '#' and a shift amount.
4287  if (Parser.getTok().isNot(AsmToken::Hash) &&
4288  Parser.getTok().isNot(AsmToken::Dollar)) {
4289  Error(Parser.getTok().getLoc(), "'#' expected");
4290  return MatchOperand_ParseFail;
4291  }
4292  Parser.Lex(); // Eat hash token.
4293 
4294  const MCExpr *ShiftAmount;
4295  SMLoc Loc = Parser.getTok().getLoc();
4296  SMLoc EndLoc;
4297  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4298  Error(Loc, "illegal expression");
4299  return MatchOperand_ParseFail;
4300  }
4301  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4302  if (!CE) {
4303  Error(Loc, "constant expression expected");
4304  return MatchOperand_ParseFail;
4305  }
4306  int Val = CE->getValue();
4307  if (Val < Low || Val > High) {
4308  Error(Loc, "immediate value out of range");
4309  return MatchOperand_ParseFail;
4310  }
4311 
4312  Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
4313 
4314  return MatchOperand_Success;
4315 }
4316 
4317 OperandMatchResultTy
4318 ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
4319  MCAsmParser &Parser = getParser();
4320  const AsmToken &Tok = Parser.getTok();
4321  SMLoc S = Tok.getLoc();
4322  if (Tok.isNot(AsmToken::Identifier)) {
4323  Error(S, "'be' or 'le' operand expected");
4324  return MatchOperand_ParseFail;
4325  }
4326  int Val = StringSwitch<int>(Tok.getString().lower())
4327  .Case("be", 1)
4328  .Case("le", 0)
4329  .Default(-1);
4330  Parser.Lex(); // Eat the token.
4331 
4332  if (Val == -1) {
4333  Error(S, "'be' or 'le' operand expected");
4334  return MatchOperand_ParseFail;
4335  }
4336  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val,
4337  getContext()),
4338  S, Tok.getEndLoc()));
4339  return MatchOperand_Success;
4340 }
4341 
4342 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
4343 /// instructions. Legal values are:
4344 /// lsl #n 'n' in [0,31]
4345 /// asr #n 'n' in [1,32]
4346 /// n == 32 encoded as n == 0.
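 // For illustration, e.g. "ssat r0, #8, r1, lsl #4" or
 // "ssat r0, #16, r1, asr #32" (the latter encoded with n == 0 and, per the
 // check below, not accepted in Thumb mode).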
4347 OperandMatchResultTy
4348 ARMAsmParser::parseShifterImm(OperandVector &Operands) {
4349  MCAsmParser &Parser = getParser();
4350  const AsmToken &Tok = Parser.getTok();
4351  SMLoc S = Tok.getLoc();
4352  if (Tok.isNot(AsmToken::Identifier)) {
4353  Error(S, "shift operator 'asr' or 'lsl' expected");
4354  return MatchOperand_ParseFail;
4355  }
4356  StringRef ShiftName = Tok.getString();
4357  bool isASR;
4358  if (ShiftName == "lsl" || ShiftName == "LSL")
4359  isASR = false;
4360  else if (ShiftName == "asr" || ShiftName == "ASR")
4361  isASR = true;
4362  else {
4363  Error(S, "shift operator 'asr' or 'lsl' expected");
4364  return MatchOperand_ParseFail;
4365  }
4366  Parser.Lex(); // Eat the operator.
4367 
4368  // A '#' and a shift amount.
4369  if (Parser.getTok().isNot(AsmToken::Hash) &&
4370  Parser.getTok().isNot(AsmToken::Dollar)) {
4371  Error(Parser.getTok().getLoc(), "'#' expected");
4372  return MatchOperand_ParseFail;
4373  }
4374  Parser.Lex(); // Eat hash token.
4375  SMLoc ExLoc = Parser.getTok().getLoc();
4376 
4377  const MCExpr *ShiftAmount;
4378  SMLoc EndLoc;
4379  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4380  Error(ExLoc, "malformed shift expression");
4381  return MatchOperand_ParseFail;
4382  }
4383  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4384  if (!CE) {
4385  Error(ExLoc, "shift amount must be an immediate");
4386  return MatchOperand_ParseFail;
4387  }
4388 
4389  int64_t Val = CE->getValue();
4390  if (isASR) {
4391  // Shift amount must be in [1,32]
4392  if (Val < 1 || Val > 32) {
4393  Error(ExLoc, "'asr' shift amount must be in range [1,32]");
4394  return MatchOperand_ParseFail;
4395  }
4396  // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
4397  if (isThumb() && Val == 32) {
4398  Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
4399  return MatchOperand_ParseFail;
4400  }
4401  if (Val == 32) Val = 0;
4402  } else {
4403  // Shift amount must be in [0,31]
4404  if (Val < 0 || Val > 31) {
4405  Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
4406  return MatchOperand_ParseFail;
4407  }
4408  }
4409 
4410  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
4411 
4412  return MatchOperand_Success;
4413 }
4414 
4415 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
4416 /// of instructions. Legal values are:
4417 /// ror #n 'n' in {0, 8, 16, 24}
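 // For illustration, e.g. "sxtb r0, r1, ror #8" or "uxtah r0, r1, r2, ror #16".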
4418 OperandMatchResultTy
4419 ARMAsmParser::parseRotImm(OperandVector &Operands) {
4420  MCAsmParser &Parser = getParser();
4421  const AsmToken &Tok = Parser.getTok();
4422  SMLoc S = Tok.getLoc();
4423  if (Tok.isNot(AsmToken::Identifier))
4424  return MatchOperand_NoMatch;
4425  StringRef ShiftName = Tok.getString();
4426  if (ShiftName != "ror" && ShiftName != "ROR")
4427  return MatchOperand_NoMatch;
4428  Parser.Lex(); // Eat the operator.
4429 
4430  // A '#' and a rotate amount.
4431  if (Parser.getTok().isNot(AsmToken::Hash) &&
4432  Parser.getTok().isNot(AsmToken::Dollar)) {
4433  Error(Parser.getTok().getLoc(), "'#' expected");
4434  return MatchOperand_ParseFail;
4435  }
4436  Parser.Lex(); // Eat hash token.
4437  SMLoc ExLoc = Parser.getTok().getLoc();
4438 
4439  const MCExpr *ShiftAmount;
4440  SMLoc EndLoc;
4441  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4442  Error(ExLoc, "malformed rotate expression");
4443  return MatchOperand_ParseFail;
4444  }
4445  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4446  if (!CE) {
4447  Error(ExLoc, "rotate amount must be an immediate");
4448  return MatchOperand_ParseFail;
4449  }
4450 
4451  int64_t Val = CE->getValue();
4452  // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
4453  // normally, zero is represented in asm by omitting the rotate operand
4454  // entirely.
4455  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
4456  Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
4457  return MatchOperand_ParseFail;
4458  }
4459 
4460  Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
4461 
4462  return MatchOperand_Success;
4463 }
4464 
4465 OperandMatchResultTy
4466 ARMAsmParser::parseModImm(OperandVector &Operands) {
4467  MCAsmParser &Parser = getParser();
4468  MCAsmLexer &Lexer = getLexer();
4469  int64_t Imm1, Imm2;
4470 
4471  SMLoc S = Parser.getTok().getLoc();
4472 
4473  // 1) A mod_imm operand can appear in the place of a register name:
4474  // add r0, #mod_imm
4475  // add r0, r0, #mod_imm
4476  // to correctly handle the latter, we bail out as soon as we see an
4477  // identifier.
4478  //
4479  // 2) Similarly, we do not want to parse into complex operands:
4480  // mov r0, #mod_imm
4481  // mov r0, :lower16:(_foo)
4482  if (Parser.getTok().is(AsmToken::Identifier) ||
4483  Parser.getTok().is(AsmToken::Colon))
4484  return MatchOperand_NoMatch;
4485 
4486  // Hash (dollar) is optional as per the ARMARM
4487  if (Parser.getTok().is(AsmToken::Hash) ||
4488  Parser.getTok().is(AsmToken::Dollar)) {
4489  // Avoid parsing into complex operands (#:)
4490  if (Lexer.peekTok().is(AsmToken::Colon))
4491  return MatchOperand_NoMatch;
4492 
4493  // Eat the hash (dollar)
4494  Parser.Lex();
4495  }
4496 
4497  SMLoc Sx1, Ex1;
4498  Sx1 = Parser.getTok().getLoc();
4499  const MCExpr *Imm1Exp;
4500  if (getParser().parseExpression(Imm1Exp, Ex1)) {
4501  Error(Sx1, "malformed expression");
4502  return MatchOperand_ParseFail;
4503  }
4504 
4505  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
4506 
4507  if (CE) {
4508  // Immediate must fit within 32-bits
4509  Imm1 = CE->getValue();
4510  int Enc = ARM_AM::getSOImmVal(Imm1);
4511  if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
4512  // We have a match!
4513  Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
4514  (Enc & 0xF00) >> 7,
4515  Sx1, Ex1));
4516  return MatchOperand_Success;
4517  }
4518 
4519  // We have parsed an immediate which is not for us; fall back to a plain
4520  // immediate. This can happen for instruction aliases. For example,
4521  // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
4522  // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
4523  // instruction with a mod_imm operand. The alias is defined such that the
4524  // parser method is shared, that's why we have to do this here.
4525  if (Parser.getTok().is(AsmToken::EndOfStatement)) {
4526  Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
4527  return MatchOperand_Success;
4528  }
4529  } else {
4530  // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
4531  // MCFixup). Fall back to a plain immediate.
4532  Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
4533  return MatchOperand_Success;
4534  }
4535 
4536  // From this point onward, we expect the input to be a (#bits, #rot) pair
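 // For illustration, "mov r0, #255, #8" is the explicit (#bits, #rot) form
 // and encodes the same value as "mov r0, #0xff000000" (255 rotated right
 // by 8).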
4537  if (Parser.getTok().isNot(AsmToken::Comma)) {
4538  Error(Sx1, "expected modified immediate operand: #[0, 255], #even[0-30]");
4539  return MatchOperand_ParseFail;
4540  }
4541 
4542  if (Imm1 & ~0xFF) {
4543  Error(Sx1, "immediate operand must a number in the range [0, 255]");
4544  return MatchOperand_ParseFail;
4545  }
4546 
4547  // Eat the comma
4548  Parser.Lex();
4549 
4550  // Repeat for #rot
4551  SMLoc Sx2, Ex2;
4552  Sx2 = Parser.getTok().getLoc();
4553 
4554  // Eat the optional hash (dollar)
4555  if (Parser.getTok().is(AsmToken::Hash) ||
4556  Parser.getTok().is(AsmToken::Dollar))
4557  Parser.Lex();
4558 
4559  const MCExpr *Imm2Exp;
4560  if (getParser().parseExpression(Imm2Exp, Ex2)) {
4561  Error(Sx2, "malformed expression");
4562  return MatchOperand_ParseFail;
4563  }
4564 
4565  CE = dyn_cast<MCConstantExpr>(Imm2Exp);
4566 
4567  if (CE) {
4568  Imm2 = CE->getValue();
4569  if (!(Imm2 & ~0x1E)) {
4570  // We have a match!
4571  Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
4572  return MatchOperand_Success;
4573  }
4574  Error(Sx2, "immediate operand must an even number in the range [0, 30]");
4575  return MatchOperand_ParseFail;
4576  } else {
4577  Error(Sx2, "constant expression expected");
4578  return MatchOperand_ParseFail;
4579  }
4580 }
4581 
4582 OperandMatchResultTy
4583 ARMAsmParser::parseBitfield(OperandVector &Operands) {
4584  MCAsmParser &Parser = getParser();
4585  SMLoc S = Parser.getTok().getLoc();
4586  // The bitfield descriptor is really two operands, the LSB and the width.
4587  if (Parser.getTok().isNot(AsmToken::Hash) &&
4588  Parser.getTok().isNot(AsmToken::Dollar)) {
4589  Error(Parser.getTok().getLoc(), "'#' expected");
4590  return MatchOperand_ParseFail;
4591  }
4592  Parser.Lex(); // Eat hash token.
4593 
4594  const MCExpr *LSBExpr;
4595  SMLoc E = Parser.getTok().getLoc();
4596  if (getParser().parseExpression(LSBExpr)) {
4597  Error(E, "malformed immediate expression");
4598  return MatchOperand_ParseFail;
4599  }
4600  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
4601  if (!CE) {
4602  Error(E, "'lsb' operand must be an immediate");
4603  return MatchOperand_ParseFail;
4604  }
4605 
4606  int64_t LSB = CE->getValue();
4607  // The LSB must be in the range [0,31]
4608  if (LSB < 0 || LSB > 31) {
4609  Error(E, "'lsb' operand must be in the range [0,31]");
4610  return MatchOperand_ParseFail;
4611  }
4612  E = Parser.getTok().getLoc();
4613 
4614  // Expect another immediate operand.
4615  if (Parser.getTok().isNot(AsmToken::Comma)) {
4616  Error(Parser.getTok().getLoc(), "too few operands");
4617  return MatchOperand_ParseFail;
4618  }
4619  Parser.Lex(); // Eat comma token.
4620  if (Parser.getTok().isNot(AsmToken::Hash) &&
4621  Parser.getTok().isNot(AsmToken::Dollar)) {
4622  Error(Parser.getTok().getLoc(), "'#' expected");
4623  return MatchOperand_ParseFail;
4624  }
4625  Parser.Lex(); // Eat hash token.
4626 
4627  const MCExpr *WidthExpr;
4628  SMLoc EndLoc;
4629  if (getParser().parseExpression(WidthExpr, EndLoc)) {
4630  Error(E, "malformed immediate expression");
4631  return MatchOperand_ParseFail;
4632  }
4633  CE = dyn_cast<MCConstantExpr>(WidthExpr);
4634  if (!CE) {
4635  Error(E, "'width' operand must be an immediate");
4636  return MatchOperand_ParseFail;
4637  }
4638 
4639  int64_t Width = CE->getValue();
4640  // The width must be in the range [1,32-lsb]
4641  if (Width < 1 || Width > 32 - LSB) {
4642  Error(E, "'width' operand must be in the range [1,32-lsb]");
4643  return MatchOperand_ParseFail;
4644  }
4645 
4646  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
4647 
4648  return MatchOperand_Success;
4649 }
4650 
4651 OperandMatchResultTy
4652 ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
4653  // Check for a post-index addressing register operand. Specifically:
4654  // postidx_reg := '+' register {, shift}
4655  // | '-' register {, shift}
4656  // | register {, shift}
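 // For illustration, e.g. "ldr r0, [r1], r2", "ldr r0, [r1], -r2" or
 // "str r0, [r1], r2, lsl #2" use this post-indexed register form.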
4657 
4658  // This method must return MatchOperand_NoMatch without consuming any tokens
4659  // in the case where there is no match, as other alternatives take other
4660  // parse methods.
4661  MCAsmParser &Parser = getParser();
4662  AsmToken Tok = Parser.getTok();
4663  SMLoc S = Tok.getLoc();
4664  bool haveEaten = false;
4665  bool isAdd = true;
4666  if (Tok.is(AsmToken::Plus)) {
4667  Parser.Lex(); // Eat the '+' token.
4668  haveEaten = true;
4669  } else if (Tok.is(AsmToken::Minus)) {
4670  Parser.Lex(); // Eat the '-' token.
4671  isAdd = false;
4672  haveEaten = true;
4673  }
4674 
4675  SMLoc E = Parser.getTok().getEndLoc();
4676  int Reg = tryParseRegister();
4677  if (Reg == -1) {
4678  if (!haveEaten)
4679  return MatchOperand_NoMatch;
4680  Error(Parser.getTok().getLoc(), "register expected");
4681  return MatchOperand_ParseFail;
4682  }
4683 
4684  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
4685  unsigned ShiftImm = 0;
4686  if (Parser.getTok().is(AsmToken::Comma)) {
4687  Parser.Lex(); // Eat the ','.
4688  if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
4689  return MatchOperand_ParseFail;
4690 
4691  // FIXME: Only approximates end...may include intervening whitespace.
4692  E = Parser.getTok().getLoc();
4693  }
4694 
4695  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
4696  ShiftImm, S, E));
4697 
4698  return MatchOperand_Success;
4699 }
4700 
4701 OperandMatchResultTy
4702 ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
4703  // Check for a post-index addressing register operand. Specifically:
4704  // am3offset := '+' register
4705  // | '-' register
4706  // | register
4707  // | # imm
4708  // | # + imm
4709  // | # - imm
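 // For illustration, addressing-mode-3 instructions use this, e.g.
 // "ldrh r0, [r1], #4", "ldrh r0, [r1], #-4" or "strd r2, r3, [r4], -r5".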
4710 
4711  // This method must return MatchOperand_NoMatch without consuming any tokens
4712  // in the case where there is no match, as other alternatives take other
4713  // parse methods.
4714  MCAsmParser &Parser = getParser();
4715  AsmToken Tok = Parser.getTok();
4716  SMLoc S = Tok.getLoc();
4717 
4718  // Do immediates first, as we always parse those if we have a '#'.
4719  if (Parser.getTok().is(AsmToken::Hash) ||
4720  Parser.getTok().is(AsmToken::Dollar)) {
4721  Parser.Lex(); // Eat '#' or '$'.
4722  // Explicitly look for a '-', as we need to encode negative zero
4723  // differently.
4724  bool isNegative = Parser.getTok().is(AsmToken::Minus);
4725  const MCExpr *Offset;
4726  SMLoc E;
4727  if (getParser().parseExpression(Offset, E))
4728  return MatchOperand_ParseFail;
4729  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4730  if (!CE) {
4731  Error(S, "constant expression expected");
4732  return MatchOperand_ParseFail;
4733  }
4734  // Negative zero is encoded as the flag value
4735  // std::numeric_limits<int32_t>::min().
4736  int32_t Val = CE->getValue();
4737  if (isNegative && Val == 0)
4738  Val = std::numeric_limits<int32_t>::min();
4739 
4740  Operands.push_back(
4741  ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));
4742 
4743  return MatchOperand_Success;
4744  }
4745 
4746  bool haveEaten = false;
4747  bool isAdd = true;
4748  if (Tok.is(AsmToken::Plus)) {
4749  Parser.Lex(); // Eat the '+' token.
4750  haveEaten = true;
4751  } else if (Tok.is(AsmToken::Minus)) {
4752  Parser.Lex(); // Eat the '-' token.
4753  isAdd = false;
4754  haveEaten = true;
4755  }
4756 
4757  Tok = Parser.getTok();
4758  int Reg = tryParseRegister();
4759  if (Reg == -1) {
4760  if (!haveEaten)
4761  return MatchOperand_NoMatch;
4762  Error(Tok.getLoc(), "register expected");
4763  return MatchOperand_ParseFail;
4764  }
4765 
4766  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
4767  0, S, Tok.getEndLoc()));
4768 
4769  return MatchOperand_Success;
4770 }
4771 
4772 /// Convert parsed operands to MCInst. Needed here because this instruction
4773 /// only has two register operands, but multiplication is commutative so
4774 /// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
4775 void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
4776  const OperandVector &Operands) {
4777  ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
4778  ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
4779  // If we have a three-operand form, make sure to set Rn to be the operand
4780  // that isn't the same as Rd.
4781  unsigned RegOp = 4;
4782  if (Operands.size() == 6 &&
4783  ((ARMOperand &)*Operands[4]).getReg() ==
4784  ((ARMOperand &)*Operands[3]).getReg())
4785  RegOp = 5;
4786  ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
4787  Inst.addOperand(Inst.getOperand(0));
4788  ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
4789 }
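// A rough sketch of the effect of the conversion above: both spellings below
// are accepted, Rn is taken to be whichever source register differs from Rd,
// and Rd is then re-added as the remaining source operand:
//
//   muls r0, r1, r0    @ Rd == r0, Rn == r1
//   muls r0, r0, r1    @ Rd == r0, Rn == r1 (sources written the other way)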
4790 
4791 void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
4792  const OperandVector &Operands) {
4793  int CondOp = -1, ImmOp = -1;
4794  switch(Inst.getOpcode()) {
4795  case ARM::tB:
4796  case ARM::tBcc: CondOp = 1; ImmOp = 2; break;
4797 
4798  case ARM::t2B:
4799  case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;
4800 
4801  default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
4802  }
4803  // first decide whether or not the branch should be conditional
4804  // by looking at its location relative to an IT block
4805  if(inITBlock()) {
4806  // inside an IT block we cannot have any conditional branches. Any
4807  // such instruction needs to be converted to unconditional form.
4808  switch(Inst.getOpcode()) {
4809  case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
4810  case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
4811  }
4812  } else {
4813  // outside IT blocks we can only have unconditional branches with AL
4814  // condition code or conditional branches with non-AL condition code
4815  unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
4816  switch(Inst.getOpcode()) {
4817  case ARM::tB:
4818  case ARM::tBcc:
4819  Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
4820  break;
4821  case ARM::t2B:
4822  case ARM::t2Bcc:
4823  Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
4824  break;
4825  }
4826  }
4827 
4828  // now decide on encoding size based on branch target range
4829  switch(Inst.getOpcode()) {
4830  // classify tB as either t2B or t1B based on range of immediate operand
4831  case ARM::tB: {
4832  ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
4833  if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
4834  Inst.setOpcode(ARM::t2B);
4835  break;
4836  }
4837  // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
4838  case ARM::tBcc: {
4839  ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
4840  if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
4841  Inst.setOpcode(ARM::t2Bcc);
4842  break;
4843  }
4844  }
4845  ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
4846  ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
4847 }
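// A hedged summary of the selection above (illustrative, not exhaustive):
//
//   it  eq
//   beq target     @ inside the IT block: re-encoded as an unconditional
//                  @ branch, the condition being supplied by the IT block
//
//   beq target     @ outside an IT block: stays conditional (tBcc / t2Bcc)
//   b   target     @ AL condition outside an IT block: unconditional (tB / t2B)
//
// Additionally, if the target does not fit the narrow signed offset
// (<11,1> for tB, <8,1> for tBcc) and hasV8MBaseline() holds, the wide
// t2B / t2Bcc encoding is selected instead.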
4848 
4849 /// Parse an ARM memory operand expression. Return false on success; otherwise
4850 /// emit a diagnostic and return true. The first token must be a '[' when called.
4851 bool ARMAsmParser::parseMemory(OperandVector &Operands) {
4852  MCAsmParser &Parser = getParser();
4853  SMLoc S, E;
4854  if (Parser.getTok().isNot(AsmToken::LBrac))
4855  return TokError("Token is not a Left Bracket");
4856  S = Parser.getTok().getLoc();
4857  Parser.Lex(); // Eat left bracket token.
4858 
4859  const AsmToken &BaseRegTok = Parser.getTok();
4860  int BaseRegNum = tryParseRegister();
4861  if (BaseRegNum == -1)
4862  return Error(BaseRegTok.getLoc(), "register expected");
4863 
4864  // The next token must either be a comma, a colon or a closing bracket.
4865  const AsmToken &Tok = Parser.getTok();
4866  if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
4867  !Tok.is(AsmToken::RBrac))
4868  return Error(Tok.getLoc(), "malformed memory operand");
4869 
4870  if (Tok.is(AsmToken::RBrac)) {
4871  E = Tok.getEndLoc();
4872  Parser.Lex(); // Eat right bracket token.
4873 
4874  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
4875  ARM_AM::no_shift, 0, 0, false,
4876  S, E));
4877 
4878  // If there's a pre-indexing writeback marker, '!', just add it as a token
4879  // operand. It's rather odd, but syntactically valid.
4880  if (Parser.getTok().is(AsmToken::Exclaim)) {
4881  Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4882  Parser.Lex(); // Eat the '!'.
4883  }
4884 
4885  return false;
4886  }
4887 
4888  assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
4889  "Lost colon or comma in memory operand?!");
4890  if (Tok.is(AsmToken::Comma)) {
4891  Parser.Lex(); // Eat the comma.
4892  }
4893 
4894  // If we have a ':', it's an alignment specifier.
4895  if (Parser.getTok().is(AsmToken::Colon)) {
4896  Parser.Lex(); // Eat the ':'.
4897  E = Parser.getTok().getLoc();
4898  SMLoc AlignmentLoc = Tok.getLoc();
4899 
4900  const MCExpr *Expr;
4901  if (getParser().parseExpression(Expr))
4902  return true;
4903 
4904  // The expression has to be a constant. Memory references with relocations
4905  // don't come through here, as they use the <label> forms of the relevant
4906  // instructions.
4907  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4908  if (!CE)
4909  return Error (E, "constant expression expected");
4910 
4911  unsigned Align = 0;
4912  switch (CE->getValue()) {
4913  default:
4914  return Error(E,
4915  "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4916  case 16: Align = 2; break;
4917  case 32: Align = 4; break;
4918  case 64: Align = 8; break;
4919  case 128: Align = 16; break;
4920  case 256: Align = 32; break;
4921  }
4922 
4923  // Now we should have the closing ']'
4924  if (Parser.getTok().isNot(AsmToken::RBrac))
4925  return Error(Parser.getTok().getLoc(), "']' expected");
4926  E = Parser.getTok().getEndLoc();
4927  Parser.Lex(); // Eat right bracket token.
4928 
4929  // Don't worry about range checking the value here. That's handled by
4930  // the is*() predicates.
4931  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
4932  ARM_AM::no_shift, 0, Align,
4933  false, S, E, AlignmentLoc));
4934 
4935  // If there's a pre-indexing writeback marker, '!', just add it as a token
4936  // operand.
4937  if (Parser.getTok().is(AsmToken::Exclaim)) {
4938  Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4939  Parser.Lex(); // Eat the '!'.
4940  }
4941 
4942  return false;
4943  }
4944 
4945  // If we have a '#', it's an immediate offset, else assume it's a register
4946  // offset. Be friendly and also accept a plain integer (without a leading
4947  // hash) for gas compatibility.
4948  if (Parser.getTok().is(AsmToken::Hash) ||
4949  Parser.getTok().is(AsmToken::Dollar) ||
4950  Parser.getTok().is(AsmToken::Integer)) {
4951  if (Parser.getTok().isNot(AsmToken::Integer))
4952  Parser.Lex(); // Eat '#' or '$'.
4953  E = Parser.getTok().getLoc();
4954 
4955  bool isNegative = getParser().getTok().is(AsmToken::Minus);
4956  const MCExpr *Offset;
4957  if (getParser().parseExpression(Offset))
4958  return true;
4959 
4960  // The expression has to be a constant. Memory references with relocations
4961  // don't come through here, as they use the <label> forms of the relevant
4962  // instructions.
4963  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4964  if (!CE)
4965  return Error (E, "constant expression expected");
4966 
4967  // If the constant was #-0, represent it as
4968  // std::numeric_limits<int32_t>::min().
4969  int32_t Val = CE->getValue();
4970  if (isNegative && Val == 0)
4971  CE = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
4972  getContext());
4973 
4974  // Now we should have the closing ']'
4975  if (Parser.getTok().isNot(AsmToken::RBrac))
4976  return Error(Parser.getTok().getLoc(), "']' expected");
4977  E = Parser.getTok().getEndLoc();
4978  Parser.Lex(); // Eat right bracket token.
4979 
4980  // Don't worry about range checking the value here. That's handled by
4981  // the is*() predicates.
4982  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4983  ARM_AM::no_shift, 0, 0,
4984  false, S, E));
4985 
4986  // If there's a pre-indexing writeback marker, '!', just add it as a token
4987  // operand.
4988  if (Parser.getTok().is(AsmToken::Exclaim)) {
4989  Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4990  Parser.Lex(); // Eat the '!'.
4991  }
4992 
4993  return false;
4994  }
4995 
4996  // The register offset is optionally preceded by a '+' or '-'
4997  bool isNegative = false;
4998  if (Parser.getTok().is(AsmToken::Minus)) {
4999  isNegative = true;
5000  Parser.Lex(); // Eat the '-'.
5001  } else if (Parser.getTok().is(AsmToken::Plus)) {
5002  // Nothing to do.
5003  Parser.Lex(); // Eat the '+'.
5004  }
5005 
5006  E = Parser.getTok().getLoc();
5007  int OffsetRegNum = tryParseRegister();
5008  if (OffsetRegNum == -1)
5009  return Error(E, "register expected");
5010 
5011  // If there's a shift operator, handle it.
5012  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
5013  unsigned ShiftImm = 0;
5014  if (Parser.getTok().is(AsmToken::Comma)) {
5015  Parser.Lex(); // Eat the ','.
5016  if (parseMemRegOffsetShift(ShiftType, ShiftImm))
5017  return true;
5018  }
5019 
5020  // Now we should have the closing ']'
5021  if (Parser.getTok().isNot(AsmToken::RBrac))
5022  return Error(Parser.getTok().getLoc(), "']' expected");
5023  E = Parser.getTok().getEndLoc();
5024  Parser.Lex(); // Eat right bracket token.
5025 
5026  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
5027  ShiftType, ShiftImm, 0, isNegative,
5028  S, E));
5029 
5030  // If there's a pre-indexing writeback marker, '!', just add it as a token
5031  // operand.
5032  if (Parser.getTok().is(AsmToken::Exclaim)) {
5033  Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5034  Parser.Lex(); // Eat the '!'.
5035  }
5036 
5037  return false;
5038 }
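// A hedged summary of the bracketed memory forms handled above, written as
// the assembly this parser accepts (base register shown as r0):
//
//   [r0]                 @ base register only
//   [r0]!                @ writeback '!' kept as a separate token operand
//   [r0, #8]             @ immediate offset ('#'/'$' optional, gas-style)
//   [r0, #-0]            @ negative zero stored as INT32_MIN
//   [r0:128]             @ alignment specifier: 16, 32, 64, 128 or 256 bits
//   [r0, r1]             @ register offset
//   [r0, -r1, lsl #2]    @ negated, shifted register offset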
5039 
5040 /// parseMemRegOffsetShift - one of these two:
5041 /// ( lsl | lsr | asr | ror ) , # shift_amount
5042 /// rrx
5043 /// Returns false if a shift was successfully parsed, true otherwise.
5044 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
5045  unsigned &Amount) {
5046  MCAsmParser &Parser = getParser();
5047  SMLoc Loc = Parser.getTok().getLoc();
5048  const AsmToken &Tok = Parser.getTok();
5049  if (Tok.isNot(AsmToken::Identifier))
5050  return true;
5051  StringRef ShiftName = Tok.getString();
5052  if (ShiftName == "lsl" || ShiftName == "LSL" ||
5053  ShiftName == "asl" || ShiftName == "ASL")
5054  St = ARM_AM::lsl;
5055  else if (ShiftName == "lsr" || ShiftName == "LSR")
5056  St = ARM_AM::lsr;
5057  else if (ShiftName == "asr" || ShiftName == "ASR")
5058  St = ARM_AM::asr;
5059  else if (ShiftName == "ror" || ShiftName == "ROR")
5060  St = ARM_AM::ror;
5061  else if (ShiftName == "rrx" || ShiftName == "RRX")
5062  St = ARM_AM::rrx;
5063  else
5064  return Error(Loc, "illegal shift operator");
5065  Parser.Lex(); // Eat shift type token.
5066 
5067  // rrx stands alone.
5068  Amount = 0;
5069  if (St != ARM_AM::rrx) {
5070  Loc = Parser.getTok().getLoc();
5071  // A '#' and a shift amount.
5072  const AsmToken &HashTok = Parser.getTok();
5073  if (HashTok.isNot(AsmToken::Hash) &&
5074  HashTok.isNot(AsmToken::Dollar))
5075  return Error(HashTok.getLoc(), "'#' expected");
5076  Parser.Lex(); // Eat hash token.
5077 
5078  const MCExpr *Expr;
5079  if (getParser().parseExpression(Expr))
5080  return true;
5081  // Range check the immediate.
5082  // lsl, ror: 0 <= imm <= 31
5083  // lsr, asr: 0 <= imm <= 32
5084  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5085  if (!CE)
5086  return Error(Loc, "shift amount must be an immediate");
5087  int64_t Imm = CE->getValue();
5088  if (Imm < 0 ||
5089  ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
5090  ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
5091  return Error(Loc, "immediate shift value out of range");
5092  // If <ShiftTy> #0, treat it as lsl #0 (i.e. no shift).
5093  if (Imm == 0)
5094  St = ARM_AM::lsl;
5095  // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
5096  if (Imm == 32)
5097  Imm = 0;
5098  Amount = Imm;
5099  }
5100 
5101  return false;
5102 }
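// A sketch of the shift syntax accepted above and how the edge cases are
// normalized by the range checks:
//
//   [r0, r1, lsl #2]    @ lsl/asl and ror take #0..31; lsr and asr take #0..32
//   [r0, r1, rrx]       @ rrx stands alone, no amount
//   [r0, r1, lsr #32]   @ lsr/asr #32 are recorded with Amount == 0
//   [r0, r1, ror #0]    @ any #0 shift is canonicalized to lsl (no shift)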
5103 
5104 /// parseFPImm - A floating point immediate expression operand.
5105 OperandMatchResultTy
5106 ARMAsmParser::parseFPImm(OperandVector &Operands) {
5107  MCAsmParser &Parser = getParser();
5108  // Anything that can accept a floating point constant as an operand
5109  // needs to go through here, as the regular parseExpression is
5110  // integer only.
5111  //
5112  // This routine still creates a generic Immediate operand, containing
5113  // a bitcast of the 64-bit floating point value. The various operands
5114  // that accept floats can check whether the value is valid for them
5115  // via the standard is*() predicates.
5116 
5117  SMLoc S = Parser.getTok().getLoc();
5118 
5119  if (Parser.getTok().isNot(AsmToken::Hash) &&
5120  Parser.getTok().isNot(AsmToken::Dollar))
5121  return MatchOperand_NoMatch;
5122 
5123  // Disambiguate the VMOV forms that can accept an FP immediate.
5124  // vmov.f32 <sreg>, #imm
5125  // vmov.f64 <dreg>, #imm
5126  // vmov.f32 <dreg>, #imm @ vector f32x2
5127  // vmov.f32 <qreg>, #imm @ vector f32x4
5128  //
5129  // There are also the NEON VMOV instructions which expect an
5130  // integer constant. Make sure we don't try to parse an FPImm
5131  // for these:
5132  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
5133  ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
5134  bool isVmovf = TyOp.isToken() &&
5135  (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
5136  TyOp.getToken() == ".f16");
5137  ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
5138  bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
5139  Mnemonic.getToken() == "fconsts");
5140  if (!(isVmovf || isFconst))
5141  return MatchOperand_NoMatch;
5142 
5143  Parser.Lex(); // Eat '#' or '$'.
5144 
5145  // Handle negation, as that still comes through as a separate token.
5146  bool isNegative = false;
5147  if (Parser.getTok().is(AsmToken::Minus)) {
5148  isNegative = true;
5149  Parser.Lex();
5150  }
5151  const AsmToken &Tok = Parser.getTok();
5152  SMLoc Loc = Tok.getLoc();
5153  if (Tok.is(AsmToken::Real) && isVmovf) {
5154  APFloat RealVal(APFloat::IEEEsingle(), Tok.getString());
5155  uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5156  // If we had a '-' in front, toggle the sign bit.
5157  IntVal ^= (uint64_t)isNegative << 31;
5158  Parser.Lex(); // Eat the token.
5159  Operands.push_back(ARMOperand::CreateImm(
5160  MCConstantExpr::create(IntVal, getContext()),
5161  S, Parser.getTok().getLoc()));
5162  return MatchOperand_Success;
5163  }
5164  // Also handle plain integers. Instructions which allow floating point
5165  // immediates also allow a raw encoded 8-bit value.
5166  if (Tok.is(AsmToken::Integer) && isFconst) {
5167  int64_t Val = Tok.getIntVal();
5168  Parser.Lex(); // Eat the token.
5169  if (Val > 255 || Val < 0) {
5170  Error(Loc, "encoded floating point value out of range");
5171  return MatchOperand_ParseFail;
5172  }
5173  float RealVal = ARM_AM::getFPImmFloat(Val);
5174  Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();
5175 
5176  Operands.push_back(ARMOperand::CreateImm(
5177  MCConstantExpr::create(Val, getContext()), S,
5178  Parser.getTok().getLoc()));
5179  return MatchOperand_Success;
5180  }
5181 
5182  Error(Loc, "invalid floating point immediate");
5183  return MatchOperand_ParseFail;
5184 }
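// Hedged examples of the immediates the routine above accepts; in every case
// the result is a plain integer immediate whose validity is checked later by
// the is*() predicates:
//
//   vmov.f32 s0, #1.0     @ FP literal, bitcast to its 32-bit pattern
//   vmov.f32 s0, #-0.0    @ the leading '-' toggles the sign bit explicitly
//   fconsts  s0, #112     @ raw encoded 8-bit value, accepted range 0..255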
5185 
5186 /// Parse an ARM instruction operand. For now this parses the operand regardless
5187 /// of the mnemonic.
5188 bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
5189  MCAsmParser &Parser = getParser();
5190  SMLoc S, E;
5191 
5192  // Check if the current operand has a custom associated parser, if so, try to
5193  // custom parse the operand, or fallback to the general approach.
5194  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
5195  if (ResTy == MatchOperand_Success)
5196  return false;
5197  // If there wasn't a custom match, try the generic matcher below. Otherwise,
5198  // there was a match, but an error occurred, in which case, just return that
5199  // the operand parsing failed.
5200  if (ResTy == MatchOperand_ParseFail)
5201  return true;
5202 
5203  switch (getLexer().getKind()) {
5204  default:
5205  Error(Parser.getTok().getLoc(), "unexpected token in operand");
5206  return true;
5207  case AsmToken::Identifier: {
5208  // If we've seen a branch mnemonic, the next operand must be a label. This
5209  // is true even if the label is a register name. So "b r1" means branch to
5210  // label "r1".
5211  bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
5212  if (!ExpectLabel) {
5213  if (!tryParseRegisterWithWriteBack(Operands))
5214  return false;
5215  int Res = tryParseShiftRegister(Operands);
5216  if (Res == 0) // success
5217  return false;
5218  else if (Res == -1) // irrecoverable error
5219  return true;
5220  // If this is VMRS, check for the apsr_nzcv operand.
5221  if (Mnemonic == "vmrs" &&
5222  Parser.getTok().getString().equals_lower("apsr_nzcv")) {
5223  S = Parser.getTok().getLoc();
5224  Parser.Lex();
5225  Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
5226  return false;
5227  }
5228  }
5229 
5230  // Fall through for the Identifier case that is not a register or a
5231  // special name.
5232  LLVM_FALLTHROUGH;
5233  }
5234  case AsmToken::LParen: // parenthesized expressions like (_strcmp-4)
5235  case AsmToken::Integer: // things like 1f and 2b as a branch targets
5236  case AsmToken::String: // quoted label names.
5237  case AsmToken::Dot: { // . as a branch target
5238  // This was not a register so parse other operands that start with an
5239  // identifier (like labels) as expressions and create them as immediates.
5240  const MCExpr *IdVal;
5241  S = Parser.getTok().getLoc();
5242  if (getParser().parseExpression(IdVal))
5243  return true;
5244  E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5245  Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
5246  return false;
5247  }
5248  case AsmToken::LBrac:
5249  return parseMemory(Operands);
5250  case AsmToken::LCurly:
5251  return parseRegisterList(Operands);
5252  case AsmToken::Dollar:
5253  case AsmToken::Hash:
5254  // #42 -> immediate.
5255  S = Parser.getTok().getLoc();
5256  Parser.Lex();
5257 
5258  if (Parser.getTok().isNot(AsmToken::Colon)) {
5259  bool isNegative = Parser.getTok().is(AsmToken::Minus);
5260  const MCExpr *ImmVal;
5261  if (getParser().parseExpression(ImmVal))
5262  return true;
5263  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
5264  if (CE) {
5265  int32_t Val = CE->getValue();
5266  if (isNegative && Val == 0)
5267  ImmVal = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
5268  getContext());
5269  }
5270  E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5271  Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
5272 
5273  // There can be a trailing '!' on operands that we want as a separate
5274  // '!' Token operand. Handle that here. For example, the compatibility
5275  // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
5276  if (Parser.getTok().is(AsmToken::Exclaim)) {
5277  Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
5278  Parser.getTok().getLoc()));
5279  Parser.Lex(); // Eat exclaim token
5280  }
5281 