LLVM 19.0.0git
ARMAsmParser.cpp
Go to the documentation of this file.
1//===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "ARMBaseInstrInfo.h"
10#include "ARMFeatures.h"
17#include "Utils/ARMBaseInfo.h"
18#include "llvm/ADT/APFloat.h"
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
23#include "llvm/ADT/StringMap.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/StringSet.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCContext.h"
29#include "llvm/MC/MCExpr.h"
30#include "llvm/MC/MCInst.h"
31#include "llvm/MC/MCInstrDesc.h"
32#include "llvm/MC/MCInstrInfo.h"
40#include "llvm/MC/MCSection.h"
41#include "llvm/MC/MCStreamer.h"
43#include "llvm/MC/MCSymbol.h"
52#include "llvm/Support/SMLoc.h"
57#include <algorithm>
58#include <cassert>
59#include <cstddef>
60#include <cstdint>
61#include <iterator>
62#include <limits>
63#include <memory>
64#include <string>
65#include <utility>
66#include <vector>
67
68#define DEBUG_TYPE "asm-parser"
69
70using namespace llvm;
71
72namespace llvm {
77};
78extern const ARMInstrTable ARMDescs;
79} // end namespace llvm
80
81namespace {
82
// Policy for conditional instructions appearing outside an explicit IT
// block; selected by the "arm-implicit-it" command-line option below.
enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
84
85static cl::opt<ImplicitItModeTy> ImplicitItMode(
86 "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
87 cl::desc("Allow conditional instructions outdside of an IT block"),
88 cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
89 "Accept in both ISAs, emit implicit ITs in Thumb"),
90 clEnumValN(ImplicitItModeTy::Never, "never",
91 "Warn in ARM, reject in Thumb"),
92 clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
93 "Accept in ARM, reject in Thumb"),
94 clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
95 "Warn in ARM, emit implicit ITs in Thumb")));
96
// When set, the parser emits target build attributes for the selected
// subtarget (see the emitTargetAttributes call in the ARMAsmParser ctor).
static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
                                        cl::init(false));
99
// How a vector-lane subscript was written on an operand, if at all
// (see parseVectorLane).
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
101
/// Extract the state bit for IT-block slot \p Position from \p Mask.
/// Position==0 means we're not in an IT block at all; Position==1 asks for
/// the first state bit, which is the always-zero bit 4 (the mandatory
/// initial Then). Slot N's bit sits at bit (5 - N) of the conceptual 5-bit
/// mask, so shifting by (5 - Position) lands it in bit 0.
static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
  unsigned Shift = 5 - Position;
  return (Mask >> Shift) & 1u;
}
111
112class UnwindContext {
113 using Locs = SmallVector<SMLoc, 4>;
114
115 MCAsmParser &Parser;
116 Locs FnStartLocs;
117 Locs CantUnwindLocs;
118 Locs PersonalityLocs;
119 Locs PersonalityIndexLocs;
120 Locs HandlerDataLocs;
121 int FPReg;
122
123public:
124 UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
125
126 bool hasFnStart() const { return !FnStartLocs.empty(); }
127 bool cantUnwind() const { return !CantUnwindLocs.empty(); }
128 bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
129
130 bool hasPersonality() const {
131 return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
132 }
133
134 void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
135 void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
136 void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
137 void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
138 void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
139
140 void saveFPReg(int Reg) { FPReg = Reg; }
141 int getFPReg() const { return FPReg; }
142
143 void emitFnStartLocNotes() const {
144 for (const SMLoc &Loc : FnStartLocs)
145 Parser.Note(Loc, ".fnstart was specified here");
146 }
147
148 void emitCantUnwindLocNotes() const {
149 for (const SMLoc &Loc : CantUnwindLocs)
150 Parser.Note(Loc, ".cantunwind was specified here");
151 }
152
153 void emitHandlerDataLocNotes() const {
154 for (const SMLoc &Loc : HandlerDataLocs)
155 Parser.Note(Loc, ".handlerdata was specified here");
156 }
157
158 void emitPersonalityLocNotes() const {
159 for (Locs::const_iterator PI = PersonalityLocs.begin(),
160 PE = PersonalityLocs.end(),
161 PII = PersonalityIndexLocs.begin(),
162 PIE = PersonalityIndexLocs.end();
163 PI != PE || PII != PIE;) {
164 if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
165 Parser.Note(*PI++, ".personality was specified here");
166 else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
167 Parser.Note(*PII++, ".personalityindex was specified here");
168 else
169 llvm_unreachable(".personality and .personalityindex cannot be "
170 "at the same location");
171 }
172 }
173
174 void reset() {
175 FnStartLocs = Locs();
176 CantUnwindLocs = Locs();
177 PersonalityLocs = Locs();
178 HandlerDataLocs = Locs();
179 PersonalityIndexLocs = Locs();
180 FPReg = ARM::SP;
181 }
182};
183
184// Various sets of ARM instruction mnemonics which are used by the asm parser
185class ARMMnemonicSets {
186 StringSet<> CDE;
187 StringSet<> CDEWithVPTSuffix;
188public:
189 ARMMnemonicSets(const MCSubtargetInfo &STI);
190
191 /// Returns true iff a given mnemonic is a CDE instruction
192 bool isCDEInstr(StringRef Mnemonic) {
193 // Quick check before searching the set
194 if (!Mnemonic.starts_with("cx") && !Mnemonic.starts_with("vcx"))
195 return false;
196 return CDE.count(Mnemonic);
197 }
198
199 /// Returns true iff a given mnemonic is a VPT-predicable CDE instruction
200 /// (possibly with a predication suffix "e" or "t")
201 bool isVPTPredicableCDEInstr(StringRef Mnemonic) {
202 if (!Mnemonic.starts_with("vcx"))
203 return false;
204 return CDEWithVPTSuffix.count(Mnemonic);
205 }
206
207 /// Returns true iff a given mnemonic is an IT-predicable CDE instruction
208 /// (possibly with a condition suffix)
209 bool isITPredicableCDEInstr(StringRef Mnemonic) {
210 if (!Mnemonic.starts_with("cx"))
211 return false;
212 return Mnemonic.starts_with("cx1a") || Mnemonic.starts_with("cx1da") ||
213 Mnemonic.starts_with("cx2a") || Mnemonic.starts_with("cx2da") ||
214 Mnemonic.starts_with("cx3a") || Mnemonic.starts_with("cx3da");
215 }
216
217 /// Return true iff a given mnemonic is an integer CDE instruction with
218 /// dual-register destination
219 bool isCDEDualRegInstr(StringRef Mnemonic) {
220 if (!Mnemonic.starts_with("cx"))
221 return false;
222 return Mnemonic == "cx1d" || Mnemonic == "cx1da" ||
223 Mnemonic == "cx2d" || Mnemonic == "cx2da" ||
224 Mnemonic == "cx3d" || Mnemonic == "cx3da";
225 }
226};
227
228ARMMnemonicSets::ARMMnemonicSets(const MCSubtargetInfo &STI) {
229 for (StringRef Mnemonic: { "cx1", "cx1a", "cx1d", "cx1da",
230 "cx2", "cx2a", "cx2d", "cx2da",
231 "cx3", "cx3a", "cx3d", "cx3da", })
232 CDE.insert(Mnemonic);
233 for (StringRef Mnemonic :
234 {"vcx1", "vcx1a", "vcx2", "vcx2a", "vcx3", "vcx3a"}) {
235 CDE.insert(Mnemonic);
236 CDEWithVPTSuffix.insert(Mnemonic);
237 CDEWithVPTSuffix.insert(std::string(Mnemonic) + "t");
238 CDEWithVPTSuffix.insert(std::string(Mnemonic) + "e");
239 }
240}
241
242class ARMAsmParser : public MCTargetAsmParser {
243 const MCRegisterInfo *MRI;
244 UnwindContext UC;
245 ARMMnemonicSets MS;
246
247 ARMTargetStreamer &getTargetStreamer() {
248 assert(getParser().getStreamer().getTargetStreamer() &&
249 "do not have a target streamer");
251 return static_cast<ARMTargetStreamer &>(TS);
252 }
253
254 // Map of register aliases registers via the .req directive.
255 StringMap<unsigned> RegisterReqs;
256
257 bool NextSymbolIsThumb;
258
  // True if, in Thumb state, conditional instructions outside an IT block
  // should be accepted by synthesizing an implicit IT.
  bool useImplicitITThumb() const {
    return ImplicitItMode == ImplicitItModeTy::Always ||
           ImplicitItMode == ImplicitItModeTy::ThumbOnly;
  }

  // True if, in ARM state, conditional instructions outside an IT block
  // should be accepted silently (no IT instruction exists in ARM state).
  bool useImplicitITARM() const {
    return ImplicitItMode == ImplicitItModeTy::Always ||
           ImplicitItMode == ImplicitItModeTy::ARMOnly;
  }
268
  // State of the IT (If-Then) block currently being parsed, if any.
  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              // '1' condition as indicated in IT.
                              // '0' inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)
                              // Note that this does not have the same encoding
                              // as in the IT instruction, which also depends
                              // on the low bit of the condition code.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,4], with 0 being the IT
                              // instruction itself. Initialized according to
                              // count of instructions in block. ~0U if no
                              // active IT block.

    bool IsExplicit;          // true - The IT instruction was present in the
                              // input, we should not modify it.
                              // false - The IT instruction was added
                              // implicitly, we can extend it if that
                              // would be legal.
  } ITState;

  // Instructions held back while an implicit IT block is still open; they
  // are emitted (behind a synthesized IT) by flushPendingInstructions().
  SmallVector<MCInst, 4> PendingConditionalInsts;
295
  // Emit everything deferred for the current implicit IT block: first a
  // synthesized t2IT covering the pending instructions, then the pending
  // conditional instructions themselves, then reset the IT state.
  void flushPendingInstructions(MCStreamer &Out) override {
    if (!inImplicitITBlock()) {
      // Nothing is ever deferred outside an implicit IT block.
      assert(PendingConditionalInsts.size() == 0);
      return;
    }

    // Emit the IT instruction
    MCInst ITInst;
    ITInst.setOpcode(ARM::t2IT);
    ITInst.addOperand(MCOperand::createImm(ITState.Cond));
    ITInst.addOperand(MCOperand::createImm(ITState.Mask));
    Out.emitInstruction(ITInst, getSTI());

    // Emit the conditional instructions
    assert(PendingConditionalInsts.size() <= 4);
    for (const MCInst &Inst : PendingConditionalInsts) {
      Out.emitInstruction(Inst, getSTI());
    }
    PendingConditionalInsts.clear();

    // Clear the IT state
    ITState.Mask = 0;
    ITState.CurPosition = ~0U;
  }
320
  // An IT block is active whenever CurPosition holds a real slot index
  // (~0U is the "no block" sentinel).
  bool inITBlock() { return ITState.CurPosition != ~0U; }
  bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
  bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }

  // True when positioned on the final slot: the block has
  // 4 - trailing-zeroes(Mask) slots in total.
  bool lastInITBlock() {
    return ITState.CurPosition == 4 - (unsigned)llvm::countr_zero(ITState.Mask);
  }

  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done, except for implicit IT blocks, which we leave
    // open until we find an instruction that can't be added to it.
    unsigned TZ = llvm::countr_zero(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }
338
  // Rewind the state of the current IT block, removing the last slot from it.
  void rewindImplicitITPosition() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition > 1);
    ITState.CurPosition--;
    unsigned TZ = llvm::countr_zero(ITState.Mask);
    unsigned NewMask = 0;
    // Keep the condition bits above the slot being dropped ...
    NewMask |= ITState.Mask & (0xC << TZ);
    // ... and move the terminating 1 up by one position.
    NewMask |= 0x2 << TZ;
    ITState.Mask = NewMask;
  }

  // Rewind the state of the current IT block, removing the last slot from it.
  // If we were at the first slot, this closes the IT block.
  void discardImplicitITBlock() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition == 1);
    ITState.CurPosition = ~0U;
  }
358
  // Return the low-subreg of a given Q register.
  unsigned getDRegFromQReg(unsigned QReg) const {
    return MRI->getSubReg(QReg, ARM::dsub_0);
  }

  // Get the condition code corresponding to the current IT block slot.
  ARMCC::CondCodes currentITCond() {
    // A set mask bit marks this slot as an 'else' (inverted condition).
    unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
    return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond;
  }

  // Invert the condition of the current IT block slot without changing any
  // other slots in the same block.
  void invertCurrentITCondition() {
    if (ITState.CurPosition == 1) {
      // Slot 1's state bit is fixed at 0 (Then), so inverting it means
      // flipping the block's base condition instead.
      ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
    } else {
      ITState.Mask ^= 1 << (5 - ITState.CurPosition);
    }
  }
379
  // Returns true if the current IT block is full (all 4 slots used).
  bool isITBlockFull() {
    // A full 4-slot block has its terminating 1 in the mask's lowest bit.
    return inITBlock() && (ITState.Mask & 1);
  }

  // Extend the current implicit IT block to have one more slot with the given
  // condition code.
  void extendImplicitITBlock(ARMCC::CondCodes Cond) {
    assert(inImplicitITBlock());
    assert(!isITBlockFull());
    assert(Cond == ITState.Cond ||
           Cond == ARMCC::getOppositeCondition(ITState.Cond));
    unsigned TZ = llvm::countr_zero(ITState.Mask);
    unsigned NewMask = 0;
    // Keep any existing condition bits.
    NewMask |= ITState.Mask & (0xE << TZ);
    // Insert the new condition bit.
    NewMask |= (Cond != ITState.Cond) << TZ;
    // Move the trailing 1 down one bit.
    NewMask |= 1 << (TZ - 1);
    ITState.Mask = NewMask;
  }
402
  // Create a new implicit IT block with a dummy condition code.
  void startImplicitITBlock() {
    assert(!inITBlock());
    ITState.Cond = ARMCC::AL; // Dummy; real condition comes from the insts.
    ITState.Mask = 8;         // One "then" slot.
    ITState.CurPosition = 1;
    ITState.IsExplicit = false;
  }

  // Create a new explicit IT block with the given condition and mask.
  // The mask should be in the format used in ARMOperand and
  // MCOperand, with a 1 implying 'e', regardless of the low bit of
  // the condition.
  void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
    assert(!inITBlock());
    ITState.Cond = Cond;
    ITState.Mask = Mask;
    ITState.CurPosition = 0; // Position 0 is the IT instruction itself.
    ITState.IsExplicit = true;
  }
423
  // Analogous tracking for MVE VPT blocks: 4-bit slot mask plus the current
  // slot index, with ~0U meaning no VPT block is active.
  struct {
    unsigned Mask : 4;
    unsigned CurPosition;
  } VPTState;
  bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
  void forwardVPTPosition() {
    if (!inVPTBlock()) return;
    unsigned TZ = llvm::countr_zero(VPTState.Mask);
    // Unlike implicit IT blocks, a VPT block simply ends after its last slot.
    if (++VPTState.CurPosition == 5 - TZ)
      VPTState.CurPosition = ~0U;
  }
435
  // Diagnostic helpers forwarding to the underlying MCAsmParser.
  void Note(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
    return getParser().Note(L, Msg, Range);
  }

  bool Warning(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
    return getParser().Warning(L, Msg, Range);
  }

  bool Error(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
    return getParser().Error(L, Msg, Range);
  }
447
448 bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
449 unsigned ListNo, bool IsARPop = false);
450 bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
451 unsigned ListNo);
452
453 int tryParseRegister(bool AllowOutofBoundReg = false);
454 bool tryParseRegisterWithWriteBack(OperandVector &);
455 int tryParseShiftRegister(OperandVector &);
456 bool parseRegisterList(OperandVector &, bool EnforceOrder = true,
457 bool AllowRAAC = false,
458 bool AllowOutOfBoundReg = false);
459 bool parseMemory(OperandVector &);
460 bool parseOperand(OperandVector &, StringRef Mnemonic);
461 bool parseImmExpr(int64_t &Out);
462 bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
463 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
464 unsigned &ShiftAmount);
465 bool parseLiteralValues(unsigned Size, SMLoc L);
466 bool parseDirectiveThumb(SMLoc L);
467 bool parseDirectiveARM(SMLoc L);
468 bool parseDirectiveThumbFunc(SMLoc L);
469 bool parseDirectiveCode(SMLoc L);
470 bool parseDirectiveSyntax(SMLoc L);
471 bool parseDirectiveReq(StringRef Name, SMLoc L);
472 bool parseDirectiveUnreq(SMLoc L);
473 bool parseDirectiveArch(SMLoc L);
474 bool parseDirectiveEabiAttr(SMLoc L);
475 bool parseDirectiveCPU(SMLoc L);
476 bool parseDirectiveFPU(SMLoc L);
477 bool parseDirectiveFnStart(SMLoc L);
478 bool parseDirectiveFnEnd(SMLoc L);
479 bool parseDirectiveCantUnwind(SMLoc L);
480 bool parseDirectivePersonality(SMLoc L);
481 bool parseDirectiveHandlerData(SMLoc L);
482 bool parseDirectiveSetFP(SMLoc L);
483 bool parseDirectivePad(SMLoc L);
484 bool parseDirectiveRegSave(SMLoc L, bool IsVector);
485 bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
486 bool parseDirectiveLtorg(SMLoc L);
487 bool parseDirectiveEven(SMLoc L);
488 bool parseDirectivePersonalityIndex(SMLoc L);
489 bool parseDirectiveUnwindRaw(SMLoc L);
490 bool parseDirectiveTLSDescSeq(SMLoc L);
491 bool parseDirectiveMovSP(SMLoc L);
492 bool parseDirectiveObjectArch(SMLoc L);
493 bool parseDirectiveArchExtension(SMLoc L);
494 bool parseDirectiveAlign(SMLoc L);
495 bool parseDirectiveThumbSet(SMLoc L);
496
497 bool parseDirectiveSEHAllocStack(SMLoc L, bool Wide);
498 bool parseDirectiveSEHSaveRegs(SMLoc L, bool Wide);
499 bool parseDirectiveSEHSaveSP(SMLoc L);
500 bool parseDirectiveSEHSaveFRegs(SMLoc L);
501 bool parseDirectiveSEHSaveLR(SMLoc L);
502 bool parseDirectiveSEHPrologEnd(SMLoc L, bool Fragment);
503 bool parseDirectiveSEHNop(SMLoc L, bool Wide);
504 bool parseDirectiveSEHEpilogStart(SMLoc L, bool Condition);
505 bool parseDirectiveSEHEpilogEnd(SMLoc L);
506 bool parseDirectiveSEHCustom(SMLoc L);
507
508 bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
509 StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
510 ARMCC::CondCodes &PredicationCode,
511 ARMVCC::VPTCodes &VPTPredicationCode,
512 bool &CarrySetting, unsigned &ProcessorIMod,
513 StringRef &ITMask);
514 void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
515 StringRef FullInst, bool &CanAcceptCarrySet,
516 bool &CanAcceptPredicationCode,
517 bool &CanAcceptVPTPredicationCode);
518 bool enableArchExtFeature(StringRef Name, SMLoc &ExtLoc);
519
520 void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
522 bool CDEConvertDualRegOperand(StringRef Mnemonic, OperandVector &Operands);
523
  // Subtarget feature predicates, evaluated against the current STI.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return getSTI().hasFeature(ARM::ModeThumb);
  }

  bool isThumbOne() const {
    return isThumb() && !getSTI().hasFeature(ARM::FeatureThumb2);
  }

  bool isThumbTwo() const {
    return isThumb() && getSTI().hasFeature(ARM::FeatureThumb2);
  }

  bool hasThumb() const {
    return getSTI().hasFeature(ARM::HasV4TOps);
  }

  bool hasThumb2() const {
    return getSTI().hasFeature(ARM::FeatureThumb2);
  }

  bool hasV6Ops() const {
    return getSTI().hasFeature(ARM::HasV6Ops);
  }

  bool hasV6T2Ops() const {
    return getSTI().hasFeature(ARM::HasV6T2Ops);
  }

  bool hasV6MOps() const {
    return getSTI().hasFeature(ARM::HasV6MOps);
  }

  bool hasV7Ops() const {
    return getSTI().hasFeature(ARM::HasV7Ops);
  }

  bool hasV8Ops() const {
    return getSTI().hasFeature(ARM::HasV8Ops);
  }

  bool hasV8MBaseline() const {
    return getSTI().hasFeature(ARM::HasV8MBaselineOps);
  }

  bool hasV8MMainline() const {
    return getSTI().hasFeature(ARM::HasV8MMainlineOps);
  }
  bool hasV8_1MMainline() const {
    return getSTI().hasFeature(ARM::HasV8_1MMainlineOps);
  }
  bool hasMVE() const {
    return getSTI().hasFeature(ARM::HasMVEIntegerOps);
  }
  bool hasMVEFloat() const {
    return getSTI().hasFeature(ARM::HasMVEFloatOps);
  }
  bool hasCDE() const {
    return getSTI().hasFeature(ARM::HasCDEOps);
  }
  bool has8MSecExt() const {
    return getSTI().hasFeature(ARM::Feature8MSecExt);
  }

  bool hasARM() const {
    // ARM-state support is expressed as the *absence* of FeatureNoARM.
    return !getSTI().hasFeature(ARM::FeatureNoARM);
  }

  bool hasDSP() const {
    return getSTI().hasFeature(ARM::FeatureDSP);
  }

  bool hasD32() const {
    return getSTI().hasFeature(ARM::FeatureD32);
  }

  bool hasV8_1aOps() const {
    return getSTI().hasFeature(ARM::HasV8_1aOps);
  }

  bool hasRAS() const {
    return getSTI().hasFeature(ARM::FeatureRAS);
  }
607
608 void SwitchMode() {
609 MCSubtargetInfo &STI = copySTI();
610 auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
612 }
613
614 void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
615
  // True for M-profile targets.
  bool isMClass() const {
    return getSTI().hasFeature(ARM::FeatureMClass);
  }
619
620 /// @name Auto-generated Match Functions
621 /// {
622
623#define GET_ASSEMBLER_HEADER
624#include "ARMGenAsmMatcher.inc"
625
626 /// }
627
628 ParseStatus parseITCondCode(OperandVector &);
629 ParseStatus parseCoprocNumOperand(OperandVector &);
630 ParseStatus parseCoprocRegOperand(OperandVector &);
631 ParseStatus parseCoprocOptionOperand(OperandVector &);
632 ParseStatus parseMemBarrierOptOperand(OperandVector &);
633 ParseStatus parseTraceSyncBarrierOptOperand(OperandVector &);
634 ParseStatus parseInstSyncBarrierOptOperand(OperandVector &);
635 ParseStatus parseProcIFlagsOperand(OperandVector &);
636 ParseStatus parseMSRMaskOperand(OperandVector &);
637 ParseStatus parseBankedRegOperand(OperandVector &);
638 ParseStatus parsePKHImm(OperandVector &O, StringRef Op, int Low, int High);
  // PKH shift immediates: the LSL form allows 0-31, the ASR form 1-32.
  ParseStatus parsePKHLSLImm(OperandVector &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  ParseStatus parsePKHASRImm(OperandVector &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
645 ParseStatus parseSetEndImm(OperandVector &);
646 ParseStatus parseShifterImm(OperandVector &);
647 ParseStatus parseRotImm(OperandVector &);
648 ParseStatus parseModImm(OperandVector &);
649 ParseStatus parseBitfield(OperandVector &);
650 ParseStatus parsePostIdxReg(OperandVector &);
651 ParseStatus parseAM3Offset(OperandVector &);
652 ParseStatus parseFPImm(OperandVector &);
653 ParseStatus parseVectorList(OperandVector &);
654 ParseStatus parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
655 SMLoc &EndLoc);
656
657 // Asm Match Converter Methods
658 void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
659 void cvtThumbBranches(MCInst &Inst, const OperandVector &);
660 void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &);
661
662 bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
663 bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
664 bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
665 bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
666 bool shouldOmitVectorPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
667 bool isITBlockTerminator(MCInst &Inst) const;
668 void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);
669 bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
670 bool Load, bool ARMMode, bool Writeback);
671
672public:
673 enum ARMMatchResultTy {
674 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
675 Match_RequiresNotITBlock,
676 Match_RequiresV6,
677 Match_RequiresThumb2,
678 Match_RequiresV8,
679 Match_RequiresFlagSetting,
680#define GET_OPERAND_DIAGNOSTIC_TYPES
681#include "ARMGenAsmMatcher.inc"
682
683 };
684
685 ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
686 const MCInstrInfo &MII, const MCTargetOptions &Options)
687 : MCTargetAsmParser(Options, STI, MII), UC(Parser), MS(STI) {
689
690 // Cache the MCRegisterInfo.
692
693 // Initialize the set of available features.
694 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
695
696 // Add build attributes based on the selected target.
698 getTargetStreamer().emitTargetAttributes(STI);
699
700 // Not in an ITBlock to start with.
701 ITState.CurPosition = ~0U;
702
703 VPTState.CurPosition = ~0U;
704
705 NextSymbolIsThumb = false;
706 }
707
708 // Implementation of the MCTargetAsmParser interface:
709 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
711 SMLoc &EndLoc) override;
713 SMLoc NameLoc, OperandVector &Operands) override;
714 bool ParseDirective(AsmToken DirectiveID) override;
715
717 unsigned Kind) override;
718 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
719
720 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
723 bool MatchingInlineAsm) override;
724 unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
726 bool MatchingInlineAsm, bool &EmitInITBlock,
727 MCStreamer &Out);
728
729 struct NearMissMessage {
730 SMLoc Loc;
731 SmallString<128> Message;
732 };
733
734 const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
735
736 void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
739 void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
741
743 getVariantKindForName(StringRef Name) const override;
744
745 void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) override;
746
747 void onLabelParsed(MCSymbol *Symbol) override;
748};
749
750/// ARMOperand - Instances of this class represent a parsed ARM machine
751/// operand.
752class ARMOperand : public MCParsedAsmOperand {
  // Discriminator selecting which member of the anonymous union below is
  // live for this operand.
  enum KindTy {
    k_CondCode,
    k_VPTPred,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_MemBarrierOpt,
    k_InstSyncBarrierOpt,
    k_TraceSyncBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_BankedReg,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_RegisterListWithAPSR,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_FPSRegisterListWithVPR,
    k_FPDRegisterListWithVPR,
    k_VectorList,
    k_VectorListAllLanes,
    k_VectorListIndexed,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_ModifiedImmediate,
    k_ConstantPoolImmediate,
    k_BitfieldDescriptor,
    k_Token,
  } Kind;
790
791 SMLoc StartLoc, EndLoc, AlignmentLoc;
793
794 struct CCOp {
796 };
797
798 struct VCCOp {
800 };
801
  // Coprocessor number or coprocessor register (shared by k_CoprocNum and
  // k_CoprocReg).
  struct CopOp {
    unsigned Val;
  };

  struct CoprocOptionOp {
    unsigned Val;
  };

  // 4-bit IT condition mask, in the ARMOperand/MCOperand convention
  // (1 bit = 'e', independent of the condition's low bit).
  struct ITMaskOp {
    unsigned Mask:4;
  };

  struct MBOptOp {
    ARM_MB::MemBOpt Val;
  };
817
818 struct ISBOptOp {
820 };
821
822 struct TSBOptOp {
824 };
825
826 struct IFlagsOp {
828 };
829
  struct MMaskOp {
    unsigned Val;
  };

  struct BankedRegOp {
    unsigned Val;
  };

  // Unowned pointer/length pair into the source buffer.
  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct RegOp {
    unsigned RegNum;
  };

  // A vector register list is a sequential list of 1 to 4 registers.
  struct VectorListOp {
    unsigned RegNum;    // First register of the list.
    unsigned Count;     // Number of registers in the list.
    unsigned LaneIndex; // Lane number for the indexed forms.
    bool isDoubleSpaced;
  };

  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  /// Combined record for all forms of ARM address expressions.
  struct MemoryOp {
    unsigned BaseRegNum;
    // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
    // was specified.
    const MCExpr *OffsetImm;    // Offset immediate value
    unsigned OffsetRegNum;      // Offset register num, when OffsetImm == NULL
    ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
    unsigned ShiftImm;          // shift for OffsetReg.
    unsigned Alignment;         // 0 = no alignment specified
                                // n = alignment in bytes (2, 4, 8, 16, or 32)
    unsigned isNegative : 1;    // Negated OffsetReg? (~'U' bit)
  };

  // Post-indexed addressing: base register updated by +/- (reg shifted imm).
  struct PostIdxRegOp {
    unsigned RegNum;
    bool isAdd;
    ARM_AM::ShiftOpc ShiftTy;
    unsigned ShiftImm;
  };

  struct ShifterImmOp {
    bool isASR; // ASR when true, LSL otherwise.
    unsigned Imm;
  };

  // Register shifted by a register amount, e.g. "r0, lsl r1".
  struct RegShiftedRegOp {
    ARM_AM::ShiftOpc ShiftTy;
    unsigned SrcReg;
    unsigned ShiftReg;
    unsigned ShiftImm;
  };

  // Register shifted by an immediate amount, e.g. "r0, lsl #2".
  struct RegShiftedImmOp {
    ARM_AM::ShiftOpc ShiftTy;
    unsigned SrcReg;
    unsigned ShiftImm;
  };

  struct RotImmOp {
    unsigned Imm;
  };

  // Modified immediate: value bits plus rotation amount.
  struct ModImmOp {
    unsigned Bits;
    unsigned Rot;
  };

  struct BitfieldOp {
    unsigned LSB;
    unsigned Width;
  };

  // Payload storage; Kind above records which member is live.
  union {
    struct CCOp CC;
    struct VCCOp VCC;
    struct CopOp Cop;
    struct CoprocOptionOp CoprocOption;
    struct MBOptOp MBOpt;
    struct ISBOptOp ISBOpt;
    struct TSBOptOp TSBOpt;
    struct ITMaskOp ITMask;
    struct IFlagsOp IFlags;
    struct MMaskOp MMask;
    struct BankedRegOp BankedReg;
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct MemoryOp Memory;
    struct PostIdxRegOp PostIdxReg;
    struct ShifterImmOp ShifterImm;
    struct RegShiftedRegOp RegShiftedReg;
    struct RegShiftedImmOp RegShiftedImm;
    struct RotImmOp RotImm;
    struct ModImmOp ModImm;
    struct BitfieldOp Bitfield;
  };
942
public:
  ARMOperand(KindTy K) : Kind(K) {}

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }

  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// getLocRange - Get the range between the first and last token of this
  /// operand.
  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

  /// getAlignmentLoc - Get the location of the Alignment token of this operand.
  SMLoc getAlignmentLoc() const {
    // Only memory operands carry an alignment specifier.
    assert(Kind == k_Memory && "Invalid access!");
    return AlignmentLoc;
  }
961
963 assert(Kind == k_CondCode && "Invalid access!");
964 return CC.Val;
965 }
966
  // Typed accessors for the payload union; each asserts that the operand's
  // Kind matches before reading the corresponding member.
  ARMVCC::VPTCodes getVPTPred() const {
    assert(isVPTPred() && "Invalid access!");
    return VCC.Val;
  }

  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const override {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
            Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
            Kind == k_FPSRegisterListWithVPR ||
            Kind == k_FPDRegisterListWithVPR) &&
           "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(isImm() && "Invalid access!");
    return Imm.Val;
  }

  // Constant-pool immediates share the Imm payload but a distinct Kind.
  const MCExpr *getConstantPoolImm() const {
    assert(isConstantPoolImm() && "Invalid access!");
    return Imm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
    assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
    return ISBOpt.Val;
  }

  ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
    assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");
    return TSBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }

  unsigned getBankedReg() const {
    assert(Kind == k_BankedReg && "Invalid access!");
    return BankedReg.Val;
  }
1040
  // Kind predicates, used (among others) by the generated matcher.
  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isVPTPred() const { return Kind == k_VPTPred; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  // An IT condition code is stored as an ordinary condition-code operand.
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const override {
    return Kind == k_Immediate;
  }
1052
1053 bool isARMBranchTarget() const {
1054 if (!isImm()) return false;
1055
1056 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1057 return CE->getValue() % 4 == 0;
1058 return true;
1059 }
1060
1061
1062 bool isThumbBranchTarget() const {
1063 if (!isImm()) return false;
1064
1065 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1066 return CE->getValue() % 2 == 0;
1067 return true;
1068 }
1069
1070 // checks whether this operand is an unsigned offset which fits is a field
1071 // of specified width and scaled by a specific number of bits
1072 template<unsigned width, unsigned scale>
1073 bool isUnsignedOffset() const {
1074 if (!isImm()) return false;
1075 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1076 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1077 int64_t Val = CE->getValue();
1078 int64_t Align = 1LL << scale;
1079 int64_t Max = Align * ((1LL << width) - 1);
1080 return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
1081 }
1082 return false;
1083 }
1084
1085 // checks whether this operand is an signed offset which fits is a field
1086 // of specified width and scaled by a specific number of bits
1087 template<unsigned width, unsigned scale>
1088 bool isSignedOffset() const {
1089 if (!isImm()) return false;
1090 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1091 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1092 int64_t Val = CE->getValue();
1093 int64_t Align = 1LL << scale;
1094 int64_t Max = Align * ((1LL << (width-1)) - 1);
1095 int64_t Min = -Align * (1LL << (width-1));
1096 return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
1097 }
1098 return false;
1099 }
1100
1101 // checks whether this operand is an offset suitable for the LE /
1102 // LETP instructions in Arm v8.1M
1103 bool isLEOffset() const {
1104 if (!isImm()) return false;
1105 if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1106 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1107 int64_t Val = CE->getValue();
1108 return Val < 0 && Val >= -4094 && (Val & 1) == 0;
1109 }
1110 return false;
1111 }
1112
  // Checks whether this operand is a memory operand computed as an offset
  // applied to PC. The offset may have 8 bits of magnitude and is
  // represented with two bits of shift. Textually it may be either
  // [pc, #imm], #imm or a relocatable expression.
  bool isThumbMemPC() const {
    int64_t Val = 0;
    if (isImm()) {
      // A symbol reference is resolved later by a fixup.
      if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
      if (!CE) return false;
      Val = CE->getValue();
    }
    else if (isGPRMem()) {
      // Must be a pure [pc, #imm] form: constant offset, no offset register.
      if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
      if(Memory.BaseRegNum != ARM::PC) return false;
      if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
        Val = CE->getValue();
      else
        return false;
    }
    else return false;
    // 8-bit magnitude scaled by 4: a multiple of 4 in [0, 1020].
    return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
  }
1136
1137 bool isFPImm() const {
1138 if (!isImm()) return false;
1139 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1140 if (!CE) return false;
1141 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1142 return Val != -1;
1143 }
1144
1145 template<int64_t N, int64_t M>
1146 bool isImmediate() const {
1147 if (!isImm()) return false;
1148 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1149 if (!CE) return false;
1150 int64_t Value = CE->getValue();
1151 return Value >= N && Value <= M;
1152 }
1153
1154 template<int64_t N, int64_t M>
1155 bool isImmediateS4() const {
1156 if (!isImm()) return false;
1157 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1158 if (!CE) return false;
1159 int64_t Value = CE->getValue();
1160 return ((Value & 3) == 0) && Value >= N && Value <= M;
1161 }
1162 template<int64_t N, int64_t M>
1163 bool isImmediateS2() const {
1164 if (!isImm()) return false;
1165 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1166 if (!CE) return false;
1167 int64_t Value = CE->getValue();
1168 return ((Value & 1) == 0) && Value >= N && Value <= M;
1169 }
  // Thin range-check wrappers over the isImmediate* templates, named after
  // the operand classes they implement in the .td files.
  bool isFBits16() const {
    return isImmediate<0, 17>();
  }
  bool isFBits32() const {
    return isImmediate<1, 33>();
  }
  bool isImm8s4() const {
    return isImmediateS4<-1020, 1020>();
  }
  bool isImm7s4() const {
    return isImmediateS4<-508, 508>();
  }
  bool isImm7Shift0() const {
    return isImmediate<-127, 127>();
  }
  bool isImm7Shift1() const {
    return isImmediateS2<-255, 255>();
  }
  bool isImm7Shift2() const {
    return isImmediateS4<-511, 511>();
  }
  bool isImm7() const {
    return isImmediate<-127, 127>();
  }
  bool isImm0_1020s4() const {
    return isImmediateS4<0, 1020>();
  }
  bool isImm0_508s4() const {
    return isImmediateS4<0, 508>();
  }
1200 bool isImm0_508s4Neg() const {
1201 if (!isImm()) return false;
1202 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1203 if (!CE) return false;
1204 int64_t Value = -CE->getValue();
1205 // explicitly exclude zero. we want that to use the normal 0_508 version.
1206 return ((Value & 3) == 0) && Value > 0 && Value <= 508;
1207 }
1208
  // True if the negation of the constant lies in (0, 4095].
  bool isImm0_4095Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    // isImm0_4095Neg is used with 32-bit immediates only.
    // 32-bit immediates are zero extended to 64-bit when parsed,
    // thus simple -CE->getValue() results in a big negative number,
    // not a small positive number as intended
    if ((CE->getValue() >> 32) > 0) return false;
    // Negate in 32-bit arithmetic so e.g. 0xFFFFF000 maps to 4096.
    uint32_t Value = -static_cast<uint32_t>(CE->getValue());
    return Value > 0 && Value < 4096;
  }
1221
  // Small fixed-range immediates used by shift/rotate/saturate encodings.
  bool isImm0_7() const {
    return isImmediate<0, 7>();
  }

  bool isImm1_16() const {
    return isImmediate<1, 16>();
  }

  bool isImm1_32() const {
    return isImmediate<1, 32>();
  }

  bool isImm8_255() const {
    return isImmediate<8, 255>();
  }

  bool isImm0_255Expr() const {
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE)
      return true;
    int64_t Value = CE->getValue();
    return isUInt<8>(Value);
  }

  bool isImm256_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 256 && Value < 65536;
  }

  bool isImm0_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }

  bool isImm24bit() const {
    return isImmediate<0, 0xffffff + 1>();
  }

  // Thumb shift-right amount: 1..32 (33 is exclusive upper bound).
  bool isImmThumbSR() const {
    return isImmediate<1, 33>();
  }

  bool isPKHLSLImm() const {
    return isImmediate<0, 32>();
  }

  bool isPKHASRImm() const {
    return isImmediate<0, 33>();
  }
1285
1286 bool isAdrLabel() const {
1287 // If we have an immediate that's not a constant, treat it as a label
1288 // reference needing a fixup.
1289 if (isImm() && !isa<MCConstantExpr>(getImm()))
1290 return true;
1291
1292 // If it is a constant, it must fit into a modified immediate encoding.
1293 if (!isImm()) return false;
1294 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1295 if (!CE) return false;
1296 int64_t Value = CE->getValue();
1297 return (ARM_AM::getSOImmVal(Value) != -1 ||
1298 ARM_AM::getSOImmVal(-Value) != -1);
1299 }
1300
  // True if the operand is encodable as a Thumb-2 modified immediate, or is
  // a relocatable expression (other than :upper16:/:lower16:) to be fixed
  // up later.
  bool isT2SOImm() const {
    // If we have an immediate that's not a constant, treat it as an expression
    // needing a fixup.
    if (isImm() && !isa<MCConstantExpr>(getImm())) {
      // We want to avoid matching :upper16: and :lower16: as we want these
      // expressions to match in isImm0_65535Expr()
      const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
      return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
                             ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
    }
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }
1317
1318 bool isT2SOImmNot() const {
1319 if (!isImm()) return false;
1320 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1321 if (!CE) return false;
1322 int64_t Value = CE->getValue();
1323 return ARM_AM::getT2SOImmVal(Value) == -1 &&
1325 }
1326
1327 bool isT2SOImmNeg() const {
1328 if (!isImm()) return false;
1329 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1330 if (!CE) return false;
1331 int64_t Value = CE->getValue();
1332 // Only use this when not representable as a plain so_imm.
1333 return ARM_AM::getT2SOImmVal(Value) == -1 &&
1335 }
1336
1337 bool isSetEndImm() const {
1338 if (!isImm()) return false;
1339 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1340 if (!CE) return false;
1341 int64_t Value = CE->getValue();
1342 return Value == 1 || Value == 0;
1343 }
1344
  // Register, register-list and token kind predicates.
  bool isReg() const override { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  // A plain register list is also acceptable where APSR is allowed.
  bool isRegListWithAPSR() const {
    return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
  }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
  bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
  bool isToken() const override { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
  bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
  bool isMem() const override {
    return isGPRMem() || isMVEMem();
  }
  // MVE memory operand: GPR or MQPR base, and (if present) MQPR offset.
  bool isMVEMem() const {
    if (Kind != k_Memory)
      return false;
    if (Memory.BaseRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum) &&
        !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Memory.BaseRegNum))
      return false;
    if (Memory.OffsetRegNum &&
        !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.OffsetRegNum))
      return false;
    return true;
  }
  // Scalar memory operand: base and (if present) offset are both GPRs.
  bool isGPRMem() const {
    if (Kind != k_Memory)
      return false;
    if (Memory.BaseRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
      return false;
    if (Memory.OffsetRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
      return false;
    return true;
  }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  // Register shifted by a register: both registers must be GPRs.
  bool isRegShiftedReg() const {
    return Kind == k_ShiftedRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.SrcReg) &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.ShiftReg);
  }
  // Register shifted by an immediate: the source register must be a GPR.
  bool isRegShiftedImm() const {
    return Kind == k_ShiftedImmediate &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedImm.SrcReg);
  }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
1399
1400 template<unsigned Min, unsigned Max>
1401 bool isPowerTwoInRange() const {
1402 if (!isImm()) return false;
1403 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1404 if (!CE) return false;
1405 int64_t Value = CE->getValue();
1406 return Value > 0 && llvm::popcount((uint64_t)Value) == 1 && Value >= Min &&
1407 Value <= Max;
1408 }
  bool isModImm() const { return Kind == k_ModifiedImmediate; }

  // True if the bitwise complement is encodable as an A32 modified
  // immediate (e.g. lets MOV become MVN).
  bool isModImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(~Value) != -1;
  }

  // True if the negation is encodable as an A32 modified immediate while
  // the value itself is not (e.g. lets ADD become SUB).
  bool isModImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(Value) == -1 &&
           ARM_AM::getSOImmVal(-Value) != -1;
  }
1427
1428 bool isThumbModImmNeg1_7() const {
1429 if (!isImm()) return false;
1430 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1431 if (!CE) return false;
1432 int32_t Value = -(int32_t)CE->getValue();
1433 return 0 < Value && Value < 8;
1434 }
1435
1436 bool isThumbModImmNeg8_255() const {
1437 if (!isImm()) return false;
1438 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1439 if (!CE) return false;
1440 int32_t Value = -(int32_t)CE->getValue();
1441 return 7 < Value && Value < 256;
1442 }
1443
  bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  // Post-indexed register, possibly with a shift; the register must be a GPR.
  bool isPostIdxRegShifted() const {
    return Kind == k_PostIndexRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
  }
  // Post-indexed register with no shift applied.
  bool isPostIdxReg() const {
    return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
  }
  // [Rn] with no offset; optionally require a specific alignment.
  bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  // As isMemNoOffset, but the base must be a GPR other than PC.
  bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  // As isMemNoOffset, but the base must be a GPR other than PC and SP.
  bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  // As isMemNoOffset, but the base must be a low (Thumb) GPR.
  bool isMemNoOffsetT(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  // [pc, #imm] with a 12-bit signed immediate offset.
  bool isMemPCRelImm12() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base register must be PC.
    if (Memory.BaseRegNum != ARM::PC)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // INT32_MIN encodes #-0.
      return (Val > -4096 && Val < 4096) ||
             (Val == std::numeric_limits<int32_t>::min());
    }
    return false;
  }
1511
  // Aligned-memory predicates for NEON loads/stores: [Rn] or [Rn:align]
  // where the alignment must be one of the listed byte counts (0 means no
  // alignment specifier was written). The "Dup" variants correspond to the
  // all-lanes (VLDn dup) forms and accept the same alignments.

  bool isAlignedMemory() const {
    // Any alignment is acceptable.
    return isMemNoOffset(true);
  }

  bool isAlignedMemoryNone() const {
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemoryNone() const {
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory16() const {
    if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory16() const {
    if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory32() const {
    if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory32() const {
    if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory64() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory64() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory64or128() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory64or128() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory64or128or256() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
      return true;
    if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
      return true;
    return isMemNoOffset(false, 0);
  }
1585
  // ARM addressing mode 2: [Rn, Rm] or [Rn, #imm] with imm in [-4095, 4095].
  bool isAddrMode2() const {
    if (!isGPRMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val > -4096 && Val < 4096;
    }
    return false;
  }

  // Stand-alone post-index immediate for addressing mode 2.
  bool isAM2OffsetImm() const {
    if (!isImm()) return false;
    // Immediate offset in range [-4095, 4095].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // INT32_MIN encodes #-0.
    return (Val == std::numeric_limits<int32_t>::min()) ||
           (Val > -4096 && Val < 4096);
  }

  // ARM addressing mode 3: [Rn, Rm] or [Rn, #imm] with imm in [-255, 255],
  // no shift allowed. Also accepts a label reference needing a fixup.
  bool isAddrMode3() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isGPRMem() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and
      // we have to check for this too.
      return (Val > -256 && Val < 256) ||
             Val == std::numeric_limits<int32_t>::min();
    }
    return false;
  }

  // Stand-alone post-index operand for addressing mode 3: a plain register
  // or an immediate in [-255, 255].
  bool isAM3Offset() const {
    if (isPostIdxReg())
      return true;
    if (!isImm())
      return false;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is std::numeric_limits<int32_t>::min().
    return (Val > -256 && Val < 256) ||
           Val == std::numeric_limits<int32_t>::min();
  }

  // ARM addressing mode 5 (VFP load/store): [Rn, #imm] with imm a multiple
  // of 4 in [-1020, 1020]. Also accepts a label reference needing a fixup.
  bool isAddrMode5() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isGPRMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
             Val == std::numeric_limits<int32_t>::min();
    }
    return false;
  }

  // As isAddrMode5 but for FP16 accesses: imm is a multiple of 2 in
  // [-510, 510].
  bool isAddrMode5FP16() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isGPRMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-510, 510] and a multiple of 2.
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
             Val == std::numeric_limits<int32_t>::min();
    }
    return false;
  }
1683
  // TBB operand: [Rn, Rm] with no shift, no negation, no alignment.
  bool isMemTBB() const {
    if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return true;
  }

  // TBH operand: [Rn, Rm, lsl #1] exactly.
  bool isMemTBH() const {
    if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
        Memory.Alignment != 0 )
      return false;
    return true;
  }

  // Generic [Rn, Rm] (any shift) register-offset operand.
  bool isMemRegOffset() const {
    if (!isGPRMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    return true;
  }

  // Thumb-2 register-offset operand: non-negated, base != PC, and at most
  // lsl #3 applied to the offset register.
  bool isT2MemRegOffset() const {
    if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
      return false;
    // Only lsl #{0, 1, 2, 3} allowed.
    if (Memory.ShiftType == ARM_AM::no_shift)
      return true;
    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
      return false;
    return true;
  }
1716
  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset. No shifts, negations or any other complicating factors.
    if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    // Both registers must be low (r0-r7) registers.
    return isARMLowRegister(Memory.BaseRegNum) &&
           (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
  }

  // Thumb [Rn, #imm] with a word-scaled 5-bit immediate.
  bool isMemThumbRIs4() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 124 && (Val % 4) == 0;
    }
    return false;
  }
1739
  // Thumb [Rn, #imm] with a halfword-scaled 5-bit immediate.
  bool isMemThumbRIs2() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    // (The original comment said "multiple of 4", contradicting the
    // `(Val % 2) == 0` check below; corrected.)
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 62 && (Val % 2) == 0;
    }
    return false;
  }
1752
  // Thumb [Rn, #imm] with an unscaled 5-bit immediate.
  bool isMemThumbRIs1() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 31;
    }
    return false;
  }

  // Thumb SP-relative access: [sp, #imm], imm a multiple of 4 in [0, 1020].
  bool isMemThumbSPI() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
    }
    return false;
  }
1778
  // [Rn, #imm] with imm a multiple of 4 in [-1020, 1020]. Also accepts a
  // label reference needing a fixup.
  bool isMemImm8s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // Special case, #-0 is std::numeric_limits<int32_t>::min().
      return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
             Val == std::numeric_limits<int32_t>::min();
    }
    return false;
  }

  // [Rn, #imm] with imm a multiple of 4 in [-508, 508]; base must not be PC.
  bool isMemImm7s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
        !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;
    // Immediate offset a multiple of 4 in range [-508, 508].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // Special case, #-0 is INT32_MIN.
      return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
    }
    return false;
  }
1817
  // [Rn, #imm] with imm a multiple of 4 in [0, 1020].
  bool isMemImm0_1020s4Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
    }
    return false;
  }

  // [Rn, #imm] with imm in [-255, 255]; base must not be PC.
  bool isMemImm8Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // INT32_MIN encodes #-0.
      return (Val == std::numeric_limits<int32_t>::min()) ||
             (Val > -256 && Val < 256);
    }
    return false;
  }
1844
  // [Rn, #imm] where imm is an element of [-127, 127] shifted left by
  // `Bits`, and the base register belongs to `RegClassID`.
  template<unsigned Bits, unsigned RegClassID>
  bool isMemImm7ShiftedOffset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
        !ARMMCRegisterClasses[RegClassID].contains(Memory.BaseRegNum))
      return false;

    // Expect an immediate offset equal to an element of the range
    // [-127, 127], shifted left by Bits.

    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();

      // INT32_MIN is a special-case value (indicating the encoding with
      // zero offset and the subtract bit set)
      if (Val == INT32_MIN)
        return true;

      unsigned Divisor = 1U << Bits;

      // Check that the low bits are zero
      if (Val % Divisor != 0)
        return false;

      // Check that the remaining offset is within range.
      Val /= Divisor;
      return (Val >= -127 && Val <= 127);
    }
    return false;
  }
1875
  // MVE gather/scatter form [Rn, Qm] (optionally with uxtw #shift): GPR
  // (not PC) base and MQPR offset register.
  template <int shift> bool isMemRegRQOffset() const {
    if (!isMVEMem() || Memory.OffsetImm != nullptr || Memory.Alignment != 0)
      return false;

    if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;
    if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.OffsetRegNum))
      return false;

    if (shift == 0 && Memory.ShiftType != ARM_AM::no_shift)
      return false;

    // A nonzero template shift requires exactly `uxtw #shift`.
    if (shift > 0 &&
        (Memory.ShiftType != ARM_AM::uxtw || Memory.ShiftImm != shift))
      return false;

    return true;
  }

  // MVE form [Qn, #imm]: MQPR base with an immediate offset that is a
  // multiple of 2^shift fitting in 7 bits (plus sign) after scaling.
  template <int shift> bool isMemRegQOffset() const {
    if (!isMVEMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;

    if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    if (!Memory.OffsetImm)
      return true;
    static_assert(shift < 56,
                  "Such that we dont shift by a value higher than 62");
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();

      // The value must be a multiple of (1 << shift)
      if ((Val & ((1U << shift) - 1)) != 0)
        return false;

      // And be in the right range, depending on the amount that it is shifted
      // by. Shift 0, is equal to 7 unsigned bits, the sign bit is set
      // separately.
      int64_t Range = (1U << (7 + shift)) - 1;
      return (Val == INT32_MIN) || (Val > -Range && Val < Range);
    }
    return false;
  }
1924
  // [Rn, #imm] with imm in [0, 255].
  bool isMemPosImm8Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 255].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val < 256;
    }
    return false;
  }

  // [Rn, #-imm] with imm in [1, 255]; an offset is mandatory here.
  bool isMemNegImm8Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, -1].
    if (!Memory.OffsetImm) return false;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // INT32_MIN encodes #-0.
      return (Val == std::numeric_limits<int32_t>::min()) ||
             (Val > -256 && Val < 0);
    }
    return false;
  }

  // [Rn, #imm] with imm in [0, 4095].
  bool isMemUImm12Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return (Val >= 0 && Val < 4096);
    }
    return false;
  }

  // [Rn, #imm] with imm in [-4095, 4095]. Also accepts a label reference
  // needing a fixup.
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.

    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // INT32_MIN encodes #-0.
      return (Val > -4096 && Val < 4096) ||
             (Val == std::numeric_limits<int32_t>::min());
    }
    // If we have an immediate that's not a constant, treat it as a
    // symbolic expression needing a fixup.
    return true;
  }
1985
1986 bool isConstPoolAsmImm() const {
1987 // Delay processing of Constant Pool Immediate, this will turn into
1988 // a constant. Match no other operand
1989 return (isConstantPoolImm());
1990 }
1991
1992 bool isPostIdxImm8() const {
1993 if (!isImm()) return false;
1994 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1995 if (!CE) return false;
1996 int64_t Val = CE->getValue();
1997 return (Val > -256 && Val < 256) ||
1998 (Val == std::numeric_limits<int32_t>::min());
1999 }
2000
  // Post-indexed immediate: a multiple of 4 in [-1020, 1020], or the
  // INT32_MIN sentinel used by the parser (presumably #-0 — TODO confirm).
  bool isPostIdxImm8s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
           (Val == std::numeric_limits<int32_t>::min());
  }
2009
  // Simple kind tests for the remaining scalar operand categories.
  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isBankedReg() const { return Kind == k_BankedReg; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
2013
  // NEON operands.
  // Spacing predicates for plain vector lists: single-spaced lists use
  // consecutive D registers, double-spaced lists use every other D register.
  bool isSingleSpacedVectorList() const {
    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorList() const {
    return Kind == k_VectorList && VectorList.isDoubleSpaced;
  }
2022
  // Vector-list classifiers: each checks spacing, element count, and (for the
  // register-class variants) that the first register is in the right class.
  bool isVecListOneD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 1;
  }

  // Two consecutive MVE Q registers.
  bool isVecListTwoMQ() const {
    return isSingleSpacedVectorList() && VectorList.Count == 2 &&
           ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
               VectorList.RegNum);
  }

  // An even/odd D-register pair (no count check needed; the register class
  // encodes the pairing).
  bool isVecListDPair() const {
    if (!isSingleSpacedVectorList()) return false;
    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListThreeD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  // A D-register pair with a one-register gap (double spacing).
  bool isVecListDPairSpaced() const {
    if (Kind != k_VectorList) return false;
    if (isSingleSpacedVectorList()) return false;
    return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListThreeQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  // Four consecutive MVE Q registers.
  bool isVecListFourMQ() const {
    return isSingleSpacedVectorList() && VectorList.Count == 4 &&
           ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
               VectorList.RegNum);
  }
2072
  // All-lanes vector lists (the "vld1.32 {d0[]}, ..." broadcast forms).
  // Same spacing/count scheme as the plain vector-list predicates above.
  bool isSingleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
  }

  bool isVecListOneDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 1;
  }

  bool isVecListDPairAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListDPairSpacedAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 2;
  }

  bool isVecListThreeDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListThreeQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 4;
  }

  bool isVecListFourQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 4;
  }
2116
  // Lane-indexed vector lists (e.g. "{d0[2], d1[2]}"). The maximum legal lane
  // index depends on element size: 7 for bytes, 3 for halfwords, 1 for words.
  bool isSingleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
  }

  // One-register lists.
  bool isVecListOneDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
  }

  bool isVecListOneDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
  }

  bool isVecListOneDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
  }

  // Two-register lists; the Q variants are double-spaced.
  bool isVecListTwoDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
  }

  bool isVecListTwoDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  // Three-register lists.
  bool isVecListThreeDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
  }

  bool isVecListThreeDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
  }

  bool isVecListThreeQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
  }

  bool isVecListThreeQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
  }

  bool isVecListThreeDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
  }

  // Four-register lists.
  bool isVecListFourDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
  }

  bool isVecListFourDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
  }

  bool isVecListFourQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
  }

  bool isVecListFourQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
  }

  bool isVecListFourDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
  }
2214
  bool isVectorIndex() const { return Kind == k_VectorIndex; }

  // True when the operand is a vector lane index strictly below NumLanes.
  template <unsigned NumLanes>
  bool isVectorIndexInRange() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < NumLanes;
  }

  // Lane-index checks per element width: 16 lanes of i8 would need index < 16,
  // but these are the 64-bit D-register views (8/4/2/1 lanes).
  bool isVectorIndex8()  const { return isVectorIndexInRange<8>(); }
  bool isVectorIndex16() const { return isVectorIndexInRange<4>(); }
  bool isVectorIndex32() const { return isVectorIndexInRange<2>(); }
  bool isVectorIndex64() const { return isVectorIndexInRange<1>(); }

  // MVE pair-wise lane index: only the two template-specified values match.
  template<int PermittedValue, int OtherPermittedValue>
  bool isMVEPairVectorIndex() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val == PermittedValue ||
           VectorIndex.Val == OtherPermittedValue;
  }
2234
  // Splat of an i8 value across all lanes: any constant in [0, 255].
  bool isNEONi8splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i8 value splatted across 8 bytes. The immediate is just the 8 byte
    // value.
    return Value >= 0 && Value < 256;
  }
2245
2246 bool isNEONi16splat() const {
2247 if (isNEONByteReplicate(2))
2248 return false; // Leave that for bytes replication and forbid by default.
2249 if (!isImm())
2250 return false;
2251 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2252 // Must be a constant.
2253 if (!CE) return false;
2254 unsigned Value = CE->getValue();
2256 }
2257
  // Splat of the bitwise NOT of an i16: valid when the complemented low
  // halfword is an encodable i16 splat.
  bool isNEONi16splatNot() const {
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    unsigned Value = CE->getValue();
    return ARM_AM::isNEONi16splat(~Value & 0xffff);
  }
2267
2268 bool isNEONi32splat() const {
2269 if (isNEONByteReplicate(4))
2270 return false; // Leave that for bytes replication and forbid by default.
2271 if (!isImm())
2272 return false;
2273 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2274 // Must be a constant.
2275 if (!CE) return false;
2276 unsigned Value = CE->getValue();
2278 }
2279
2280 bool isNEONi32splatNot() const {
2281 if (!isImm())
2282 return false;
2283 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2284 // Must be a constant.
2285 if (!CE) return false;
2286 unsigned Value = CE->getValue();
2288 }
2289
2290 static bool isValidNEONi32vmovImm(int64_t Value) {
2291 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
2292 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
2293 return ((Value & 0xffffffffffffff00) == 0) ||
2294 ((Value & 0xffffffffffff00ff) == 0) ||
2295 ((Value & 0xffffffffff00ffff) == 0) ||
2296 ((Value & 0xffffffff00ffffff) == 0) ||
2297 ((Value & 0xffffffffffff00ff) == 0xff) ||
2298 ((Value & 0xffffffffff00ffff) == 0xffff);
2299 }
2300
  // True when the constant immediate consists of NumElems identical elements
  // of Width bits (optionally after bitwise inversion). Each element must
  // itself be encodable: 16-bit elements may set bits in only one byte,
  // 32-bit elements must pass isValidNEONi32vmovImm.
  bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
    assert((Width == 8 || Width == 16 || Width == 32) &&
           "Invalid element width");
    assert(NumElems * Width <= 64 && "Invalid result width");

    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE)
      return false;
    int64_t Value = CE->getValue();
    if (!Value)
      return false; // Don't bother with zero.
    if (Inv)
      Value = ~Value;

    uint64_t Mask = (1ull << Width) - 1;
    uint64_t Elem = Value & Mask;
    // 16-bit elements may have a nonzero byte in only one of their two bytes.
    if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
      return false;
    if (Width == 32 && !isValidNEONi32vmovImm(Elem))
      return false;

    // Every subsequent element must equal the first.
    for (unsigned i = 1; i < NumElems; ++i) {
      Value >>= Width;
      if ((Value & Mask) != Elem)
        return false;
    }
    return true;
  }
2332
  // NumBytes identical byte elements, no inversion.
  bool isNEONByteReplicate(unsigned NumBytes) const {
    return isNEONReplicate(8, NumBytes, false);
  }
2336
  // Debug-build sanity check for the replicate templates: source width must
  // be a legal element size strictly narrower than the destination width.
  static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
    assert((FromW == 8 || FromW == 16 || FromW == 32) &&
           "Invalid source width");
    assert((ToW == 16 || ToW == 32 || ToW == 64) &&
           "Invalid destination width");
    assert(FromW < ToW && "ToW is not less than FromW");
  }
2344
  // FromW-bit element replicated to fill ToW bits, for VMOV; a full i64 splat
  // is excluded so it can match its own dedicated operand class.
  template<unsigned FromW, unsigned ToW>
  bool isNEONmovReplicate() const {
    checkNeonReplicateArgs(FromW, ToW);
    if (ToW == 64 && isNEONi64splat())
      return false;
    return isNEONReplicate(FromW, ToW / FromW, false);
  }
2352
  // Same as isNEONmovReplicate but on the bitwise-inverted value (VMVN).
  template<unsigned FromW, unsigned ToW>
  bool isNEONinvReplicate() const {
    checkNeonReplicateArgs(FromW, ToW);
    return isNEONReplicate(FromW, ToW / FromW, true);
  }
2358
  // i32 immediate encodable by VMOV; byte-replicated values are excluded so
  // they match the byte-replicate operand class instead.
  bool isNEONi32vmov() const {
    if (isNEONByteReplicate(4))
      return false; // Let it to be classified as byte-replicate case.
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE)
      return false;
    return isValidNEONi32vmovImm(CE->getValue());
  }
2370
  // i32 immediate whose bitwise NOT is encodable by VMOV (used for VMVN).
  bool isNEONi32vmovNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    return isValidNEONi32vmovImm(~CE->getValue());
  }
2378
2379 bool isNEONi64splat() const {
2380 if (!isImm()) return false;
2381 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2382 // Must be a constant.
2383 if (!CE) return false;
2384 uint64_t Value = CE->getValue();
2385 // i64 value with each byte being either 0 or 0xff.
2386 for (unsigned i = 0; i < 8; ++i, Value >>= 8)
2387 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
2388 return true;
2389 }
2390
  // Rotation immediate for complex-number instructions (VCADD/VCMLA): the
  // value must be <= 270 and congruent to Remainder modulo Angle
  // (e.g. Angle=90, Remainder=0 accepts 0/90/180/270).
  template<int64_t Angle, int64_t Remainder>
  bool isComplexRotation() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return (Value % Angle == Remainder && Value <= 270);
  }
2401
2402 bool isMVELongShift() const {
2403 if (!isImm()) return false;
2404 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2405 // Must be a constant.
2406 if (!CE) return false;
2407 uint64_t Value = CE->getValue();
2408 return Value >= 1 && Value <= 32;
2409 }
2410
2411 bool isMveSaturateOp() const {
2412 if (!isImm()) return false;
2413 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2414 if (!CE) return false;
2415 uint64_t Value = CE->getValue();
2416 return Value == 48 || Value == 64;
2417 }
2418
2419 bool isITCondCodeNoAL() const {
2420 if (!isITCondCode()) return false;
2422 return CC != ARMCC::AL;
2423 }
2424
2425 bool isITCondCodeRestrictedI() const {
2426 if (!isITCondCode())
2427 return false;
2429 return CC == ARMCC::EQ || CC == ARMCC::NE;
2430 }
2431
2432 bool isITCondCodeRestrictedS() const {
2433 if (!isITCondCode())
2434 return false;
2436 return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
2437 CC == ARMCC::GE;
2438 }
2439
2440 bool isITCondCodeRestrictedU() const {
2441 if (!isITCondCode())
2442 return false;
2444 return CC == ARMCC::HS || CC == ARMCC::HI;
2445 }
2446
2447 bool isITCondCodeRestrictedFP() const {
2448 if (!isITCondCode())
2449 return false;
2451 return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
2452 CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
2453 }
2454
2455 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
2456 // Add as immediates when possible. Null MCExpr = 0.
2457 if (!Expr)
2459 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2460 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2461 else
2463 }
2464
  // Branch targets: forwarded through addExpr so constants become immediates
  // and symbolic targets stay expressions for later fixup resolution.
  void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }
2474
  // Predicate operand pair: the condition-code immediate plus CPSR when the
  // instruction is actually conditional (AL means unconditional, reg 0).
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
    Inst.addOperand(MCOperand::createReg(RegNum));
  }
2481
2482 void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
2483 assert(N == 3 && "Invalid number of operands!");
2484 Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred())));
2485 unsigned RegNum = getVPTPred() == ARMVCC::None ? 0: ARM::P0;
2486 Inst.addOperand(MCOperand::createReg(RegNum));
2488 }
2489
  // vpred_r predicate: the vpred_n triple plus the inactive-lanes register.
  // When predicated, that register must be the one tied to the output.
  void addVPTPredROperands(MCInst &Inst, unsigned N) const {
    assert(N == 4 && "Invalid number of operands!");
    addVPTPredNOperands(Inst, N-1);
    unsigned RegNum;
    if (getVPTPred() == ARMVCC::None) {
      RegNum = 0;
    } else {
      unsigned NextOpIndex = Inst.getNumOperands();
      // NOTE(review): the Insts table appears to be indexed in reverse opcode
      // order here — confirm against the ARMInstrTable definition.
      const MCInstrDesc &MCID =
          ARMDescs.Insts[ARM::INSTRUCTION_LIST_END - 1 - Inst.getOpcode()];
      int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO);
      assert(TiedOp >= 0 &&
             "Inactive register in vpred_r is not tied to an output!");
      RegNum = Inst.getOperand(TiedOp).getReg();
    }
    Inst.addOperand(MCOperand::createReg(RegNum));
  }
2507
  // Coprocessor operands are all emitted as plain immediates.
  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCoproc()));
  }

  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCoproc()));
  }

  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
  }
2522
  // IT-block mask and condition code, emitted as raw immediates.
  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(ITMask.Mask));
  }

  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
  }
2532
2533 void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const {
2534 assert(N == 1 && "Invalid number of operands!");
2536 }
2537
2538 void addCCOutOperands(MCInst &Inst, unsigned N) const {
2539 assert(N == 1 && "Invalid number of operands!");
2541 }
2542
2543 void addRegOperands(MCInst &Inst, unsigned N) const {
2544 assert(N == 1 && "Invalid number of operands!");
2546 }
2547
2548 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
2549 assert(N == 3 && "Invalid number of operands!");
2550 assert(isRegShiftedReg() &&
2551 "addRegShiftedRegOperands() on non-RegShiftedReg!");
2552 Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
2553 Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
2555 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
2556 }
2557
2558 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
2559 assert(N == 2 && "Invalid number of operands!");
2560 assert(isRegShiftedImm() &&
2561 "addRegShiftedImmOperands() on non-RegShiftedImm!");
2562 Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
2563 // Shift of #32 is encoded as 0 where permitted
2564 unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2566 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
2567 }
2568
  // Shifter immediate: bit 5 selects ASR vs LSL, low bits hold the amount.
  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }
2574
2575 void addRegListOperands(MCInst &Inst, unsigned N) const {
2576 assert(N == 1 && "Invalid number of operands!");
2577 const SmallVectorImpl<unsigned> &RegList = getRegList();
2578 for (unsigned Reg : RegList)
2580 }
2581
2582 void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const {
2583 assert(N == 1 && "Invalid number of operands!");
2584 const SmallVectorImpl<unsigned> &RegList = getRegList();
2585 for (unsigned Reg : RegList)
2587 }
2588
  // The class-specific register-list adders all share the generic emission;
  // the class distinction matters only for operand matching.
  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
2604
  // Rotate immediate for SXTB/UXTB-style instructions.
  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
  }
2610
  // ARM modified immediate: emitted as the 8-bit value with the rotation in
  // bits 7+. A plain (possibly symbolic) immediate is forwarded for fixups.
  void addModImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // Support for fixups (MCFixup)
    if (isImm())
      return addImmOperands(Inst, N);

    Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
  }
2620
2621 void addModImmNotOperands(MCInst &Inst, unsigned N) const {
2622 assert(N == 1 && "Invalid number of operands!");
2623 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2624 uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
2626 }
2627
2628 void addModImmNegOperands(MCInst &Inst, unsigned N) const {
2629 assert(N == 1 && "Invalid number of operands!");
2630 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2631 uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
2633 }
2634
2635 void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
2636 assert(N == 1 && "Invalid number of operands!");
2637 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2638 uint32_t Val = -CE->getValue();
2640 }
2641
2642 void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
2643 assert(N == 1 && "Invalid number of operands!");
2644 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2645 uint32_t Val = -CE->getValue();
2647 }
2648
  // BFC/BFI bitfield operand: convert (lsb, width) into the inverted mask
  // encoding used by the instruction.
  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::createImm(Mask));
  }
2659
  // Generic immediate: constants fold, symbolic expressions are preserved.
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }
2664
  // Fixed-point fraction bits (VCVT): encoded as size minus the written value.
  void addFBits16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
  }

  void addFBits32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
  }
2676
2677 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2678 assert(N == 1 && "Invalid number of operands!");
2679 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2680 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
2682 }
2683
  // Scaled immediates that are nevertheless stored unscaled in the MCInst.
  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }
2699
  // 7-bit immediates (with the shift constraint already validated by the
  // operand class): stored verbatim.
  void addImm7Shift0Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7Shift1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7Shift2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }
2723
  // Word-scaled immediates: stored divided by four (optionally negated).
  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
  }

  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
  }

  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
  }
2747
  // Off-by-one encodings: stored as value - 1.
  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
  }
2763
  // Shift amounts where #32 is encoded as 0.
  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
  }

  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // An ASR value of 32 encodes as 0, so that's how we want to add it to
    // the instruction as well.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
  }
2781
  // Complemented/negated immediate forms: the assembly source carries the
  // NOT-ed or negated value, so undo that before storing.
  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
  }

  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
  }

  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually an imm0_4095, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
  }
2805
2806 void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
2807 if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2808 Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2809 return;
2810 }
2811 const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2813 }
2814
2815 void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
2816 assert(N == 1 && "Invalid number of operands!");
2817 if (isImm()) {
2818 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2819 if (CE) {
2820 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2821 return;
2822 }
2823 const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
2825 return;
2826 }
2827
2828 assert(isGPRMem() && "Unknown value type!");
2829 assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
2830 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
2831 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2832 else
2833 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2834 }
2835
  // Barrier options are emitted as their enum value.
  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
  }

  void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
  }

  void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt())));
  }
2850
  // Offset-less memory operands: only the base register is emitted. The
  // variants differ only in which operand class matched them.
  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }

  void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }

  void addMemNoOffsetT2NoSpOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }

  void addMemNoOffsetTOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }
2870
2871 void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
2872 assert(N == 1 && "Invalid number of operands!");
2873 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
2874 Inst.addOperand(MCOperand::createImm(CE->getValue()));
2875 else
2876 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
2877 }
2878
  // Emit the operand of an ADR: a non-constant immediate becomes a label
  // expression needing a fixup; a constant is emitted as its value.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(isImm() && "Not an immediate!");

    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup.
    if (!isa<MCConstantExpr>(getImm())) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    // NOTE(review): the listing elides original line 2892 here (presumably
    // Inst.addOperand(MCOperand::createImm(Val))); confirm against upstream.
  }
2894
2895 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
2896 assert(N == 2 && "Invalid number of operands!");
2897 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2898 Inst.addOperand(MCOperand::createImm(Memory.Alignment));
2899 }
2900
  // The adders below all forward to addAlignedMemoryOperands; the distinct
  // names exist so each alignment-specific operand class binds its own adder.
  void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2944
  // Emit an ARM addrmode2 operand: base reg, offset reg, and a combined
  // immediate that packs offset/shift/add-sub via ARM_AM::getAM2Opc.
  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
    if (!Memory.OffsetRegNum) {
      // NOTE(review): original line 2951 (statement for the !Memory.OffsetImm
      // case) is elided in this listing.
      if (!Memory.OffsetImm)
      else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
        int32_t Val = CE->getValue();
        // NOTE(review): original line 2954 (deriving AddSub from the sign of
        // Val) is elided in this listing.
        // Special case for #-0
        if (Val == std::numeric_limits<int32_t>::min())
          Val = 0;
        if (Val < 0)
          Val = -Val;
        Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
        // NOTE(review): original line 2961 (emitting Val) is elided here.
      } else
        Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      int32_t Val =
      // NOTE(review): original line 2968 (the start of the getAM2Opc call
      // with the add/sub flag) is elided in this listing.
          Memory.ShiftImm, Memory.ShiftType);
      // NOTE(review): original line 2970 (emitting Val) is elided here.
    }
  }
2973
  // Emit an addrmode2 offset immediate, packing magnitude and add/sub flag
  // via ARM_AM::getAM2Opc.
  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    // NOTE(review): original line 2979 (deriving AddSub from the sign of Val)
    // is elided in this listing.
    // Special case for #-0
    if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    // NOTE(review): original lines 2984-2985 (emitting the two operands) are
    // elided in this listing.
  }
2987
  // Emit an ARM addrmode3 operand (reg +/- reg, or reg +/- imm8).
  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      // NOTE(review): original lines 2995-2996 (likely placeholder operands
      // to fill out N == 3 for the label case) are elided in this listing.
      return;
    }

    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
    if (!Memory.OffsetRegNum) {
      // NOTE(review): original line 3004 (statement for the !Memory.OffsetImm
      // case) is elided in this listing.
      if (!Memory.OffsetImm)
      else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
        int32_t Val = CE->getValue();
        // NOTE(review): original line 3007 (deriving AddSub from the sign of
        // Val) is elided in this listing.
        // Special case for #-0
        if (Val == std::numeric_limits<int32_t>::min())
          Val = 0;
        if (Val < 0)
          Val = -Val;
        Val = ARM_AM::getAM3Opc(AddSub, Val);
        // NOTE(review): original line 3014 (emitting Val) is elided here.
      } else
        Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      int32_t Val =
      // NOTE(review): original lines 3021-3022 (rest of the getAM3Opc call
      // and the addOperand) are elided in this listing.
    }
  }
3025
  // Emit an addrmode3 offset operand: either a post-indexed register or a
  // constant offset packed via ARM_AM::getAM3Opc.
  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (Kind == k_PostIndexRegister) {
      int32_t Val =
        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
      Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
      // NOTE(review): original line 3032 (emitting Val) is elided here.
      return;
    }

    // Constant offset.
    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
    int32_t Val = CE->getValue();
    // NOTE(review): original line 3039 (deriving AddSub from the sign of Val)
    // is elided in this listing.
    // Special case for #-0
    if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM3Opc(AddSub, Val);
    // NOTE(review): original lines 3044-3045 (emitting the operands) are
    // elided in this listing.
  }
3047
  // Emit an addrmode5 (VFP load/store) operand: base reg plus a word-scaled
  // offset packed via ARM_AM::getAM5Opc.
  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      // NOTE(review): original line 3055 (second operand for the label case)
      // is elided in this listing.
      return;
    }

    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    // NOTE(review): original line 3061 (statement for the !Memory.OffsetImm
    // case) is elided in this listing.
    if (!Memory.OffsetImm)
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      // The lower two bits are always zero and as such are not encoded.
      int32_t Val = CE->getValue() / 4;
      // NOTE(review): original line 3065 (deriving AddSub) is elided here.
      // Special case for #-0
      if (Val == std::numeric_limits<int32_t>::min())
        Val = 0;
      if (Val < 0)
        Val = -Val;
      Val = ARM_AM::getAM5Opc(AddSub, Val);
      // NOTE(review): original line 3072 (emitting Val) is elided here.
    } else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }
3076
  // Emit an addrmode5 FP16 operand: base reg plus a halfword-scaled offset
  // packed via ARM_AM::getAM5FP16Opc.
  void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      // NOTE(review): original line 3084 (second operand for the label case)
      // is elided in this listing.
      return;
    }

    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    // The lower bit is always zero and as such is not encoded.
    // NOTE(review): original line 3091 (statement for the !Memory.OffsetImm
    // case) is elided in this listing.
    if (!Memory.OffsetImm)
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int32_t Val = CE->getValue() / 2;
      // NOTE(review): original line 3094 (deriving AddSub) is elided here.
      // Special case for #-0
      if (Val == std::numeric_limits<int32_t>::min())
        Val = 0;
      if (Val < 0)
        Val = -Val;
      Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
      // NOTE(review): original line 3101 (emitting Val) is elided here.
    } else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }
3105
  // Emit base register plus an imm8 offset scaled by 4; bare immediates are
  // label references.
  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      // NOTE(review): original line 3113 (second operand for the label case)
      // is elided in this listing.
      return;
    }

    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    addExpr(Inst, Memory.OffsetImm);
  }
3120
  // Emit base register plus an imm7 offset scaled by 4; bare immediates are
  // label references.
  void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      // NOTE(review): original line 3128 (second operand for the label case)
      // is elided in this listing.
      return;
    }

    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    addExpr(Inst, Memory.OffsetImm);
  }
3135
  // Emit base register plus a word-scaled (offset/4) immediate.
  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    // NOTE(review): original line 3140 (statement for the !Memory.OffsetImm
    // case) is elided in this listing.
    if (!Memory.OffsetImm)
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      // The lower two bits are always zero and as such are not encoded.
      Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
    else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }
3147
3148 void addMemImmOffsetOperands(MCInst &Inst, unsigned N) const {
3149 assert(N == 2 && "Invalid number of operands!");
3150 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3151 addExpr(Inst, Memory.OffsetImm);
3152 }
3153
3154 void addMemRegRQOffsetOperands(MCInst &Inst, unsigned N) const {
3155 assert(N == 2 && "Invalid number of operands!");
3156 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3157 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3158 }
3159
  // Emit a uimm12 reg+offset memory operand; bare immediates are labels.
  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (isImm()) {
      addExpr(Inst, getImm());
      // NOTE(review): original line 3165 (second operand for the label case)
      // is elided in this listing.
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    addExpr(Inst, Memory.OffsetImm);
  }
3173
  // Emit an imm12 reg+offset memory operand; bare immediates are labels.
  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (isImm()) {
      addExpr(Inst, getImm());
      // NOTE(review): original line 3179 (second operand for the label case)
      // is elided in this listing.
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    addExpr(Inst, Memory.OffsetImm);
  }
3187
3188 void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
3189 assert(N == 1 && "Invalid number of operands!");
3190 // This is container for the immediate that we will create the constant
3191 // pool from
3192 addExpr(Inst, getConstantPoolImm());
3193 }
3194
3195 void addMemTBBOperands(MCInst &Inst, unsigned N) const {
3196 assert(N == 2 && "Invalid number of operands!");
3197 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3198 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3199 }
3200
3201 void addMemTBHOperands(MCInst &Inst, unsigned N) const {
3202 assert(N == 2 && "Invalid number of operands!");
3203 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3204 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3205 }
3206
  // Emit base reg, offset reg, and an AM2-packed shift/sign immediate.
  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    unsigned Val =
    // NOTE(review): original line 3210 (the ARM_AM::getAM2Opc(...) call with
    // the add/sub flag) is elided in this listing.
            Memory.ShiftImm, Memory.ShiftType);
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
    // NOTE(review): original line 3214 (emitting Val) is elided here.
  }
3216
3217 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3218 assert(N == 3 && "Invalid number of operands!");
3219 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3220 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3221 Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
3222 }
3223
3224 void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
3225 assert(N == 2 && "Invalid number of operands!");
3226 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3227 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3228 }
3229
  // Emit base register plus a word-scaled (offset/4) Thumb immediate.
  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    // NOTE(review): original line 3234 (statement for the !Memory.OffsetImm
    // case) is elided in this listing.
    if (!Memory.OffsetImm)
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      // The lower two bits are always zero and as such are not encoded.
      Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
    else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }
3241
  // Emit base register plus a halfword-scaled (offset/2) Thumb immediate.
  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    // NOTE(review): original line 3246 (statement for the !Memory.OffsetImm
    // case) is elided in this listing.
    if (!Memory.OffsetImm)
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      Inst.addOperand(MCOperand::createImm(CE->getValue() / 2));
    else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }
3252
3253 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
3254 assert(N == 2 && "Invalid number of operands!");
3255 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3256 addExpr(Inst, Memory.OffsetImm);
3257 }
3258
  // Emit base register plus a word-scaled (offset/4) SP-relative immediate.
  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    // NOTE(review): original line 3263 (statement for the !Memory.OffsetImm
    // case) is elided in this listing.
    if (!Memory.OffsetImm)
    else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      // The lower two bits are always zero and as such are not encoded.
      Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
    else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }
3270
  // Emit a post-indexed imm8: magnitude in the low bits, add flag in bit 8.
  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // INT32_MIN encodes #-0: magnitude becomes zero, isAdd stays false.
    if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    // NOTE(review): original line 3279 (emitting Imm) is elided here.
  }
3281
  // Emit a post-indexed imm8 scaled by 4: magnitude/4 in the low bits,
  // add flag in bit 8.
  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // INT32_MIN encodes #-0: magnitude becomes zero, isAdd stays false.
    if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
    // NOTE(review): original line 3291 (emitting Imm) is elided here.
  }
3293
3294 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
3295 assert(N == 2 && "Invalid number of operands!");
3296 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3297 Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
3298 }
3299
  // Emit a post-indexed shifted register; sign/shift packed via AM2 helpers.
  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
    // The sign, shift type, and shift amount are encoded in a single operand
    // using the AM2 encoding helpers.
    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
                                     PostIdxReg.ShiftTy);
    // NOTE(review): original line 3308 (emitting Imm) is elided here.
  }
3310
3311 void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
3312 assert(N == 1 && "Invalid number of operands!");
3313 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3314 Inst.addOperand(MCOperand::createImm(CE->getValue()));
3315 }
3316
3317 void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
3318 assert(N == 1 && "Invalid number of operands!");
3319 Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
3320 }
3321
3322 void addBankedRegOperands(MCInst &Inst, unsigned N) const {
3323 assert(N == 1 && "Invalid number of operands!");
3324 Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
3325 }
3326
3327 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
3328 assert(N == 1 && "Invalid number of operands!");
3329 Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
3330 }
3331
3332 void addVecListOperands(MCInst &Inst, unsigned N) const {
3333 assert(N == 1 && "Invalid number of operands!");
3334 Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
3335 }
3336
  // Translate an MVE q-register range into the corresponding MQQPR/MQQQQPR
  // super-register operand.
  void addMVEVecListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // When we come here, the VectorList field will identify a range
    // of q-registers by its base register and length, and it will
    // have already been error-checked to be the expected length of
    // range and contain only q-regs in the range q0-q7. So we can
    // count on the base register being in the range q0-q6 (for 2
    // regs) or q0-q4 (for 4)
    //
    // The MVE instructions taking a register range of this kind will
    // need an operand in the MQQPR or MQQQQPR class, representing the
    // entire range as a unit. So we must translate into that class,
    // by finding the index of the base register in the MQPR reg
    // class, and returning the super-register at the corresponding
    // index in the target class.

    const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
    const MCRegisterClass *RC_out =
        (VectorList.Count == 2) ? &ARMMCRegisterClasses[ARM::MQQPRRegClassID]
        : &ARMMCRegisterClasses[ARM::MQQQQPRRegClassID];

    unsigned I, E = RC_out->getNumRegs();
    for (I = 0; I < E; I++)
      if (RC_in->getRegister(I) == VectorList.RegNum)
        break;
    assert(I < E && "Invalid vector list start register!");

    // NOTE(review): original line 3365 (emitting RC_out's register at index
    // I) is elided in this listing; confirm against upstream.
  }
3367
3368 void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
3369 assert(N == 2 && "Invalid number of operands!");
3370 Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
3371 Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
3372 }
3373
  // Each adder below emits the parsed vector lane index as a plain immediate;
  // the distinct names exist so each index operand class binds its own adder.
  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addMVEPairVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
3403
3404 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
3405 assert(N == 1 && "Invalid number of operands!");
3406 // The immediate encodes the type of constant as well as the value.
3407 // Mask in that this is an i8 splat.
3408 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3409 Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
3410 }
3411
  // NOTE(review): in each of the four splat adders below, the listing elides
  // the final lines (original 3417-3418, 3426-3427, 3435-3436, 3444-3445)
  // that fold Value into the cmode encoding and emit it; confirm upstream.

  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
  }

  void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
  }

  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
  }

  void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
  }
3447
  // Build an i8 replicate (byte splat) immediate, optionally inverting the
  // constant first.
  void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
            Inst.getOpcode() == ARM::VMOVv16i8) &&
           "All instructions that wants to replicate non-zero byte "
           "always must be replaced with VMOVv8i8 or VMOVv16i8.");
    unsigned Value = CE->getValue();
    if (Inv)
      Value = ~Value;
    unsigned B = Value & 0xff;
    B |= 0xe00; // cmode = 0b1110
    // NOTE(review): original line 3460 (emitting B) is elided here.
  }

  // Inverted-byte replicate: forwards with Inv = true.
  void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addNEONi8ReplicateOperands(Inst, true);
  }
3467
3468 static unsigned encodeNeonVMOVImmediate(unsigned Value) {
3469 if (Value >= 256 && Value <= 0xffff)
3470 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
3471 else if (Value > 0xffff && Value <= 0xffffff)
3472 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
3473 else if (Value > 0xffffff)
3474 Value = (Value >> 24) | 0x600;
3475 return Value;
3476 }
3477
  // Emit a 32-bit VMOV immediate via the shared cmode encoder.
  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
    // NOTE(review): original line 3483 (emitting Value) is elided here.
  }

  // Non-inverted byte replicate: forwards with Inv = false.
  void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addNEONi8ReplicateOperands(Inst, false);
  }
3490
3491 void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
3492 assert(N == 1 && "Invalid number of operands!");
3493 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3494 assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
3495 Inst.getOpcode() == ARM::VMOVv8i16 ||
3496 Inst.getOpcode() == ARM::VMVNv4i16 ||
3497 Inst.getOpcode() == ARM::VMVNv8i16) &&
3498 "All instructions that want to replicate non-zero half-word "
3499 "always must be replaced with V{MOV,MVN}v{4,8}i16.");
3500 uint64_t Value = CE->getValue();
3501 unsigned Elem = Value & 0xffff;
3502 if (Elem >= 256)
3503 Elem = (Elem >> 8) | 0x200;
3504 Inst.addOperand(MCOperand::createImm(Elem));
3505 }
3506
  // Emit the bitwise-complemented constant through the shared cmode encoder
  // (used for the VMVN forms).
  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
    // NOTE(review): original line 3512 (emitting Value) is elided here.
  }
3514
3515 void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
3516 assert(N == 1 && "Invalid number of operands!");
3517 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3518 assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
3519 Inst.getOpcode() == ARM::VMOVv4i32 ||
3520 Inst.getOpcode() == ARM::VMVNv2i32 ||
3521 Inst.getOpcode() == ARM::VMVNv4i32) &&
3522 "All instructions that want to replicate non-zero word "
3523 "always must be replaced with V{MOV,MVN}v{2,4}i32.");
3524 uint64_t Value = CE->getValue();
3525 unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
3526 Inst.addOperand(MCOperand::createImm(Elem));
3527 }
3528
3529 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
3530 assert(N == 1 && "Invalid number of operands!");
3531 // The immediate encodes the type of constant as well as the value.
3532 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3533 uint64_t Value = CE->getValue();
3534 unsigned Imm = 0;
3535 for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
3536 Imm |= (Value & 1) << i;
3537 }
3538 Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
3539 }
3540
3541 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
3542 assert(N == 1 && "Invalid number of operands!");
3543 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3544 Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
3545 }
3546
3547 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
3548 assert(N == 1 && "Invalid number of operands!");
3549 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3550 Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
3551 }
3552
3553 void addMveSaturateOperands(MCInst &Inst, unsigned N) const {
3554 assert(N == 1 && "Invalid number of operands!");
3555 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3556 unsigned Imm = CE->getValue();
3557 assert((Imm == 48 || Imm == 64) && "Invalid saturate operand");
3558 Inst.addOperand(MCOperand::createImm(Imm == 48 ? 1 : 0));
3559 }
3560
  /// Debug-print this operand; the implementation appears later in the file.
  void print(raw_ostream &OS) const override;
3562
3563 static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
3564 auto Op = std::make_unique<ARMOperand>(k_ITCondMask);
3565 Op->ITMask.Mask = Mask;
3566 Op->StartLoc = S;
3567 Op->EndLoc = S;
3568 return Op;
3569 }
3570
3571 static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
3572 SMLoc S) {
3573 auto Op = std::make_unique<ARMOperand>(k_CondCode);
3574 Op->CC.Val = CC;
3575 Op->StartLoc = S;
3576 Op->EndLoc = S;
3577 return Op;
3578 }
3579
3580 static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC,
3581 SMLoc S) {
3582 auto Op = std::make_unique<ARMOperand>(k_VPTPred);
3583 Op->VCC.Val = CC;
3584 Op->StartLoc = S;
3585 Op->EndLoc = S;
3586 return Op;
3587 }
3588
3589 static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
3590 auto Op = std::make_unique<ARMOperand>(k_CoprocNum);
3591 Op->Cop.Val = CopVal;
3592 Op->StartLoc = S;
3593 Op->EndLoc = S;
3594 return Op;
3595 }
3596
3597 static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
3598 auto Op = std::make_unique<ARMOperand>(k_CoprocReg);
3599 Op->Cop.Val = CopVal;
3600 Op->StartLoc = S;
3601 Op->EndLoc = S;
3602 return Op;
3603 }
3604
3605 static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
3606 SMLoc E) {
3607 auto Op = std::make_unique<ARMOperand>(k_CoprocOption);
3608 Op->Cop.Val = Val;
3609 Op->StartLoc = S;
3610 Op->EndLoc = E;
3611 return Op;
3612 }
3613
3614 static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
3615 auto Op = std::make_unique<ARMOperand>(k_CCOut);
3616 Op->Reg.RegNum = RegNum;
3617 Op->StartLoc = S;
3618 Op->EndLoc = S;
3619 return Op;
3620 }
3621
3622 static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
3623 auto Op = std::make_unique<ARMOperand>(k_Token);
3624 Op->Tok.Data = Str.data();
3625 Op->Tok.Length = Str.size();
3626 Op->StartLoc = S;
3627 Op->EndLoc = S;
3628 return Op;
3629 }
3630
3631 static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
3632 SMLoc E) {
3633 auto Op = std::make_unique<ARMOperand>(k_Register);
3634 Op->Reg.RegNum = RegNum;
3635 Op->StartLoc = S;
3636 Op->EndLoc = E;
3637 return Op;
3638 }
3639
3640 static std::unique_ptr<ARMOperand>
3641 CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3642 unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
3643 SMLoc E) {
3644 auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister);
3645 Op->RegShiftedReg.ShiftTy = ShTy;
3646 Op->RegShiftedReg.SrcReg = SrcReg;
3647 Op->RegShiftedReg.ShiftReg = ShiftReg;
3648 Op->RegShiftedReg.ShiftImm = ShiftImm;
3649 Op->StartLoc = S;
3650 Op->EndLoc = E;
3651 return Op;
3652 }
3653
3654 static std::unique_ptr<ARMOperand>
3655 CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3656 unsigned ShiftImm, SMLoc S, SMLoc E) {
3657 auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate);
3658 Op->RegShiftedImm.ShiftTy = ShTy;
3659 Op->RegShiftedImm.SrcReg = SrcReg;
3660 Op->RegShiftedImm.ShiftImm = ShiftImm;
3661 Op->StartLoc = S;
3662 Op->EndLoc = E;
3663 return Op;
3664 }
3665
3666 static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
3667 SMLoc S, SMLoc E) {
3668 auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate);
3669 Op->ShifterImm.isASR = isASR;
3670 Op->ShifterImm.Imm = Imm;
3671 Op->StartLoc = S;
3672 Op->EndLoc = E;
3673 return Op;
3674 }
3675
3676 static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
3677 SMLoc E) {
3678 auto Op = std::make_unique<ARMOperand>(k_RotateImmediate);
3679 Op->RotImm.Imm = Imm;
3680 Op->StartLoc = S;
3681 Op->EndLoc = E;
3682 return Op;
3683 }
3684
3685 static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
3686 SMLoc S, SMLoc E) {
3687 auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate);
3688 Op->ModImm.Bits = Bits;
3689 Op->ModImm.Rot = Rot;
3690 Op->StartLoc = S;
3691 Op->EndLoc = E;
3692 return Op;
3693 }
3694
3695 static std::unique_ptr<ARMOperand>
3696 CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
3697 auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate);
3698 Op->Imm.Val = Val;
3699 Op->StartLoc = S;
3700 Op->EndLoc = E;
3701 return Op;
3702 }
3703
3704 static std::unique_ptr<ARMOperand>
3705 CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
3706 auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor);
3707 Op->Bitfield.LSB = LSB;
3708 Op->Bitfield.Width = Width;
3709 Op->StartLoc = S;
3710 Op->EndLoc = E;
3711 return Op;
3712 }
3713
3714 static std::unique_ptr<ARMOperand>
3715 CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
3716 SMLoc StartLoc, SMLoc EndLoc) {
3717 assert(Regs.size() > 0 && "RegList contains no registers?");
3718 KindTy Kind = k_RegisterList;
3719
3720 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
3721 Regs.front().second)) {
3722 if (Regs.back().second == ARM::VPR)
3723 Kind = k_FPDRegisterListWithVPR;
3724 else
3725 Kind = k_DPRRegisterList;
3726 } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
3727 Regs.front().second)) {
3728 if (Regs.back().second == ARM::VPR)
3729 Kind = k_FPSRegisterListWithVPR;
3730 else
3731 Kind = k_SPRRegisterList;
3732 }
3733
3734 if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
3735 Kind = k_RegisterListWithAPSR;
3736
3737 assert(llvm::is_sorted(Regs) && "Register list must be sorted by encoding");
3738
3739 auto Op = std::make_unique<ARMOperand>(Kind);
3740 for (const auto &P : Regs)
3741 Op->Registers.push_back(P.second);
3742
3743 Op->StartLoc = StartLoc;
3744 Op->EndLoc = EndLoc;
3745 return Op;
3746 }
3747
3748 static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
3749 unsigned Count,
3750 bool isDoubleSpaced,
3751 SMLoc S, SMLoc E) {
3752 auto Op = std::make_unique<ARMOperand>(k_VectorList);
3753 Op->VectorList.RegNum = RegNum;
3754 Op->VectorList.Count = Count;
3755 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3756 Op->StartLoc = S;
3757 Op->EndLoc = E;
3758 return Op;
3759 }
3760
3761 static std::unique_ptr<ARMOperand>
3762 CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
3763 SMLoc S, SMLoc E) {
3764 auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes);
3765 Op->VectorList.RegNum = RegNum;
3766 Op->VectorList.Count = Count;
3767 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3768 Op->StartLoc = S;
3769 Op->EndLoc = E;
3770 return Op;
3771 }
3772
3773 static std::unique_ptr<ARMOperand>
3774 CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
3775 bool isDoubleSpaced, SMLoc S, SMLoc E) {
3776 auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed);
3777 Op->VectorList.RegNum = RegNum;
3778 Op->VectorList.Count = Count;
3779 Op->VectorList.LaneIndex = Index;
3780 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3781 Op->StartLoc = S;
3782 Op->EndLoc = E;
3783 return Op;
3784 }
3785
3786 static std::unique_ptr<ARMOperand>
3787 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
3788 auto Op = std::make_unique<ARMOperand>(k_VectorIndex);
3789 Op->VectorIndex.Val = Idx;
3790 Op->StartLoc = S;
3791 Op->EndLoc = E;
3792 return Op;
3793 }
3794
3795 static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
3796 SMLoc E) {
3797 auto Op = std::make_unique<ARMOperand>(k_Immediate);
3798 Op->Imm.Val = Val;
3799 Op->StartLoc = S;
3800 Op->EndLoc = E;
3801 return Op;
3802 }
3803
3804 static std::unique_ptr<ARMOperand>
3805 CreateMem(unsigned BaseRegNum, const MCExpr *OffsetImm, unsigned OffsetRegNum,
3806 ARM_AM::ShiftOpc ShiftType, unsigned ShiftImm, unsigned Alignment,
3807 bool isNegative, SMLoc S, SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
3808 auto Op = std::make_unique<ARMOperand>(k_Memory);
3809 Op->Memory.BaseRegNum = BaseRegNum;
3810 Op->Memory.OffsetImm = OffsetImm;
3811 Op->Memory.OffsetRegNum = OffsetRegNum;
3812 Op->Memory.ShiftType = ShiftType;
3813 Op->Memory.ShiftImm = ShiftImm;
3814 Op->Memory.Alignment = Alignment;
3815 Op->Memory.isNegative = isNegative;
3816 Op->StartLoc = S;
3817 Op->EndLoc = E;
3818 Op->AlignmentLoc = AlignmentLoc;
3819 return Op;
3820 }
3821
3822 static std::unique_ptr<ARMOperand>
3823 CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
3824 unsigned ShiftImm, SMLoc S, SMLoc E) {
3825 auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister);
3826 Op->PostIdxReg.RegNum = RegNum;
3827 Op->PostIdxReg.isAdd = isAdd;
3828 Op->PostIdxReg.ShiftTy = ShiftTy;
3829 Op->PostIdxReg.ShiftImm = ShiftImm;
3830 Op->StartLoc = S;
3831 Op->EndLoc = E;
3832 return Op;
3833 }
3834
3835 static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
3836 SMLoc S) {
3837 auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt);
3838 Op->MBOpt.Val = Opt;
3839 Op->StartLoc = S;
3840 Op->EndLoc = S;
3841 return Op;
3842 }
3843
3844 static std::unique_ptr<ARMOperand>
3845 CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
3846 auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt);
3847 Op->ISBOpt.Val = Opt;
3848 Op->StartLoc = S;
3849 Op->EndLoc = S;
3850 return Op;
3851 }
3852
3853 static std::unique_ptr<ARMOperand>
3854 CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S) {
3855 auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
3856 Op->TSBOpt.Val = Opt;
3857 Op->StartLoc = S;
3858 Op->EndLoc = S;
3859 return Op;
3860 }
3861
3862 static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
3863 SMLoc S) {
3864 auto Op = std::make_unique<ARMOperand>(k_ProcIFlags);
3865 Op->IFlags.Val = IFlags;
3866 Op->StartLoc = S;
3867 Op->EndLoc = S;
3868 return Op;
3869 }
3870
3871 static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
3872 auto Op = std::make_unique<ARMOperand>(k_MSRMask);
3873 Op->MMask.Val = MMask;
3874 Op->StartLoc = S;
3875 Op->EndLoc = S;
3876 return Op;
3877 }
3878
3879 static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
3880 auto Op = std::make_unique<ARMOperand>(k_BankedReg);
3881 Op->BankedReg.Val = Reg;
3882 Op->StartLoc = S;
3883 Op->EndLoc = S;
3884 return Op;
3885 }
3886};
3887
3888} // end anonymous namespace.
3889
3890void ARMOperand::print(raw_ostream &OS) const {
3891 auto RegName = [](MCRegister Reg) {
3892 if (Reg)
3894 else
3895 return "noreg";
3896 };
3897
3898 switch (Kind) {
3899 case k_CondCode:
3900 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
3901 break;
3902 case k_VPTPred:
3903 OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">";
3904 break;
3905 case k_CCOut:
3906 OS << "<ccout " << RegName(getReg()) << ">";
3907 break;
3908 case k_ITCondMask: {
3909 static const char *const MaskStr[] = {
3910 "(invalid)", "(tttt)", "(ttt)", "(ttte)",
3911 "(tt)", "(ttet)", "(tte)", "(ttee)",
3912 "(t)", "(tett)", "(tet)", "(tete)",
3913 "(te)", "(teet)", "(tee)", "(teee)",
3914 };
3915 assert((ITMask.Mask & 0xf) == ITMask.Mask);
3916 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
3917 break;
3918 }
3919 case k_CoprocNum:
3920 OS << "<coprocessor number: " << getCoproc() << ">";
3921 break;
3922 case k_CoprocReg:
3923 OS << "<coprocessor register: " << getCoproc() << ">";
3924 break;
3925 case k_CoprocOption:
3926 OS << "<coprocessor option: " << CoprocOption.Val << ">";
3927 break;
3928 case k_MSRMask:
3929 OS << "<mask: " << getMSRMask() << ">";
3930 break;
3931 case k_BankedReg:
3932 OS << "<banked reg: " << getBankedReg() << ">";
3933 break;
3934 case k_Immediate:
3935 OS << *getImm();
3936 break;
3937 case k_MemBarrierOpt:
3938 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
3939 break;
3940 case k_InstSyncBarrierOpt:
3941 OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
3942 break;
3943 case k_TraceSyncBarrierOpt:
3944 OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
3945 break;
3946 case k_Memory:
3947 OS << "<memory";
3948 if (Memory.BaseRegNum)
3949 OS << " base:" << RegName(Memory.BaseRegNum);
3950 if (Memory.OffsetImm)
3951 OS << " offset-imm:" << *Memory.OffsetImm;
3952 if (Memory.OffsetRegNum)
3953 OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
3954 << RegName(Memory.OffsetRegNum);
3955 if (Memory.ShiftType != ARM_AM::no_shift) {
3956 OS << " shift-type:" << ARM_AM::getShiftOpcStr(Memory.ShiftType);
3957 OS << " shift-imm:" << Memory.ShiftImm;
3958 }
3959 if (Memory.Alignment)
3960 OS << " alignment:" << Memory.Alignment;
3961 OS << ">";
3962 break;
3963 case k_PostIndexRegister:
3964 OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
3965 << RegName(PostIdxReg.RegNum);
3966 if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
3967 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
3968 << PostIdxReg.ShiftImm;
3969 OS << ">";
3970 break;
3971 case k_ProcIFlags: {
3972 OS << "<ARM_PROC::";
3973 unsigned IFlags = getProcIFlags();
3974 for (int i=2; i >= 0; --i)
3975 if (IFlags & (1 << i))
3976 OS << ARM_PROC::IFlagsToString(1 << i);
3977 OS << ">";
3978 break;
3979 }
3980 case k_Register:
3981 OS << "<register " << RegName(getReg()) << ">";
3982 break;
3983 case k_ShifterImmediate:
3984 OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
3985 << " #" << ShifterImm.Imm << ">";
3986 break;
3987 case k_ShiftedRegister:
3988 OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
3989 << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) << " "
3990 << RegName(RegShiftedReg.ShiftReg) << ">";
3991 break;
3992 case k_ShiftedImmediate:
3993 OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
3994 << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) << " #"
3995 << RegShiftedImm.ShiftImm << ">";
3996 break;
3997 case k_RotateImmediate:
3998 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
3999 break;
4000 case k_ModifiedImmediate:
4001 OS << "<mod_imm #" << ModImm.Bits << ", #"
4002 << ModImm.Rot << ")>";
4003 break;
4004 case k_ConstantPoolImmediate:
4005 OS << "<constant_pool_imm #" << *getConstantPoolImm();
4006 break;
4007 case k_BitfieldDescriptor:
4008 OS << "<bitfield " << "lsb: " << Bitfield.LSB
4009 << ", width: " << Bitfield.Width << ">";
4010 break;
4011 case k_RegisterList:
4012 case k_RegisterListWithAPSR:
4013 case k_DPRRegisterList:
4014 case k_SPRRegisterList:
4015 case k_FPSRegisterListWithVPR:
4016 case k_FPDRegisterListWithVPR: {
4017 OS << "<register_list ";
4018
4019 const SmallVectorImpl<unsigned> &RegList = getRegList();
4021 I = RegList.begin(), E = RegList.end(); I != E; ) {
4022 OS << RegName(*I);
4023 if (++I < E) OS << ", ";
4024 }
4025
4026 OS << ">";
4027 break;
4028 }
4029 case k_VectorList:
4030 OS << "<vector_list " << VectorList.Count << " * "
4031 << RegName(VectorList.RegNum) << ">";
4032 break;
4033 case k_VectorListAllLanes:
4034 OS << "<vector_list(all lanes) " << VectorList.Count << " * "
4035 << RegName(VectorList.RegNum) << ">";
4036 break;
4037 case k_VectorListIndexed:
4038 OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
4039 << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
4040 break;
4041 case k_Token:
4042 OS << "'" << getToken() << "'";
4043 break;
4044 case k_VectorIndex:
4045 OS << "<vectorindex " << getVectorIndex() << ">";
4046 break;
4047 }
4048}
4049
4050/// @name Auto-generated Match Functions
4051/// {
4052
4054
4055/// }
4056
4057bool ARMAsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
4058 SMLoc &EndLoc) {
4059 const AsmToken &Tok = getParser().getTok();
4060 StartLoc = Tok.getLoc();
4061 EndLoc = Tok.getEndLoc();
4062 Reg = tryParseRegister();
4063
4064 return Reg == (unsigned)-1;
4065}
4066
4067ParseStatus ARMAsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
4068 SMLoc &EndLoc) {
4069 if (parseRegister(Reg, StartLoc, EndLoc))
4070 return ParseStatus::NoMatch;
4071 return ParseStatus::Success;
4072}
4073
4074/// Try to parse a register name. The token must be an Identifier when called,
4075/// and if it is a register name the token is eaten and the register number is
4076/// returned. Otherwise return -1.
4077int ARMAsmParser::tryParseRegister(bool AllowOutOfBoundReg) {
4078 MCAsmParser &Parser = getParser();
4079 const AsmToken &Tok = Parser.getTok();
4080 if (Tok.isNot(AsmToken::Identifier)) return -1;
4081
4082 std::string lowerCase = Tok.getString().lower();
4083 unsigned RegNum = MatchRegisterName(lowerCase);
4084 if (!RegNum) {
4085 RegNum = StringSwitch<unsigned>(lowerCase)
4086 .Case("r13", ARM::SP)
4087 .Case("r14", ARM::LR)
4088 .Case("r15", ARM::PC)
4089 .Case("ip", ARM::R12)
4090 // Additional register name aliases for 'gas' compatibility.
4091 .Case("a1", ARM::R0)
4092 .Case("a2", ARM::R1)
4093 .Case("a3", ARM::R2)
4094 .Case("a4", ARM::R3)
4095 .Case("v1", ARM::R4)
4096 .Case("v2", ARM::R5)
4097 .Case("v3", ARM::R6)
4098 .Case("v4", ARM::R7)
4099 .Case("v5", ARM::R8)
4100 .Case("v6", ARM::R9)
4101 .Case("v7", ARM::R10)
4102 .Case("v8", ARM::R11)
4103 .Case("sb", ARM::R9)
4104 .Case("sl", ARM::R10)
4105 .Case("fp", ARM::R11)
4106 .Default(0);
4107 }
4108 if (!RegNum) {
4109 // Check for aliases registered via .req. Canonicalize to lower case.
4110 // That's more consistent since register names are case insensitive, and
4111 // it's how the original entry was passed in from MC/MCParser/AsmParser.
4112 StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
4113 // If no match, return failure.
4114 if (Entry == RegisterReqs.end())
4115 return -1;
4116 Parser.Lex(); // Eat identifier token.
4117 return Entry->getValue();
4118 }
4119
4120 // Some FPUs only have 16 D registers, so D16-D31 are invalid
4121 if (!AllowOutOfBoundReg && !hasD32() && RegNum >= ARM::D16 &&
4122 RegNum <= ARM::D31)
4123 return -1;
4124
4125 Parser.Lex(); // Eat identifier token.
4126
4127 return RegNum;
4128}
4129
4130// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
4131// If a recoverable error occurs, return 1. If an irrecoverable error
4132// occurs, return -1. An irrecoverable error is one where tokens have been
4133// consumed in the process of trying to parse the shifter (i.e., when it is
4134// indeed a shifter operand, but malformed).
4135int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
4136 MCAsmParser &Parser = getParser();
4137 SMLoc S = Parser.getTok().getLoc();
4138 const AsmToken &Tok = Parser.getTok();
4139 if (Tok.isNot(AsmToken::Identifier))
4140 return -1;
4141
4142 std::string lowerCase = Tok.getString().lower();
4144 .Case("asl", ARM_AM::lsl)
4145 .Case("lsl", ARM_AM::lsl)
4146 .Case("lsr", ARM_AM::lsr)
4147 .Case("asr", ARM_AM::asr)
4148 .Case("ror", ARM_AM::ror)
4149 .Case("rrx", ARM_AM::rrx)
4151
4152 if (ShiftTy == ARM_AM::no_shift)
4153 return 1;
4154
4155 Parser.Lex(); // Eat the operator.
4156
4157 // The source register for the shift has already been added to the
4158 // operand list, so we need to pop it off and combine it into the shifted
4159 // register operand instead.
4160 std::unique_ptr<ARMOperand> PrevOp(
4161 (ARMOperand *)Operands.pop_back_val().release());
4162 if (!PrevOp->isReg())
4163 return Error(PrevOp->getStartLoc(), "shift must be of a register");
4164 int SrcReg = PrevOp->getReg();
4165
4166 SMLoc EndLoc;
4167 int64_t Imm = 0;
4168 int ShiftReg = 0;
4169 if (ShiftTy == ARM_AM::rrx) {
4170 // RRX Doesn't have an explicit shift amount. The encoder expects
4171 // the shift register to be the same as the source register. Seems odd,
4172 // but OK.
4173 ShiftReg = SrcReg;
4174 } else {
4175 // Figure out if this is shifted by a constant or a register (for non-RRX).
4176 if (Parser.getTok().is(AsmToken::Hash) ||
4177 Parser.getTok().is(AsmToken::Dollar)) {
4178 Parser.Lex(); // Eat hash.
4179 SMLoc ImmLoc = Parser.getTok().getLoc();
4180 const MCExpr *ShiftExpr = nullptr;
4181 if (getParser().parseExpression(ShiftExpr, EndLoc)) {
4182 Error(ImmLoc, "invalid immediate shift value");
4183 return -1;
4184 }
4185 // The expression must be evaluatable as an immediate.
4186 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
4187 if (!CE) {
4188 Error(ImmLoc, "invalid immediate shift value");
4189 return -1;
4190 }
4191 // Range check the immediate.
4192 // lsl, ror: 0 <= imm <= 31
4193 // lsr, asr: 0 <= imm <= 32
4194 Imm = CE->getValue();
4195 if (Imm < 0 ||
4196 ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
4197 ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
4198 Error(ImmLoc, "immediate shift value out of range");
4199 return -1;
4200 }
4201 // shift by zero is a nop. Always send it through as lsl.
4202 // ('as' compatibility)
4203 if (Imm == 0)
4204 ShiftTy = ARM_AM::lsl;
4205 } else if (Parser.getTok().is(AsmToken::Identifier)) {
4206 SMLoc L = Parser.getTok().getLoc();
4207 EndLoc = Parser.getTok().getEndLoc();
4208 ShiftReg = tryParseRegister();
4209 if (ShiftReg == -1) {
4210 Error(L, "expected immediate or register in shift operand");
4211 return -1;
4212 }
4213 } else {
4214 Error(Parser.getTok().getLoc(),
4215 "expected immediate or register in shift operand");
4216 return -1;
4217 }
4218 }
4219
4220 if (ShiftReg && ShiftTy != ARM_AM::rrx)
4221 Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
4222 ShiftReg, Imm,
4223 S, EndLoc));
4224 else
4225 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
4226 S, EndLoc));
4227
4228 return 0;
4229}
4230
4231/// Try to parse a register name. The token must be an Identifier when called.
4232/// If it's a register, an AsmOperand is created. Another AsmOperand is created
4233/// if there is a "writeback". 'true' if it's not a register.
4234///
4235/// TODO this is likely to change to allow different register types and or to
4236/// parse for a specific register type.
4237bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
4238 MCAsmParser &Parser = getParser();
4239 SMLoc RegStartLoc = Parser.getTok().getLoc();
4240 SMLoc RegEndLoc = Parser.getTok().getEndLoc();
4241 int RegNo = tryParseRegister();
4242 if (RegNo == -1)
4243 return true;
4244
4245 Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));
4246
4247 const AsmToken &ExclaimTok = Parser.getTok();
4248 if (ExclaimTok.is(AsmToken::Exclaim)) {
4249 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
4250 ExclaimTok.getLoc()));
4251 Parser.Lex(); // Eat exclaim token
4252 return false;
4253 }
4254
4255 // Also check for an index operand. This is only legal for vector registers,
4256 // but that'll get caught OK in operand matching, so we don't need to
4257 // explicitly filter everything else out here.
4258 if (Parser.getTok().is(AsmToken::LBrac)) {
4259 SMLoc SIdx = Parser.getTok().getLoc();
4260 Parser.Lex(); // Eat left bracket token.
4261
4262 const MCExpr *ImmVal;
4263 if (getParser().parseExpression(ImmVal))
4264 return true;
4265 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4266 if (!MCE)
4267 return TokError("immediate value expected for vector index");
4268
4269 if (Parser.getTok().isNot(AsmToken::RBrac))
4270 return Error(Parser.getTok().getLoc(), "']' expected");
4271
4272 SMLoc E = Parser.getTok().getEndLoc();
4273 Parser.Lex(); // Eat right bracket token.
4274
4275 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
4276 SIdx, E,
4277 getContext()));
4278 }
4279
4280 return false;
4281}
4282
4283/// MatchCoprocessorOperandName - Try to parse an coprocessor related
4284/// instruction with a symbolic operand name.
4285/// We accept "crN" syntax for GAS compatibility.
4286/// <operand-name> ::= <prefix><number>
4287/// If CoprocOp is 'c', then:
4288/// <prefix> ::= c | cr
4289/// If CoprocOp is 'p', then :
4290/// <prefix> ::= p
4291/// <number> ::= integer in range [0, 15]
4292static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
4293 // Use the same layout as the tablegen'erated register name matcher. Ugly,
4294 // but efficient.
4295 if (Name.size() < 2 || Name[0] != CoprocOp)
4296 return -1;
4297 Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
4298
4299 switch (Name.size()) {
4300 default: return -1;
4301 case 1:
4302 switch (Name[0]) {
4303 default: return -1;
4304 case '0': return 0;
4305 case '1': return 1;
4306 case '2': return 2;
4307 case '3': return 3;
4308 case '4': return 4;
4309 case '5': return 5;
4310 case '6': return 6;
4311 case '7': return 7;
4312 case '8': return 8;
4313 case '9': return 9;
4314 }
4315 case 2:
4316 if (Name[0] != '1')
4317 return -1;
4318 switch (Name[1]) {
4319 default: return -1;
4320 // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
4321 // However, old cores (v5/v6) did use them in that way.
4322 case '0': return 10;
4323 case '1': return 11;
4324 case '2': return 12;
4325 case '3': return 13;
4326 case '4': return 14;
4327 case '5': return 15;
4328 }
4329 }
4330}
4331
4332/// parseITCondCode - Try to parse a condition code for an IT instruction.
4333ParseStatus ARMAsmParser::parseITCondCode(OperandVector &Operands) {
4334 MCAsmParser &Parser = getParser();
4335 SMLoc S = Parser.getTok().getLoc();
4336 const AsmToken &Tok = Parser.getTok();
4337 if (!Tok.is(AsmToken::Identifier))
4338 return ParseStatus::NoMatch;
4339 unsigned CC = ARMCondCodeFromString(Tok.getString());
4340 if (CC == ~0U)
4341 return ParseStatus::NoMatch;
4342 Parser.Lex(); // Eat the token.
4343
4344 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
4345
4346 return ParseStatus::Success;
4347}
4348
4349/// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
4350/// token must be an Identifier when called, and if it is a coprocessor
4351/// number, the token is eaten and the operand is added to the operand list.
4352ParseStatus ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
4353 MCAsmParser &Parser = getParser();
4354 SMLoc S = Parser.getTok().getLoc();
4355 const AsmToken &Tok = Parser.getTok();
4356 if (Tok.isNot(AsmToken::Identifier))
4357 return ParseStatus::NoMatch;
4358
4359 int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p');
4360 if (Num == -1)
4361 return ParseStatus::NoMatch;
4362 if (!isValidCoprocessorNumber(Num, getSTI().getFeatureBits()))
4363 return ParseStatus::NoMatch;
4364
4365 Parser.Lex(); // Eat identifier token.
4366 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
4367 return ParseStatus::Success;
4368}
4369
4370/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
4371/// token must be an Identifier when called, and if it is a coprocessor
4372/// number, the token is eaten and the operand is added to the operand list.
4373ParseStatus ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
4374 MCAsmParser &Parser = getParser();
4375 SMLoc S = Parser.getTok().getLoc();
4376 const AsmToken &Tok = Parser.getTok();
4377 if (Tok.isNot(AsmToken::Identifier))
4378 return ParseStatus::NoMatch;
4379
4380 int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c');
4381 if (Reg == -1)
4382 return ParseStatus::NoMatch;
4383
4384 Parser.Lex(); // Eat identifier token.
4385 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
4386 return ParseStatus::Success;
4387}
4388
4389/// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
4390/// coproc_option : '{' imm0_255 '}'
4391ParseStatus ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
4392 MCAsmParser &Parser = getParser();
4393 SMLoc S = Parser.getTok().getLoc();
4394
4395 // If this isn't a '{', this isn't a coprocessor immediate operand.
4396 if (Parser.getTok().isNot(AsmToken::LCurly))
4397 return ParseStatus::NoMatch;
4398 Parser.Lex(); // Eat the '{'
4399
4400 const MCExpr *Expr;
4401 SMLoc Loc = Parser.getTok().getLoc();
4402 if (getParser().parseExpression(Expr))
4403 return Error(Loc, "illegal expression");
4404 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4405 if (!CE || CE->getValue() < 0 || CE->getValue() > 255)
4406 return Error(Loc,
4407 "coprocessor option must be an immediate in range [0, 255]");
4408 int Val = CE->getValue();
4409
4410 // Check for and consume the closing '}'
4411 if (Parser.getTok().isNot(AsmToken::RCurly))
4412 return ParseStatus::Failure;
4413 SMLoc E = Parser.getTok().getEndLoc();
4414 Parser.Lex(); // Eat the '}'
4415
4416 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
4417 return ParseStatus::Success;
4418}
4419
4420// For register list parsing, we need to map from raw GPR register numbering
4421// to the enumeration values. The enumeration values aren't sorted by
4422// register number due to our using "sp", "lr" and "pc" as canonical names.
4423static unsigned getNextRegister(unsigned Reg) {
4424 // If this is a GPR, we need to do it manually, otherwise we can rely
4425 // on the sort ordering of the enumeration since the other reg-classes
4426 // are sane.
4427 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4428 return Reg + 1;
4429 switch(Reg) {
4430 default: llvm_unreachable("Invalid GPR number!");
4431 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2;
4432 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4;
4433 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6;
4434 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8;
4435 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10;
4436 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
4437 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR;
4438 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0;
4439 }
4440}
4441
4442// Insert an <Encoding, Register> pair in an ordered vector. Return true on
4443// success, or false, if duplicate encoding found.
4444static bool
4445insertNoDuplicates(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
4446 unsigned Enc, unsigned Reg) {
4447 Regs.emplace_back(Enc, Reg);
4448 for (auto I = Regs.rbegin(), J = I + 1, E = Regs.rend(); J != E; ++I, ++J) {
4449 if (J->first == Enc) {
4450 Regs.erase(J.base());
4451 return false;
4452 }
4453 if (J->first < Enc)
4454 break;
4455 std::swap(*I, *J);
4456 }
4457 return true;
4458}
4459
4460/// Parse a register list.
4461bool ARMAsmParser::parseRegisterList(OperandVector &Operands, bool EnforceOrder,
4462 bool AllowRAAC, bool AllowOutOfBoundReg) {
4463 MCAsmParser &Parser = getParser();
4464 if (Parser.getTok().isNot(AsmToken::LCurly))
4465 return TokError("Token is not a Left Curly Brace");
4466 SMLoc S = Parser.getTok().getLoc();
4467 Parser.Lex(); // Eat '{' token.
4468 SMLoc RegLoc = Parser.getTok().getLoc();
4469
4470 // Check the first register in the list to see what register class
4471 // this is a list of.
4472 int Reg = tryParseRegister();
4473 if (Reg == -1)
4474 return Error(RegLoc, "register expected");
4475 if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4476 return Error(RegLoc, "pseudo-register not allowed");
4477 // The reglist instructions have at most 16 registers, so reserve
4478 // space for that many.
4479 int EReg = 0;
4481
4482 // Allow Q regs and just interpret them as the two D sub-registers.
4483 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4484 Reg = getDRegFromQReg(Reg);
4485 EReg = MRI->getEncodingValue(Reg);
4486 Registers.emplace_back(EReg, Reg);
4487 ++Reg;
4488 }
4489 const MCRegisterClass *RC;
4490 if (Reg == ARM::RA_AUTH_CODE ||
4491 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4492 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
4493 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
4494 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
4495 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
4496 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
4497 else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4498 RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4499 else
4500 return Error(RegLoc, "invalid register in register list");
4501
4502 // Store the register.
4503 EReg = MRI->getEncodingValue(Reg);
4504 Registers.emplace_back(EReg, Reg);
4505
4506 // This starts immediately after the first register token in the list,
4507 // so we can see either a comma or a minus (range separator) as a legal
4508 // next token.
4509 while (Parser.getTok().is(AsmToken::Comma) ||
4510 Parser.getTok().is(AsmToken::Minus)) {
4511 if (Parser.getTok().is(AsmToken::Minus)) {
4512 if (Reg == ARM::RA_AUTH_CODE)
4513 return Error(RegLoc, "pseudo-register not allowed");
4514 Parser.Lex(); // Eat the minus.
4515 SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4516 int EndReg = tryParseRegister(AllowOutOfBoundReg);
4517 if (EndReg == -1)
4518 return Error(AfterMinusLoc, "register expected");
4519 if (EndReg == ARM::RA_AUTH_CODE)
4520 return Error(AfterMinusLoc, "pseudo-register not allowed");
4521 // Allow Q regs and just interpret them as the two D sub-registers.
4522 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4523 EndReg = getDRegFromQReg(EndReg) + 1;
4524 // If the register is the same as the start reg, there's nothing
4525 // more to do.
4526 if (Reg == EndReg)
4527 continue;
4528 // The register must be in the same register class as the first.
4529 if (!RC->contains(Reg))
4530 return Error(AfterMinusLoc, "invalid register in register list");
4531 // Ranges must go from low to high.
4532 if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
4533 return Error(AfterMinusLoc, "bad range in register list");
4534
4535 // Add all the registers in the range to the register list.
4536 while (Reg != EndReg) {
4538 EReg = MRI->getEncodingValue(Reg);
4539 if (!insertNoDuplicates(Registers, EReg, Reg)) {
4540 Warning(AfterMinusLoc, StringRef("duplicated register (") +
4542 ") in register list");
4543 }
4544 }
4545 continue;
4546 }
4547 Parser.Lex(); // Eat the comma.
4548 RegLoc = Parser.getTok().getLoc();
4549 int OldReg = Reg;
4550 const AsmToken RegTok = Parser.getTok();
4551 Reg = tryParseRegister(AllowOutOfBoundReg);
4552 if (Reg == -1)
4553 return Error(RegLoc, "register expected");
4554 if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4555 return Error(RegLoc, "pseudo-register not allowed");
4556 // Allow Q regs and just interpret them as the two D sub-registers.
4557 bool isQReg = false;
4558 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4559 Reg = getDRegFromQReg(Reg);
4560 isQReg = true;
4561 }
4562 if (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg) &&
4563 RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
4564 ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
4565 // switch the register classes, as GPRwithAPSRnospRegClassID is a partial
4566 // subset of GPRRegClassId except it contains APSR as well.
4567 RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4568 }
4569 if (Reg == ARM::VPR &&
4570 (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
4571 RC == &ARMMCRegisterClasses[ARM::DPRRegClassID] ||
4572 RC == &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID])) {
4573 RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4574 EReg = MRI->getEncodingValue(Reg);
4575 if (!insertNoDuplicates(Registers, EReg, Reg)) {
4576 Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4577 ") in register list");
4578 }
4579 continue;
4580 }
4581 // The register must be in the same register class as the first.
4582 if ((Reg == ARM::RA_AUTH_CODE &&
4583 RC != &ARMMCRegisterClasses[ARM::GPRRegClassID]) ||
4584 (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg)))
4585 return Error(RegLoc, "invalid register in register list");
4586 // In most cases, the list must be monotonically increasing. An
4587 // exception is CLRM, which is order-independent anyway, so
4588 // there's no potential for confusion if you write clrm {r2,r1}
4589 // instead of clrm {r1,r2}.
4590 if (EnforceOrder &&
4591 MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
4592 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4593 Warning(RegLoc, "register list not in ascending order");
4594 else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4595 return Error(RegLoc, "register list not in ascending order");
4596 }
4597 // VFP register lists must also be contiguous.
4598 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
4599 RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
4600 Reg != OldReg + 1)
4601 return Error(RegLoc, "non-contiguous register range");
4602 EReg = MRI->getEncodingValue(Reg);
4603 if (!insertNoDuplicates(Registers, EReg, Reg)) {
4604 Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4605 ") in register list");
4606 }
4607 if (isQReg) {
4608 EReg = MRI->getEncodingValue(++Reg);
4609 Registers.emplace_back(EReg, Reg);
4610 }
4611 }
4612
4613 if (Parser.getTok().isNot(AsmToken::RCurly))
4614 return Error(Parser.getTok().getLoc(), "'}' expected");
4615 SMLoc E = Parser.getTok().getEndLoc();
4616 Parser.Lex(); // Eat '}' token.
4617
4618 // Push the register list operand.
4619 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
4620
4621 // The ARM system instruction variants for LDM/STM have a '^' token here.
4622 if (Parser.getTok().is(AsmToken::Caret)) {
4623 Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
4624 Parser.Lex(); // Eat '^' token.
4625 }
4626
4627 return false;
4628}
4629
4630// Helper function to parse the lane index for vector lists.
4631ParseStatus ARMAsmParser::parseVectorLane(VectorLaneTy &LaneKind,
4632 unsigned &Index, SMLoc &EndLoc) {
4633 MCAsmParser &Parser = getParser();
4634 Index = 0; // Always return a defined index value.
4635 if (Parser.getTok().is(AsmToken::LBrac)) {
4636 Parser.Lex(); // Eat the '['.
4637 if (Parser.getTok().is(AsmToken::RBrac)) {
4638 // "Dn[]" is the 'all lanes' syntax.
4639 LaneKind = AllLanes;
4640 EndLoc = Parser.getTok().getEndLoc();
4641 Parser.Lex(); // Eat the ']'.
4642 return ParseStatus::Success;
4643 }
4644
4645 // There's an optional '#' token here. Normally there wouldn't be, but
4646 // inline assemble puts one in, and it's friendly to accept that.
4647 if (Parser.getTok().is(AsmToken::Hash))
4648 Parser.Lex(); // Eat '#' or '$'.
4649
4650 const MCExpr *LaneIndex;
4651 SMLoc Loc = Parser.getTok().getLoc();
4652 if (getParser().parseExpression(LaneIndex))
4653 return Error(Loc, "illegal expression");
4654 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
4655 if (!CE)
4656 return Error(Loc, "lane index must be empty or an integer");
4657 if (Parser.getTok().isNot(AsmToken::RBrac))
4658 return Error(Parser.getTok().getLoc(), "']' expected");
4659 EndLoc = Parser.getTok().getEndLoc();
4660 Parser.Lex(); // Eat the ']'.
4661 int64_t Val = CE->getValue();
4662
4663 // FIXME: Make this range check context sensitive for .8, .16, .32.
4664 if (Val < 0 || Val > 7)
4665 return Error(Parser.getTok().getLoc(), "lane index out of range");
4666 Index = Val;
4667 LaneKind = IndexedLane;
4668 return ParseStatus::Success;
4669 }
4670 LaneKind = NoLanes;
4671 return ParseStatus::Success;
4672}
4673
// Parse a NEON/MVE vector register list operand, e.g. "{d0, d1}",
// "{q0-q3}", "d0[2]", or "{d0[], d1[]}". Handles bare D/Q registers (a gas
// extension), register ranges, Q registers standing for their two D
// sub-registers, single- vs double-spaced lists, and per-element lane
// specifiers, which must match across all list entries.
ParseStatus ARMAsmParser::parseVectorList(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  VectorLaneTy LaneKind;
  unsigned LaneIndex;
  SMLoc S = Parser.getTok().getLoc();
  // As an extension (to match gas), support a plain D register or Q register
  // (without enclosing curly braces) as a single or double entry list,
  // respectively.
  if (!hasMVE() && Parser.getTok().is(AsmToken::Identifier)) {
    SMLoc E = Parser.getTok().getEndLoc();
    int Reg = tryParseRegister();
    if (Reg == -1)
      return ParseStatus::NoMatch;
    // A lone D register: a one-element list, possibly with a lane suffix.
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
      ParseStatus Res = parseVectorLane(LaneKind, LaneIndex, E);
      if (!Res.isSuccess())
        return Res;
      switch (LaneKind) {
      case NoLanes:
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
        break;
      case AllLanes:
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return ParseStatus::Success;
    }
    // A lone Q register: treated as a two-D-register list.
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      ParseStatus Res = parseVectorLane(LaneKind, LaneIndex, E);
      if (!Res.isSuccess())
        return Res;
      switch (LaneKind) {
      case NoLanes:
        // Promote the D register pair to the composite DPair class.
        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                  &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
        break;
      case AllLanes:
        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                  &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return ParseStatus::Success;
    }
    return Error(S, "vector register expected");
  }

  if (Parser.getTok().isNot(AsmToken::LCurly))
    return ParseStatus::NoMatch;

  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");
  unsigned Count = 1;
  int Spacing = 0; // 0 = unknown yet, 1 = single-spaced, 2 = double-spaced.
  unsigned FirstReg = Reg;

  if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg))
    return Error(Parser.getTok().getLoc(),
                 "vector register in range Q0-Q7 expected");
  // The list is of D registers, but we also allow Q regs and just interpret
  // them as the two D sub-registers.
  else if (!hasMVE() &&
           ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    FirstReg = Reg = getDRegFromQReg(Reg);
    Spacing = 1; // double-spacing requires explicit D registers, otherwise
                 // it's ambiguous with four-register single spaced.
    ++Reg;
    ++Count;
  }

  SMLoc E;
  // The lane specifier (if any) on the first entry fixes the lane kind/index
  // that every subsequent entry must match.
  if (!parseVectorLane(LaneKind, LaneIndex, E).isSuccess())
    return ParseStatus::Failure;

  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    // A '-' extends the list with a register range.
    if (Parser.getTok().is(AsmToken::Minus)) {
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2)
        return Error(Parser.getTok().getLoc(),
                     "sequential registers in double spaced list");
      Parser.Lex(); // Eat the minus.
      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(AfterMinusLoc, "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (!hasMVE() &&
          ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if ((hasMVE() &&
           !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(EndReg)) ||
          (!hasMVE() &&
           !ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)))
        return Error(AfterMinusLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (Reg > EndReg)
        return Error(AfterMinusLoc, "bad range in register list");
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      if (!parseVectorLane(NextLaneKind, NextLaneIndex, E).isSuccess())
        return ParseStatus::Failure;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
        return Error(AfterMinusLoc, "mismatched lane index in register list");

      // Add all the registers in the range to the register list.
      Count += EndReg - Reg;
      Reg = EndReg;
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");

    if (hasMVE()) {
      if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg))
        return Error(RegLoc, "vector register in range Q0-Q7 expected");
      Spacing = 1;
    }
    // vector register lists must be contiguous.
    // It's OK to use the enumeration values directly here rather, as the
    // VFP register classes have the enum sorted properly.
    //
    // The list is of D registers, but we also allow Q regs and just interpret
    // them as the two D sub-registers.
    else if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2)
        return Error(
            RegLoc,
            "invalid register in double-spaced list (must be 'D' register')");
      Reg = getDRegFromQReg(Reg);
      if (Reg != OldReg + 1)
        return Error(RegLoc, "non-contiguous register range");
      ++Reg;
      Count += 2;
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      SMLoc LaneLoc = Parser.getTok().getLoc();
      if (!parseVectorLane(NextLaneKind, NextLaneIndex, E).isSuccess())
        return ParseStatus::Failure;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
        return Error(LaneLoc, "mismatched lane index in register list");
      continue;
    }
    // Normal D register.
    // Figure out the register spacing (single or double) of the list if
    // we don't know it already.
    if (!Spacing)
      Spacing = 1 + (Reg == OldReg + 2);

    // Just check that it's contiguous and keep going.
    if (Reg != OldReg + Spacing)
      return Error(RegLoc, "non-contiguous register range");
    ++Count;
    // Parse the lane specifier if present.
    VectorLaneTy NextLaneKind;
    unsigned NextLaneIndex;
    SMLoc EndLoc = Parser.getTok().getLoc();
    if (!parseVectorLane(NextLaneKind, NextLaneIndex, E).isSuccess())
      return ParseStatus::Failure;
    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
      return Error(EndLoc, "mismatched lane index in register list");
  }

  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(Parser.getTok().getLoc(), "'}' expected");
  E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat '}' token.

  switch (LaneKind) {
  case NoLanes:
  case AllLanes: {
    // Two-register operands have been converted to the
    // composite register classes.
    if (Count == 2 && !hasMVE()) {
      const MCRegisterClass *RC = (Spacing == 1) ?
        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
    }
    auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
                   ARMOperand::CreateVectorListAllLanes);
    Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S, E));
    break;
  }
  case IndexedLane:
    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
                                                           LaneIndex,
                                                           (Spacing == 2),
                                                           S, E));
    break;
  }
  return ParseStatus::Success;
}
4898
4899/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
4900ParseStatus ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
4901 MCAsmParser &Parser = getParser();
4902 SMLoc S = Parser.getTok().getLoc();
4903 const AsmToken &Tok = Parser.getTok();
4904 unsigned Opt;
4905
4906 if (Tok.is(AsmToken::Identifier)) {
4907 StringRef OptStr = Tok.getString();
4908
4909 Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
4910 .Case("sy", ARM_MB::SY)
4911 .Case("st", ARM_MB::ST)
4912 .Case("ld", ARM_MB::LD)
4913 .Case("sh", ARM_MB::ISH)
4914 .Case("ish", ARM_MB::ISH)
4915 .Case("shst", ARM_MB::ISHST)
4916 .Case("ishst", ARM_MB::ISHST)
4917 .Case("ishld", ARM_MB::ISHLD)
4918 .Case("nsh", ARM_MB::NSH)
4919 .Case("un", ARM_MB::NSH)
4920 .Case("nshst", ARM_MB::NSHST)
4921 .Case("nshld", ARM_MB::NSHLD)
4922 .Case("unst", ARM_MB::NSHST)
4923 .Case("osh", ARM_MB::OSH)
4924 .Case("oshst", ARM_MB::OSHST)
4925 .Case("oshld", ARM_MB::OSHLD)
4926 .Default(~0U);
4927
4928 // ishld, oshld, nshld and ld are only available from ARMv8.
4929 if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
4930 Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
4931 Opt = ~0U;
4932
4933 if (Opt == ~0U)
4934 return ParseStatus::NoMatch;
4935
4936 Parser.Lex(); // Eat identifier token.
4937 } else if (Tok.is(AsmToken::Hash) ||
4938 Tok.is(AsmToken::Dollar) ||
4939 Tok.is(AsmToken::Integer)) {
4940 if (Parser.getTok().isNot(AsmToken::Integer))
4941 Parser.Lex(); // Eat '#' or '$'.
4942 SMLoc Loc = Parser.getTok().getLoc();
4943
4944 const MCExpr *MemBarrierID;
4945 if (getParser().parseExpression(MemBarrierID))
4946 return Error(Loc, "illegal expression");
4947
4948 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
4949 if (!CE)
4950 return Error(Loc, "constant expression expected");
4951
4952 int Val = CE->getValue();
4953 if (Val & ~0xf)
4954 return Error(Loc, "immediate value out of range");
4955
4956 Opt = ARM_MB::RESERVED_0 + Val;
4957 } else
4958 return ParseStatus::Failure;
4959
4960 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
4961 return ParseStatus::Success;
4962}
4963
4965ARMAsmParser::parseTraceSyncBarrierOptOperand(OperandVector &Operands) {
4966 MCAsmParser &Parser = getParser();
4967 SMLoc S = Parser.getTok().getLoc();
4968 const AsmToken &Tok = Parser.getTok();
4969
4970 if (Tok.isNot(AsmToken::Identifier))
4971 return ParseStatus::NoMatch;
4972
4973 if (!Tok.getString().equals_insensitive("csync"))
4974 return ParseStatus::NoMatch;
4975
4976 Parser.Lex(); // Eat identifier token.
4977
4978 Operands.push_back(ARMOperand::CreateTraceSyncBarrierOpt(ARM_TSB::CSYNC, S));
4979 return ParseStatus::Success;
4980}
4981
4982/// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
4984ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
4985 MCAsmParser &Parser = getParser();
4986 SMLoc S = Parser.getTok().getLoc();
4987 const AsmToken &Tok = Parser.getTok();
4988 unsigned Opt;
4989
4990 if (Tok.is(AsmToken::Identifier)) {
4991 StringRef OptStr = Tok.getString();
4992
4993 if (OptStr.equals_insensitive("sy"))
4994 Opt = ARM_ISB::SY;
4995 else
4996 return ParseStatus::NoMatch;
4997
4998 Parser.Lex(); // Eat identifier token.
4999 } else if (Tok.is(AsmToken::Hash) ||
5000 Tok.is(AsmToken::Dollar) ||
5001 Tok.is(AsmToken::Integer)) {
5002 if (Parser.getTok().isNot(AsmToken::Integer))
5003 Parser.Lex(); // Eat '#' or '$'.
5004 SMLoc Loc = Parser.getTok().getLoc();
5005
5006 const MCExpr *ISBarrierID;
5007 if (getParser().parseExpression(ISBarrierID))
5008 return Error(Loc, "illegal expression");
5009
5010 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
5011 if (!CE)
5012 return Error(Loc, "constant expression expected");
5013
5014 int Val = CE->getValue();
5015 if (Val & ~0xf)
5016 return Error(Loc, "immediate value out of range");
5017
5018 Opt = ARM_ISB::RESERVED_0 + Val;
5019 } else
5020 return ParseStatus::Failure;
5021
5022 Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
5023 (ARM_ISB::InstSyncBOpt)Opt, S));
5024 return ParseStatus::Success;
5025}
5026
5027/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
5028ParseStatus ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
5029 MCAsmParser &Parser = getParser();
5030 SMLoc S = Parser.getTok().getLoc();
5031 const AsmToken &Tok = Parser.getTok();
5032 if (!Tok.is(AsmToken::Identifier))
5033 return ParseStatus::NoMatch;
5034 StringRef IFlagsStr = Tok.getString();
5035
5036 // An iflags string of "none" is interpreted to mean that none of the AIF
5037 // bits are set. Not a terribly useful instruction, but a valid encoding.
5038 unsigned IFlags = 0;
5039 if (IFlagsStr != "none") {
5040 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
5041 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1).lower())
5042 .Case("a", ARM_PROC::A)
5043 .Case("i", ARM_PROC::I)
5044 .Case("f", ARM_PROC::F)
5045 .Default(~0U);
5046
5047 // If some specific iflag is already set, it means that some letter is
5048 // present more than once, this is not acceptable.
5049 if (Flag == ~0U || (IFlags & Flag))
5050 return ParseStatus::NoMatch;
5051
5052 IFlags |= Flag;
5053 }
5054 }
5055
5056 Parser.Lex(); // Eat identifier token.
5057 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
5058 return ParseStatus::Success;
5059}
5060
/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
///
/// Accepts: a bare integer SYSm value in [0, 255]; on M-class targets, a
/// system register name from the MClass table; otherwise an A/R-class
/// "spec_reg[_flags]" form such as CPSR_fc, APSR_nzcvq or SPSR_sxf.
ParseStatus ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();

  // A raw integer is taken as a SYSm value directly; out-of-range values
  // return NoMatch so another operand parser can have a go.
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val = Tok.getIntVal();
    if (Val > 255 || Val < 0) {
      return ParseStatus::NoMatch;
    }
    unsigned SYSmvalue = Val & 0xFF;
    Parser.Lex();
    Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
    return ParseStatus::Success;
  }

  if (!Tok.is(AsmToken::Identifier))
    return ParseStatus::NoMatch;
  StringRef Mask = Tok.getString();

  if (isMClass()) {
    // M-class: look the (lower-cased) name up in the system register table
    // and require the current subtarget to provide its features.
    auto TheReg = ARMSysReg::lookupMClassSysRegByName(Mask.lower());
    if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
      return ParseStatus::NoMatch;

    unsigned SYSmvalue = TheReg->Encoding & 0xFFF;

    Parser.Lex(); // Eat identifier token.
    Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
    return ParseStatus::Success;
  }

  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
  size_t Start = 0, Next = Mask.find('_');
  StringRef Flags = "";
  std::string SpecReg = Mask.slice(Start, Next).lower();
  if (Next != StringRef::npos)
    Flags = Mask.slice(Next+1, Mask.size());

  // FlagsVal contains the complete mask:
  // 3-0: Mask
  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  unsigned FlagsVal = 0;

  if (SpecReg == "apsr") {
    // APSR accepts only these fixed flag spellings (or no flags at all).
    FlagsVal = StringSwitch<unsigned>(Flags)
                   .Case("nzcvq", 0x8)  // same as CPSR_f
                   .Case("g", 0x4)      // same as CPSR_s
                   .Case("nzcvqg", 0xc) // same as CPSR_fs
                   .Default(~0U);

    if (FlagsVal == ~0U) {
      if (!Flags.empty())
        return ParseStatus::NoMatch;
      else
        FlagsVal = 8; // No flag
    }
  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
    // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
    if (Flags == "all" || Flags == "")
      Flags = "fc";
    // Each of the letters c/x/s/f sets one mask bit.
    for (int i = 0, e = Flags.size(); i != e; ++i) {
      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
                          .Case("c", 1)
                          .Case("x", 2)
                          .Case("s", 4)
                          .Case("f", 8)
                          .Default(~0U);

      // If some specific flag is already set, it means that some letter is
      // present more than once, this is not acceptable.
      if (Flag == ~0U || (FlagsVal & Flag))
        return ParseStatus::NoMatch;
      FlagsVal |= Flag;
    }
  } else // No match for special register.
    return ParseStatus::NoMatch;

  // Special register without flags is NOT equivalent to "fc" flags.
  // NOTE: This is a divergence from gas' behavior. Uncommenting the following
  // two lines would enable gas compatibility at the expense of breaking
  // round-tripping.
  //
  // if (!FlagsVal)
  //  FlagsVal = 0x9;

  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  if (SpecReg == "spsr")
    FlagsVal |= 16;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
  return ParseStatus::Success;
}
5156
5157/// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
5158/// use in the MRS/MSR instructions added to support virtualization.
5159ParseStatus ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
5160 MCAsmParser &Parser = getParser();
5161 SMLoc S = Parser.getTok().getLoc();
5162 const AsmToken &Tok = Parser.getTok();
5163 if (!Tok.is(AsmToken::Identifier))
5164 return ParseStatus::NoMatch;
5165 StringRef RegName = Tok.getString();
5166
5167 auto TheReg = ARMBankedReg::lookupBankedRegByName(RegName.lower());
5168 if (!TheReg)
5169 return ParseStatus::NoMatch;
5170 unsigned Encoding = TheReg->Encoding;
5171
5172 Parser.Lex(); // Eat identifier token.
5173 Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
5174 return ParseStatus::Success;
5175}
5176
5177ParseStatus ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op,
5178 int Low, int High) {
5179 MCAsmParser &Parser = getParser();
5180 const AsmToken &Tok = Parser.getTok();
5181 if (Tok.isNot(AsmToken::Identifier))
5182 return Error(Parser.getTok().getLoc(), Op + " operand expected.");
5183 StringRef ShiftName = Tok.getString();
5184 std::string LowerOp = Op.lower();
5185 std::string UpperOp = Op.upper();
5186 if (ShiftName != LowerOp && ShiftName != UpperOp)
5187 return Error(Parser.getTok().getLoc(), Op + " operand expected.");
5188 Parser.Lex(); // Eat shift type token.
5189
5190 // There must be a '#' and a shift amount.
5191 if (Parser.getTok().isNot(AsmToken::Hash) &&
5192 Parser.getTok().isNot(AsmToken::Dollar))
5193 return Error(Parser.getTok().getLoc(), "'#' expected");
5194 Parser.Lex(); // Eat hash token.
5195
5196 const MCExpr *ShiftAmount;
5197 SMLoc Loc = Parser.getTok().getLoc();
5198 SMLoc EndLoc;
5199 if (getParser().parseExpression(ShiftAmount, EndLoc))
5200 return Error(Loc, "illegal expression");
5201 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5202 if (!CE)
5203 return Error(Loc, "constant expression expected");
5204 int Val = CE->getValue();
5205 if (Val < Low || Val > High)
5206 return Error(Loc, "immediate value out of range");
5207
5208 Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
5209
5210 return ParseStatus::Success;
5211}
5212
5213ParseStatus ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
5214 MCAsmParser &Parser = getParser();
5215 const AsmToken &Tok = Parser.getTok();
5216 SMLoc S = Tok.getLoc();
5217 if (Tok.isNot(AsmToken::Identifier))
5218 return Error(S, "'be' or 'le' operand expected");
5219 int Val = StringSwitch<int>(Tok.getString().lower())
5220 .Case("be", 1)
5221 .Case("le", 0)
5222 .Default(-1);
5223 Parser.Lex(); // Eat the token.
5224
5225 if (Val == -1)
5226 return Error(S, "'be' or 'le' operand expected");
5227 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val,
5228 getContext()),
5229 S, Tok.getEndLoc()));
5230 return ParseStatus::Success;
5231}
5232
5233/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
5234/// instructions. Legal values are:
5235/// lsl #n 'n' in [0,31]
5236/// asr #n 'n' in [1,32]
5237/// n == 32 encoded as n == 0.
5238ParseStatus ARMAsmParser::parseShifterImm(OperandVector &Operands) {
5239 MCAsmParser &Parser = getParser();
5240 const AsmToken &Tok = Parser.getTok();
5241 SMLoc S = Tok.getLoc();
5242 if (Tok.isNot(AsmToken::Identifier))
5243 return Error(S, "shift operator 'asr' or 'lsl' expected");
5244 StringRef ShiftName = Tok.getString();
5245 bool isASR;
5246 if (ShiftName == "lsl" || ShiftName == "LSL")
5247 isASR = false;
5248 else if (ShiftName == "asr" || ShiftName == "ASR")
5249 isASR = true;
5250 else
5251 return Error(S, "shift operator 'asr' or 'lsl' expected");
5252 Parser.Lex(); // Eat the operator.
5253
5254 // A '#' and a shift amount.
5255 if (Parser.getTok().isNot(AsmToken::Hash) &&
5256 Parser.getTok().isNot(AsmToken::Dollar))
5257 return Error(Parser.getTok().getLoc(), "'#' expected");
5258 Parser.Lex(); // Eat hash token.
5259 SMLoc ExLoc = Parser.getTok().getLoc();
5260
5261 const MCExpr *ShiftAmount;
5262 SMLoc EndLoc;
5263 if (getParser().parseExpression(ShiftAmount, EndLoc))
5264 return Error(ExLoc, "malformed shift expression");
5265 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5266 if (!CE)
5267 return Error(ExLoc, "shift amount must be an immediate");
5268
5269 int64_t Val = CE->getValue();
5270 if (isASR) {
5271 // Shift amount must be in [1,32]
5272 if (Val < 1 || Val > 32)
5273 return Error(ExLoc, "'asr' shift amount must be in range [1,32]");
5274 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
5275 if (isThumb() && Val == 32)
5276 return Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
5277 if (Val == 32) Val = 0;
5278 } else {
5279 // Shift amount must be in [1,32]
5280 if (Val < 0 || Val > 31)
5281 return Error(ExLoc, "'lsr' shift amount must be in range [0,31]");
5282 }
5283
5284 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
5285
5286 return ParseStatus::Success;
5287}
5288
5289/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
5290/// of instructions. Legal values are:
5291/// ror #n 'n' in {0, 8, 16, 24}
5292ParseStatus ARMAsmParser::parseRotImm(OperandVector &Operands) {
5293 MCAsmParser &Parser = getParser();
5294 const AsmToken &Tok = Parser.getTok();
5295 SMLoc S = Tok.getLoc();
5296 if (Tok.isNot(AsmToken::Identifier))
5297 return ParseStatus::NoMatch;
5298 StringRef ShiftName = Tok.getString();
5299 if (ShiftName != "ror" && ShiftName != "ROR")
5300 return ParseStatus::NoMatch;
5301 Parser.Lex(); // Eat the operator.
5302
5303 // A '#' and a rotate amount.
5304 if (Parser.getTok().isNot(AsmToken::Hash) &&
5305 Parser.getTok().isNot(AsmToken::Dollar))
5306 return Error(Parser.getTok().getLoc(), "'#' expected");
5307 Parser.Lex(); // Eat hash token.
5308 SMLoc ExLoc = Parser.getTok().getLoc();
5309
5310 const MCExpr *ShiftAmount;
5311 SMLoc EndLoc;
5312 if (getParser().parseExpression(ShiftAmount, EndLoc))
5313 return Error(ExLoc, "malformed rotate expression");
5314 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5315 if (!CE)
5316 return Error(ExLoc, "rotate amount must be an immediate");
5317
5318 int64_t Val = CE->getValue();
5319 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
5320 // normally, zero is represented in asm by omitting the rotate operand
5321 // entirely.
5322 if (Val != 8 && Val != 16 && Val != 24 && Val != 0)
5323 return Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
5324
5325 Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
5326
5327 return ParseStatus::Success;
5328}
5329
5330ParseStatus ARMAsmParser::parseModImm(OperandVector &Operands) {
5331 MCAsmParser &Parser = getParser();
5332 MCAsmLexer &Lexer = getLexer();
5333 int64_t Imm1, Imm2;
5334
5335 SMLoc S = Parser.getTok().getLoc();
5336
5337 // 1) A mod_imm operand can appear in the place of a register name:
5338 // add r0, #mod_imm
5339 // add r0, r0, #mod_imm
5340 // to correctly handle the latter, we bail out as soon as we see an
5341 // identifier.
5342 //
5343 // 2) Similarly, we do not want to parse into complex operands:
5344 // mov r0, #mod_imm
5345 // mov r0, :lower16:(_foo)
5346 if (Parser.getTok().is(AsmToken::Identifier) ||
5347 Parser.getTok().is(AsmToken::Colon))
5348 return ParseStatus::NoMatch;
5349
5350 // Hash (dollar) is optional as per the ARMARM
5351 if (Parser.getTok().is(AsmToken::Hash) ||
5352 Parser.getTok().is(AsmToken::Dollar)) {
5353 // Avoid parsing into complex operands (#:)
5354 if (Lexer.peekTok().is(AsmToken::Colon))
5355 return ParseStatus::NoMatch;
5356
5357 // Eat the hash (dollar)
5358 Parser.Lex();
5359 }
5360
5361 SMLoc Sx1, Ex1;
5362 Sx1 = Parser.getTok().getLoc();
5363 const MCExpr *Imm1Exp;
5364 if (getParser().parseExpression(Imm1Exp, Ex1))
5365 return Error(Sx1, "malformed expression");
5366
5367 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
5368
5369 if (CE) {
5370 // Immediate must fit within 32-bits
5371 Imm1 = CE->getValue();
5372 int Enc = ARM_AM::getSOImmVal(Imm1);
5373 if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
5374 // We have a match!
5375 Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
5376 (Enc & 0xF00) >> 7,
5377 Sx1, Ex1));
5378 return ParseStatus::Success;
5379 }
5380
5381 // We have parsed an immediate which is not for us, fallback to a plain
5382 // immediate. This can happen for instruction aliases. For an example,
5383 // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
5384 // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
5385 // instruction with a mod_imm operand. The alias is defined such that the
5386 // parser method is shared, that's why we have to do this here.
5387 if (Parser.getTok().is(AsmToken::EndOfStatement)) {
5388 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5389 return ParseStatus::Success;
5390 }
5391 } else {
5392 // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
5393 // MCFixup). Fallback to a plain immediate.
5394 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5395 return ParseStatus::Success;
5396 }
5397
5398 // From this point onward, we expect the input to be a (#bits, #rot) pair
5399 if (Parser.getTok().isNot(AsmToken::Comma))
5400 return Error(Sx1,
5401 "expected modified immediate operand: #[0, 255], #even[0-30]");
5402
5403 if (Imm1 & ~0xFF)
5404 return Error(Sx1, "immediate operand must a number in the range [0, 255]");
5405
5406 // Eat the comma
5407 Parser.Lex();
5408
5409 // Repeat for #rot
5410 SMLoc Sx2, Ex2;
5411 Sx2 = Parser.getTok().getLoc();
5412
5413 // Eat the optional hash (dollar)
5414 if (Parser.getTok().is(AsmToken::Hash) ||
5415 Parser.getTok().is(AsmToken::Dollar))
5416 Parser.Lex();
5417
5418 const MCExpr *Imm2Exp;
5419 if (getParser().parseExpression(Imm2Exp, Ex2))
5420 return Error(Sx2, "malformed expression");
5421
5422 CE = dyn_cast<MCConstantExpr>(Imm2Exp);
5423
5424 if (CE) {
5425 Imm2 = CE->getValue();
5426 if (!(Imm2 & ~0x1E)) {
5427 // We have a match!
5428 Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
5429 return ParseStatus::Success;
5430 }
5431 return Error(Sx2,
5432 "immediate operand must an even number in the range [0, 30]");
5433 } else {
5434 return Error(Sx2, "constant expression expected");
5435 }
5436}
5437
/// Parse a bitfield descriptor of the form "#lsb, #width" (as used by the
/// BFC/BFI/SBFX/UBFX family): lsb in [0,31] and width in [1, 32-lsb], each
/// introduced by '#' or '$'. Pushes a single bitfield operand.
ParseStatus ARMAsmParser::parseBitfield(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  // The bitfield descriptor is really two operands, the LSB and the width.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return Error(Parser.getTok().getLoc(), "'#' expected");
  Parser.Lex(); // Eat hash token.

  const MCExpr *LSBExpr;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().parseExpression(LSBExpr))
    return Error(E, "malformed immediate expression");
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
  if (!CE)
    return Error(E, "'lsb' operand must be an immediate");

  int64_t LSB = CE->getValue();
  // The LSB must be in the range [0,31]
  if (LSB < 0 || LSB > 31)
    return Error(E, "'lsb' operand must be in the range [0,31]");
  E = Parser.getTok().getLoc();

  // Expect another immediate operand.
  if (Parser.getTok().isNot(AsmToken::Comma))
    return Error(Parser.getTok().getLoc(), "too few operands");
  Parser.Lex(); // Eat comma token.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return Error(Parser.getTok().getLoc(), "'#' expected");
  Parser.Lex(); // Eat hash token.

  const MCExpr *WidthExpr;
  SMLoc EndLoc;
  if (getParser().parseExpression(WidthExpr, EndLoc))
    return Error(E, "malformed immediate expression");
  CE = dyn_cast<MCConstantExpr>(WidthExpr);
  if (!CE)
    return Error(E, "'width' operand must be an immediate");

  int64_t Width = CE->getValue();
  // The width must be in the range [1,32-lsb]
  if (Width < 1 || Width > 32 - LSB)
    return Error(E, "'width' operand must be in the range [1,32-lsb]");

  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));

  return ParseStatus::Success;
}
5487
5488ParseStatus ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
5489 // Check for a post-index addressing register operand. Specifically:
5490 // postidx_reg := '+' register {, shift}
5491 // | '-' register {, shift}
5492 // | register {, shift}
5493
5494 // This method must return ParseStatus::NoMatch without consuming any tokens
5495 // in the case where there is no match, as other alternatives take other
5496 // parse methods.
5497 MCAsmParser &Parser = getParser();
5498 AsmToken Tok = Parser.getTok();
5499 SMLoc S = Tok.getLoc();
5500 bool haveEaten = false;
5501 bool isAdd = true;
5502 if (Tok.is(AsmToken::Plus)) {
5503 Parser.Lex(); // Eat the '+' token.
5504 haveEaten = true;
5505 } else if (Tok.is(AsmToken::Minus)) {
5506 Parser.Lex(); // Eat the '-' token.
5507 isAdd = false;
5508 haveEaten = true;
5509 }
5510
5511 SMLoc E = Parser.getTok().getEndLoc();
5512 int Reg = tryParseRegister();
5513 if (Reg == -1) {
5514 if (!haveEaten)
5515 return ParseStatus::NoMatch;
5516 return Error(Parser.getTok().getLoc(), "register expected");
5517 }
5518
5520 unsigned ShiftImm = 0;
5521 if (Parser.getTok().is(AsmToken::Comma)) {
5522 Parser.Lex(); // Eat the ','.
5523 if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
5524 return ParseStatus::Failure;
5525
5526 // FIXME: Only approximates end...may include intervening whitespace.
5527 E = Parser.getTok().getLoc();
5528 }
5529
5530 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
5531 ShiftImm, S, E));
5532
5533 return ParseStatus::Success;
5534}
5535
5536ParseStatus ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
5537 // Check for a post-index addressing register operand. Specifically:
5538 // am3offset := '+' register
5539 // | '-' register
5540 // | register
5541 // | # imm
5542 // | # + imm
5543 // | # - imm
5544
5545 // This method must return ParseStatus::NoMatch without consuming any tokens
5546 // in the case where there is no match, as other alternatives take other
5547 // parse methods.
5548 MCAsmParser &Parser = getParser();
5549 AsmToken Tok = Parser.getTok();
5550 SMLoc S = Tok.getLoc();
5551
5552 // Do immediates first, as we always parse those if we have a '#'.
5553 if (Parser.getTok().is(AsmToken::Hash) ||
5554 Parser.getTok().is(AsmToken::Dollar)) {
5555 Parser.Lex(); // Eat '#' or '$'.
5556 // Explicitly look for a '-', as we need to encode negative zero
5557 // differently.
5558 bool isNegative = Parser.getTok().is(AsmToken::Minus);
5559 const MCExpr *Offset;
5560 SMLoc E;
5561 if (getParser().parseExpression(Offset, E))
5562 return ParseStatus::Failure;
5563 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
5564 if (!CE)
5565 return Error(S, "constant expression expected");
5566 // Negative zero is encoded as the flag value
5567 // std::numeric_limits<int32_t>::min().
5568 int32_t Val = CE->getValue();
5569 if (isNegative && Val == 0)
5570 Val = std::numeric_limits<int32_t>::min();
5571
5572 Operands.push_back(
5573 ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));
5574
5575 return ParseStatus::Success;
5576 }
5577
5578 bool haveEaten = false;
5579 bool isAdd = true;
5580 if (Tok.is(AsmToken::Plus)) {
5581 Parser.Lex(); // Eat the '+' token.
5582 haveEaten = true;
5583 } else if (Tok.is(AsmToken::Minus)) {
5584 Parser.Lex(); // Eat the '-' token.
5585 isAdd = false;
5586 haveEaten = true;
5587 }
5588
5589 Tok = Parser.getTok();
5590 int Reg = tryParseRegister();
5591 if (Reg == -1) {
5592 if (!haveEaten)
5593 return ParseStatus::NoMatch;
5594 return Error(Tok.getLoc(), "register expected");
5595 }
5596
5597 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
5598 0, S, Tok.getEndLoc()));
5599
5600 return ParseStatus::Success;
5601}
5602
/// Convert parsed operands to MCInst. Needed here because this instruction
/// only has two register operands, but multiplication is commutative so
/// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
                                    const OperandVector &Operands) {
  // Operand slots used below: [1] cc_out, [2] predicate condition code,
  // [3] Rd, [4] first source register, [5] (three-operand form) second
  // source register.
  ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
  ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
  // If we have a three-operand form, make sure to set Rn to be the operand
  // that isn't the same as Rd.
  unsigned RegOp = 4;
  if (Operands.size() == 6 &&
      ((ARMOperand &)*Operands[4]).getReg() ==
          ((ARMOperand &)*Operands[3]).getReg())
    RegOp = 5;
  ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
  // Duplicate the destination register (MCInst operand 0) as the remaining
  // source — presumably the tied second source of Thumb MUL; confirm against
  // the tMUL instruction definition.
  Inst.addOperand(Inst.getOperand(0));
  ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
}
5621
/// Convert parsed Thumb branch operands to an MCInst, choosing between the
/// conditional/unconditional and 16-bit/32-bit encodings as needed.
void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
                                    const OperandVector &Operands) {
  // Locate the condition-code and immediate (branch target) operand slots,
  // which differ between the 16-bit (tB/tBcc) and 32-bit (t2B/t2Bcc) forms.
  int CondOp = -1, ImmOp = -1;
  switch(Inst.getOpcode()) {
  case ARM::tB:
  case ARM::tBcc: CondOp = 1; ImmOp = 2; break;

  case ARM::t2B:
  case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;

  default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
  }
  // first decide whether or not the branch should be conditional
  // by looking at its location relative to an IT block
  if(inITBlock()) {
    // inside an IT block we cannot have any conditional branches. any
    // such instructions needs to be converted to unconditional form
    switch(Inst.getOpcode()) {
      case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
      case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
    }
  } else {
    // outside IT blocks we can only have unconditional branches with AL
    // condition code or conditional branches with non-AL condition code
    unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
    switch(Inst.getOpcode()) {
      case ARM::tB:
      case ARM::tBcc:
        Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
        break;
      case ARM::t2B:
      case ARM::t2Bcc:
        Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
        break;
    }
  }

  // now decide on encoding size based on branch target range
  switch(Inst.getOpcode()) {
  // classify tB as either t2B or t1B based on range of immediate operand
  case ARM::tB: {
    ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
    // Widen to 32-bit t2B when the target is out of tB's range and the
    // target architecture has the wide encoding (v8-M Baseline or later).
    if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
      Inst.setOpcode(ARM::t2B);
    break;
  }
  // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
  case ARM::tBcc: {
    ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
    if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
      Inst.setOpcode(ARM::t2Bcc);
    break;
  }
  }
  ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
  ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
}
5679
5680void ARMAsmParser::cvtMVEVMOVQtoDReg(
5681 MCInst &Inst, const OperandVector &Operands) {
5682
5683 // mnemonic, condition code, Rt, Rt2, Qd, idx, Qd again, idx2
5684 assert(Operands.size() == 8);
5685
5686 ((ARMOperand &)*Operands[2]).addRegOperands(Inst, 1); // Rt
5687 ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1); // Rt2
5688 ((ARMOperand &)*Operands[4]).addRegOperands(Inst, 1); // Qd
5689 ((ARMOperand &)*Operands[5]).addMVEPairVectorIndexOperands(Inst, 1); // idx
5690 // skip second copy of Qd in Operands[6]
5691 ((ARMOperand &)*Operands[7]).addMVEPairVectorIndexOperands(Inst, 1); // idx2
5692 ((ARMOperand &)*Operands[1]).addCondCodeOperands(Inst, 2); // condition code
5693}
5694
/// Parse an ARM memory expression, return false if successful else return true
/// or an error. The first token must be a '[' when called.
///
/// Handles the forms: [Rn], [Rn]!, [Rn:align], [Rn, #imm], and
/// [Rn, {+/-}Rm {, shift}] — each optionally followed by a writeback '!'.
bool ARMAsmParser::parseMemory(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S, E;
  if (Parser.getTok().isNot(AsmToken::LBrac))
    return TokError("Token is not a Left Bracket");
  S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat left bracket token.

  const AsmToken &BaseRegTok = Parser.getTok();
  int BaseRegNum = tryParseRegister();
  if (BaseRegNum == -1)
    return Error(BaseRegTok.getLoc(), "register expected");

  // The next token must either be a comma, a colon or a closing bracket.
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
      !Tok.is(AsmToken::RBrac))
    return Error(Tok.getLoc(), "malformed memory operand");

  // Simple "[Rn]" form: base register only, no offset.
  if (Tok.is(AsmToken::RBrac)) {
    E = Tok.getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
                                             ARM_AM::no_shift, 0, 0, false,
                                             S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand. It's rather odd, but syntactically valid.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
         "Lost colon or comma in memory operand?!");
  if (Tok.is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the comma.
  }

  // If we have a ':', it's an alignment specifier.
  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat the ':'.
    E = Parser.getTok().getLoc();
    SMLoc AlignmentLoc = Tok.getLoc();

    const MCExpr *Expr;
    if (getParser().parseExpression(Expr))
      return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error (E, "constant expression expected");

    // Convert the alignment (in bits) to the byte count the operand stores.
    unsigned Align = 0;
    switch (CE->getValue()) {
    default:
      return Error(E,
                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
    case 16:  Align = 2; break;
    case 32:  Align = 4; break;
    case 64:  Align = 8; break;
    case 128: Align = 16; break;
    case 256: Align = 32; break;
    }

    // Now we should have the closing ']'
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");
    E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    // NOTE(review): a source line appears to be missing from this CreateMem
    // call (presumably the `ARM_AM::no_shift, 0, Align,` argument line) —
    // confirm against upstream before relying on this call as shown.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
                                             false, S, E, AlignmentLoc));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // If we have a '#' or '$', it's an immediate offset, else assume it's a
  // register offset. Be friendly and also accept a plain integer or expression
  // (without a leading hash) for gas compatibility.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar) ||
      Parser.getTok().is(AsmToken::LParen) ||
      Parser.getTok().is(AsmToken::Integer)) {
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar))
      Parser.Lex(); // Eat '#' or '$'
    E = Parser.getTok().getLoc();

    // Note a leading '-' so "#-0" can be told apart from "#0".
    bool isNegative = getParser().getTok().is(AsmToken::Minus);
    const MCExpr *Offset, *AdjustedOffset;
    if (getParser().parseExpression(Offset))
      return true;

    if (const auto *CE = dyn_cast<MCConstantExpr>(Offset)) {
      // If the constant was #-0, represent it as
      // std::numeric_limits<int32_t>::min().
      int32_t Val = CE->getValue();
      if (isNegative && Val == 0)
        CE = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
                                    getContext());
      // Don't worry about range checking the value here. That's handled by
      // the is*() predicates.
      AdjustedOffset = CE;
    } else
      AdjustedOffset = Offset;
    Operands.push_back(ARMOperand::CreateMem(
        BaseRegNum, AdjustedOffset, 0, ARM_AM::no_shift, 0, 0, false, S, E));

    // Now we should have the closing ']'
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");
    E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // The register offset is optionally preceded by a '+' or '-'
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex(); // Eat the '-'.
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    // Nothing to do.
    Parser.Lex(); // Eat the '+'.
  }

  E = Parser.getTok().getLoc();
  int OffsetRegNum = tryParseRegister();
  if (OffsetRegNum == -1)
    return Error(E, "register expected");

  // If there's a shift operator, handle it.
  // NOTE(review): the declaration of ShiftType (presumably
  // `ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;`) appears to be missing
  // here — confirm against upstream; ShiftType is used below but is not
  // declared in the code as shown.
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
      return true;
  }

  // Now we should have the closing ']'
  if (Parser.getTok().isNot(AsmToken::RBrac))
    return Error(Parser.getTok().getLoc(), "']' expected");
  E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat right bracket token.

  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
                                           ShiftType, ShiftImm, 0, isNegative,
                                           S, E));

  // If there's a pre-indexing writeback marker, '!', just add it as a token
  // operand.
  if (Parser.getTok().is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat the '!'.
  }

  return false;
}
5882
5883/// parseMemRegOffsetShift - one of these two:
5884/// ( lsl | lsr | asr | ror ) , # shift_amount
5885/// rrx
5886/// return true if it parses a shift otherwise it returns false.
5887bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
5888 unsigned &Amount) {
5889 MCAsmParser &Parser = getParser();
5890 SMLoc Loc = Parser.getTok().getLoc();
5891 const AsmToken &Tok = Parser.getTok();
5892 if (Tok.isNot(AsmToken::Identifier))
5893 return Error(Loc, "illegal shift operator");
5894 StringRef ShiftName = Tok.getString();
5895 if (ShiftName == "lsl" || ShiftName == "LSL" ||
5896 ShiftName == "asl" || ShiftName == "ASL")
5897 St = ARM_AM::lsl;
5898 else if (ShiftName == "lsr" || ShiftName == "LSR")
5899 St = ARM_AM::lsr;
5900 else if (ShiftName == "asr" || ShiftName == "ASR")
5901 St = ARM_AM::asr;
5902 else if (ShiftName == "ror" || ShiftName == "ROR")
5903 St = ARM_AM::ror;
5904 else if (ShiftName == "rrx" || ShiftName == "RRX")
5905 St = ARM_AM::rrx;
5906 else if (ShiftName == "uxtw" || ShiftName == "UXTW")
5907 St = ARM_AM::uxtw;
5908 else
5909 return Error(Loc, "illegal shift operator");
5910 Parser.Lex(); // Eat shift type token.
5911
5912 // rrx stands alone.
5913 Amount = 0;
5914 if (St != ARM_AM::rrx) {
5915 Loc = Parser.getTok().getLoc();
5916 // A '#' and a shift amount.
5917 const AsmToken &HashTok = Parser.getTok();
5918 if (HashTok.isNot(AsmToken::Hash) &&
5919 HashTok.isNot(AsmToken::Dollar))
5920 return Error(HashTok.getLoc(), "'#' expected");
5921 Parser.Lex(); // Eat hash token.
5922
5923 const MCExpr *Expr;
5924 if (getParser().parseExpression(Expr))
5925 return true;
5926 // Range check the immediate.
5927 // lsl, ror: 0 <= imm <= 31
5928 // lsr, asr: 0 <= imm <= 32
5929 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5930 if (!CE)
5931 return Error(Loc, "shift amount must be an immediate");
5932 int64_t Imm = CE->getValue();
5933 if (Imm < 0 ||
5934 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
5935 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
5936 return Error(Loc, "immediate shift value out of range");
5937 // If <ShiftTy> #0, turn it into a no_shift.
5938 if (Imm == 0)
5939 St = ARM_AM::lsl;
5940 // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
5941 if (Imm == 32)
5942 Imm = 0;
5943 Amount = Imm;
5944 }
5945
5946 return false;
5947}
5948
5949/// parseFPImm - A floating point immediate expression operand.
5950ParseStatus ARMAsmParser::parseFPImm(OperandVector &Operands) {
5951 MCAsmParser &Parser = getParser();
5952 // Anything that can accept a floating point constant as an operand
5953 // needs to go through here, as the regular parseExpression is
5954 // integer only.
5955 //
5956 // This routine still creates a generic Immediate operand, containing
5957 // a bitcast of the 64-bit floating point value. The various operands
5958 // that accept floats can check whether the value is valid for them
5959 // via the standard is*() predicates.
5960
5961 SMLoc S = Parser.getTok().getLoc();
5962
5963 if (Parser.getTok().isNot(AsmToken::Hash) &&
5964 Parser.getTok().isNot(AsmToken::Dollar))
5965 return ParseStatus::NoMatch;
5966
5967 // Disambiguate the VMOV forms that can accept an FP immediate.
5968 // vmov.f32 <sreg>, #imm
5969 // vmov.f64 <dreg>, #imm
5970 // vmov.f32 <dreg>, #imm @ vector f32x2
5971 // vmov.f32 <qreg>, #imm @ vector f32x4
5972 //
5973 // There are also the NEON VMOV instructions which expect an
5974 // integer constant. Make sure we don't try to parse an FPImm
5975 // for these:
5976 // vmov.i{8|16|32|64} <dreg|qreg>, #imm
5977 ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
5978 bool isVmovf = TyOp.isToken() &&
5979 (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
5980 TyOp.getToken() == ".f16");
5981 ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
5982 bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
5983 Mnemonic.getToken() == "fconsts");
5984 if (!(isVmovf || isFconst))
5985 return ParseStatus::NoMatch;
5986
5987 Parser.Lex(); // Eat '#' or '$'.
5988
5989 // Handle negation, as that still comes through as a separate token.
5990 bool isNegative = false;
5991 if (Parser.getTok().is(AsmToken::Minus)) {
5992 isNegative = true;
5993 Parser.Lex();
5994 }
5995 const AsmToken &Tok = Parser.getTok();
5996 SMLoc Loc = Tok.getLoc();
5997 if (Tok.is(AsmToken::Real) && isVmovf) {
5998 APFloat RealVal(APFloat::IEEEsingle(), Tok.getString());
5999 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
6000 // If we had a '-' in front, toggle the sign bit.
6001 IntVal ^= (uint64_t)isNegative << 31;
6002 Parser.Lex(); // Eat the token.
6003 Operands.push_back(ARMOperand::CreateImm(
6004 MCConstantExpr::create(IntVal, getContext()),
6005 S, Parser.getTok().getLoc()));
6006 return ParseStatus::Success;
6007 }
6008 // Also handle plain integers. Instructions which allow floating point
6009 // immediates also allow a raw encoded 8-bit value.
6010 if (Tok.is(AsmToken::Integer) && isFconst) {
6011 int64_t Val = Tok.getIntVal();
6012 Parser.Lex(); // Eat the token.
6013 if (Val > 255 || Val < 0)
6014 return Error(Loc, "encoded floating point value out of range");
6015 float RealVal = ARM_AM::getFPImmFloat(Val);
6016 Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();
6017
6018 Operands.push_back(ARMOperand::CreateImm(
6019 MCConstantExpr::create(Val, getContext()), S,
6020 Parser.getTok().getLoc()));
6021 return ParseStatus::Success;
6022 }
6023
6024 return Error(Loc, "invalid floating point immediate");
6025}
6026
/// Parse an ARM instruction operand. For now this parses the operand
/// regardless of the mnemonic.
bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  MCAsmParser &Parser = getParser();
  SMLoc S, E;

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  ParseStatus ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy.isSuccess())
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy.isFailure())
    return true;

  // Dispatch on the kind of the first token of the operand.
  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return true;
  case AsmToken::Identifier: {
    // If we've seen a branch mnemonic, the next operand must be a label. This
    // is true even if the label is a register name. So "br r1" means branch to
    // label "r1".
    bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
    if (!ExpectLabel) {
      // Try register (with optional '!' writeback) first, then shifted
      // register forms like "r0, lsl #2".
      if (!tryParseRegisterWithWriteBack(Operands))
        return false;
      int Res = tryParseShiftRegister(Operands);
      if (Res == 0) // success
        return false;
      else if (Res == -1) // irrecoverable error
        return true;
      // If this is VMRS, check for the apsr_nzcv operand.
      if (Mnemonic == "vmrs" &&
          Parser.getTok().getString().equals_insensitive("apsr_nzcv")) {
        S = Parser.getTok().getLoc();
        Parser.Lex();
        Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
        return false;
      }
    }

    // Fall though for the Identifier case that is not a register or a
    // special name.
    [[fallthrough]];
  }
  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
  case AsmToken::Integer: // things like 1f and 2b as a branch targets
  case AsmToken::String:  // quoted label names.
  case AsmToken::Dot: {   // . as a branch target
    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = Parser.getTok().getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
    return false;
  }
  case AsmToken::LBrac:
    // '[' introduces a memory operand.
    return parseMemory(Operands);
  case AsmToken::LCurly: {
    // '{' introduces a register list; vlldm/vlstm may name registers beyond
    // the usual bound.
    bool AllowOutOfBoundReg = Mnemonic == "vlldm" || Mnemonic == "vlstm";
    return parseRegisterList(Operands, !Mnemonic.starts_with("clr"), false,
                             AllowOutOfBoundReg);
  }
  case AsmToken::Dollar:
  case AsmToken::Hash: {
    // #42 -> immediate
    // $ 42 -> immediate
    // $foo -> symbol name
    // $42 -> symbol name
    S = Parser.getTok().getLoc();

    // Favor the interpretation of $-prefixed operands as symbol names.
    // Cases where immediates are explicitly expected are handled by their
    // specific ParseMethod implementations.
    auto AdjacentToken = getLexer().peekTok(/*ShouldSkipSpace=*/false);
    bool ExpectIdentifier = Parser.getTok().is(AsmToken::Dollar) &&
                            (AdjacentToken.is(AsmToken::Identifier) ||
                             AdjacentToken.is(AsmToken::Integer));
    if (!ExpectIdentifier) {
      // Token is not part of identifier. Drop leading $ or # before parsing
      // expression.
      Parser.Lex();
    }

    if (Parser.getTok().isNot(AsmToken::Colon)) {
      // Note a leading '-' so "#-0" can be encoded distinctly from "#0".
      bool IsNegative = Parser.getTok().is(AsmToken::Minus);
      const MCExpr *ImmVal;
      if (getParser().parseExpression(ImmVal))
        return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
      if (CE) {
        int32_t Val = CE->getValue();
        if (IsNegative && Val == 0)
          ImmVal = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
                                          getContext());
      }
      E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
      Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));

      // There can be a trailing '!' on operands that we want as a separate
      // '!' Token operand. Handle that here. For example, the compatibility
      // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
      if (Parser.getTok().is(AsmToken::Exclaim)) {
        Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
                                                   Parser.getTok().getLoc()));
        Parser.Lex(); // Eat exclaim token
      }
      return false;
    }
    // w/ a ':' after the '#', it's just like a plain ':'.
    [[fallthrough]];
  }
  case AsmToken::Colon: {
    S = Parser.getTok().getLoc();
    // ":lower16:", ":upper16:", ":lower0_7:", ":lower8_15:", ":upper0_7:" and
    // ":upper8_15:", expression prefixes
    // FIXME: Check it's an expression prefix,
    // e.g. (FOO - :lower16:BAR) isn't legal.
    ARMMCExpr::VariantKind RefKind;
    if (parsePrefix(RefKind))
      return true;

    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    // Wrap the inner expression with the relocation-variant kind parsed above.
    const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
                                              getContext());
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
    return false;
  }
  case AsmToken::Equal: {
    S = Parser.getTok().getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return Error(S, "unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);

    // execute-only: we assume that assembly programmers know what they are
    // doing and allow literal pool creation here
    Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S, E));
    return false;
  }
  }
}
6182
6183bool ARMAsmParser::parseImmExpr(int64_t &Out) {
6184 const MCExpr *Expr = nullptr;
6185 SMLoc L = getParser().getTok().getLoc();
6186 if (check(getParser().parseExpression(Expr), L, "expected expression"))
6187 return true;
6188 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
6189 if (check(!Value, L, "expected constant expression"))
6190 return true;
6191 Out = Value->getValue();
6192 return false;
6193}
6194
// parsePrefix - Parse ARM 16-bit relocations expression prefixes, i.e.
// :lower16: and :upper16: and Thumb 8-bit relocation expression prefixes, i.e.
// :upper8_15:, :upper0_7:, :lower8_15: and :lower0_7:
//
// Returns true (after emitting a diagnostic) on failure; on success RefKind
// holds the parsed relocation-variant kind.
bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
  MCAsmParser &Parser = getParser();
  RefKind = ARMMCExpr::VK_ARM_None;

  // consume an optional '#' (GNU compatibility)
  if (getLexer().is(AsmToken::Hash))
    Parser.Lex();

  assert(getLexer().is(AsmToken::Colon) && "expected a :");
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
    return true;
  }

  // Bitmask of object-file formats, indexed by MCContext::Environment, used
  // to express which formats can represent each relocation prefix.
  enum {
    COFF = (1 << MCContext::IsCOFF),
    ELF = (1 << MCContext::IsELF),
    MACHO = (1 << MCContext::IsMachO),
    WASM = (1 << MCContext::IsWasm),
  };
  static const struct PrefixEntry {
    const char *Spelling;
    ARMMCExpr::VariantKind VariantKind;
    uint8_t SupportedFormats;
  } PrefixEntries[] = {
      {"upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO},
      {"lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO},
      {"upper8_15", ARMMCExpr::VK_ARM_HI_8_15, ELF},
      {"upper0_7", ARMMCExpr::VK_ARM_HI_0_7, ELF},
      {"lower8_15", ARMMCExpr::VK_ARM_LO_8_15, ELF},
      {"lower0_7", ARMMCExpr::VK_ARM_LO_0_7, ELF},
  };

  StringRef IDVal = Parser.getTok().getIdentifier();

  const auto &Prefix =
      llvm::find_if(PrefixEntries, [&IDVal](const PrefixEntry &PE) {
        return PE.Spelling == IDVal;
      });
  if (Prefix == std::end(PrefixEntries)) {
    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
    return true;
  }

  uint8_t CurrentFormat;
  switch (getContext().getObjectFileType()) {
  case MCContext::IsMachO:
    CurrentFormat = MACHO;
    break;
  case MCContext::IsELF:
    CurrentFormat = ELF;
    break;
  case MCContext::IsCOFF:
    CurrentFormat = COFF;
    break;
  case MCContext::IsWasm:
    CurrentFormat = WASM;
    break;
  case MCContext::IsGOFF:
  case MCContext::IsSPIRV:
  case MCContext::IsXCOFF:
  // NOTE(review): a case line appears to be missing here (presumably
  // `case MCContext::IsDXContainer:`) — confirm against upstream.
    llvm_unreachable("unexpected object format");
    break;
  }

  if (~Prefix->SupportedFormats & CurrentFormat) {
    Error(Parser.getTok().getLoc(),
          "cannot represent relocation in the current file format");
    return true;
  }

  RefKind = Prefix->VariantKind;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
    return true;
  }
  Parser.Lex(); // Eat the last ':'

  // consume an optional trailing '#' (GNU compatibility)
  parseOptionalToken(AsmToken::Hash);

  return false;
}
6286
/// Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
///
/// On return: PredicationCode/VPTPredicationCode hold any condition or VPT
/// predication suffix stripped from the mnemonic, CarrySetting is true when a
/// trailing 's' was stripped, ProcessorIMod holds a "cps" interrupt-mode
/// suffix (ie/id) if present, and ITMask holds the condition mask stripped
/// from "it"/"vpt"/"vpst" mnemonics. The returned StringRef is the canonical
/// mnemonic with all recognised suffixes removed.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
                                      ARMCC::CondCodes &PredicationCode,
                                      ARMVCC::VPTCodes &VPTPredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  VPTPredicationCode = ARMVCC::None;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) || Mnemonic == "teq" ||
      Mnemonic == "vceq" || Mnemonic == "svc" || Mnemonic == "mls" ||
      Mnemonic == "smmls" || Mnemonic == "vcls" || Mnemonic == "vmls" ||
      Mnemonic == "vnmls" || Mnemonic == "vacge" || Mnemonic == "vcge" ||
      Mnemonic == "vclt" || Mnemonic == "vacgt" || Mnemonic == "vaclt" ||
      Mnemonic == "vacle" || Mnemonic == "hlt" || Mnemonic == "vcgt" ||
      Mnemonic == "vcle" || Mnemonic == "smlal" || Mnemonic == "umaal" ||
      Mnemonic == "umlal" || Mnemonic == "vabal" || Mnemonic == "vmlal" ||
      Mnemonic == "vpadal" || Mnemonic == "vqdmlal" || Mnemonic == "fmuls" ||
      Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || Mnemonic == "vcvta" ||
      Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || Mnemonic == "vcvtm" ||
      Mnemonic == "vrinta" || Mnemonic == "vrintn" || Mnemonic == "vrintp" ||
      Mnemonic == "vrintm" || Mnemonic == "hvc" ||
      Mnemonic.starts_with("vsel") || Mnemonic == "vins" ||
      Mnemonic == "vmovx" || Mnemonic == "bxns" || Mnemonic == "blxns" ||
      Mnemonic == "vdot" || Mnemonic == "vmmla" || Mnemonic == "vudot" ||
      Mnemonic == "vsdot" || Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
      Mnemonic == "vfmal" || Mnemonic == "vfmsl" || Mnemonic == "wls" ||
      Mnemonic == "le" || Mnemonic == "dls" || Mnemonic == "csel" ||
      Mnemonic == "csinc" || Mnemonic == "csinv" || Mnemonic == "csneg" ||
      Mnemonic == "cinc" || Mnemonic == "cinv" || Mnemonic == "cneg" ||
      Mnemonic == "cset" || Mnemonic == "csetm" || Mnemonic == "aut" ||
      Mnemonic == "pac" || Mnemonic == "pacbti" || Mnemonic == "bti")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs" &&
      !(hasMVE() &&
        (Mnemonic == "vmine" || Mnemonic == "vshle" || Mnemonic == "vshlt" ||
         Mnemonic == "vshllt" || Mnemonic == "vrshle" || Mnemonic == "vrshlt" ||
         Mnemonic == "vmvne" || Mnemonic == "vorne" || Mnemonic == "vnege" ||
         Mnemonic == "vnegt" || Mnemonic == "vmule" || Mnemonic == "vmult" ||
         Mnemonic == "vrintne" || Mnemonic == "vcmult" ||
         Mnemonic == "vcmule" || Mnemonic == "vpsele" || Mnemonic == "vpselt" ||
         Mnemonic.starts_with("vq")))) {
    // The last two characters may spell a condition code ("eq", "ne", ...).
    unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2));
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = static_cast<ARMCC::CondCodes>(CC);
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.ends_with("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" || Mnemonic == "mrs" ||
        Mnemonic == "smmls" || Mnemonic == "vabs" || Mnemonic == "vcls" ||
        Mnemonic == "vmls" || Mnemonic == "vmrs" || Mnemonic == "vnmls" ||
        Mnemonic == "vqabs" || Mnemonic == "vrecps" || Mnemonic == "vrsqrts" ||
        Mnemonic == "srs" || Mnemonic == "flds" || Mnemonic == "fmrs" ||
        Mnemonic == "fsqrts" || Mnemonic == "fsubs" || Mnemonic == "fsts" ||
        Mnemonic == "fcpys" || Mnemonic == "fdivs" || Mnemonic == "fmuls" ||
        Mnemonic == "fcmps" || Mnemonic == "fcmpzs" || Mnemonic == "vfms" ||
        Mnemonic == "vfnms" || Mnemonic == "fconsts" || Mnemonic == "bxns" ||
        Mnemonic == "blxns" || Mnemonic == "vfmas" || Mnemonic == "vmlas" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.starts_with("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // Strip a trailing VPT predication suffix ('t'/'e'), excluding mnemonics
  // whose final 't' is part of the mnemonic itself.
  if (isMnemonicVPTPredicable(Mnemonic, ExtraToken) && Mnemonic != "vmovlt" &&
      Mnemonic != "vshllt" && Mnemonic != "vrshrnt" && Mnemonic != "vshrnt" &&
      Mnemonic != "vqrshrunt" && Mnemonic != "vqshrunt" &&
      Mnemonic != "vqrshrnt" && Mnemonic != "vqshrnt" && Mnemonic != "vmullt" &&
      Mnemonic != "vqmovnt" && Mnemonic != "vqmovunt" &&
      Mnemonic != "vqmovnt" && Mnemonic != "vmovnt" && Mnemonic != "vqdmullt" &&
      Mnemonic != "vpnot" && Mnemonic != "vcvtt" && Mnemonic != "vcvt") {
    unsigned VCC =
        ARMVectorCondCodeFromString(Mnemonic.substr(Mnemonic.size() - 1));
    if (VCC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-1);
      VPTPredicationCode = static_cast<ARMVCC::VPTCodes>(VCC);
    }
    return Mnemonic;
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.starts_with("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  // Likewise "vpst"/"vpt" carry a VPT mask suffix; check "vpst" first since
  // "vpt" is its prefix.
  if (Mnemonic.starts_with("vpst")) {
    ITMask = Mnemonic.slice(4, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 4);
  }
  else if (Mnemonic.starts_with("vpt")) {
    ITMask = Mnemonic.slice(3, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 3);
  }

  return Mnemonic;
}
6417
/// Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
//
// FIXME: It would be nice to autogen this.
//
// Out-parameters (all derived purely from the mnemonic string and the
// current sub-target mode):
//  - CanAcceptCarrySet: mnemonic has a flag-setting ('s' suffix) variant.
//  - CanAcceptPredicationCode: mnemonic may carry an ARM condition code.
//  - CanAcceptVPTPredicationCode: mnemonic may carry an MVE VPT T/E suffix.
void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic,
                                         StringRef ExtraToken,
                                         StringRef FullInst,
                                         bool &CanAcceptCarrySet,
                                         bool &CanAcceptPredicationCode,
                                         bool &CanAcceptVPTPredicationCode) {
  CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken);

  // Mnemonics with an optional flag-setting form. Some of these (mov, mla,
  // smull, smlal, umlal, umull) only have a flag-setting form in ARM mode.
  CanAcceptCarrySet =
      Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
      Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
      Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
      Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
      Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
      Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
      (!isThumb() &&
       (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
        Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));

  // NOTE(review): "Mnemonic == \"cps\"" below is already subsumed by the
  // starts_with("cps") test a few lines down — harmless redundancy.
  if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
      Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
      Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
      Mnemonic.starts_with("crc32") || Mnemonic.starts_with("cps") ||
      Mnemonic.starts_with("vsel") || Mnemonic == "vmaxnm" ||
      Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
      Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
      Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
      Mnemonic.starts_with("aes") || Mnemonic == "hvc" ||
      Mnemonic == "setpan" || Mnemonic.starts_with("sha1") ||
      Mnemonic.starts_with("sha256") ||
      (FullInst.starts_with("vmull") && FullInst.ends_with(".p64")) ||
      Mnemonic == "vmovx" || Mnemonic == "vins" || Mnemonic == "vudot" ||
      Mnemonic == "vsdot" || Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
      Mnemonic == "vfmal" || Mnemonic == "vfmsl" || Mnemonic == "vfmat" ||
      Mnemonic == "vfmab" || Mnemonic == "vdot" || Mnemonic == "vmmla" ||
      Mnemonic == "sb" || Mnemonic == "ssbb" || Mnemonic == "pssbb" ||
      Mnemonic == "vsmmla" || Mnemonic == "vummla" || Mnemonic == "vusmmla" ||
      Mnemonic == "vusdot" || Mnemonic == "vsudot" || Mnemonic == "bfcsel" ||
      Mnemonic == "wls" || Mnemonic == "dls" || Mnemonic == "le" ||
      Mnemonic == "csel" || Mnemonic == "csinc" || Mnemonic == "csinv" ||
      Mnemonic == "csneg" || Mnemonic == "cinc" || Mnemonic == "cinv" ||
      Mnemonic == "cneg" || Mnemonic == "cset" || Mnemonic == "csetm" ||
      (hasCDE() && MS.isCDEInstr(Mnemonic) &&
       !MS.isITPredicableCDEInstr(Mnemonic)) ||
      Mnemonic.starts_with("vpt") || Mnemonic.starts_with("vpst") ||
      Mnemonic == "pac" || Mnemonic == "pacbti" || Mnemonic == "aut" ||
      Mnemonic == "bti" ||
      (hasMVE() &&
       (Mnemonic.starts_with("vst2") || Mnemonic.starts_with("vld2") ||
        Mnemonic.starts_with("vst4") || Mnemonic.starts_with("vld4") ||
        Mnemonic.starts_with("wlstp") || Mnemonic.starts_with("dlstp") ||
        Mnemonic.starts_with("letp")))) {
    // These mnemonics are never predicable
    CanAcceptPredicationCode = false;
  } else if (!isThumb()) {
    // Some instructions are only predicable in Thumb mode
    CanAcceptPredicationCode =
        Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
        Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
        Mnemonic != "dmb" && Mnemonic != "dfb" && Mnemonic != "dsb" &&
        Mnemonic != "isb" && Mnemonic != "pld" && Mnemonic != "pli" &&
        Mnemonic != "pldw" && Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
        Mnemonic != "stc2" && Mnemonic != "stc2l" && Mnemonic != "tsb" &&
        !Mnemonic.starts_with("rfe") && !Mnemonic.starts_with("srs");
  } else if (isThumbOne()) {
    // In Thumb1 (pre-v6M) "nop" is really "mov r8, r8" and is unpredicated;
    // "movs" is never predicable in Thumb1.
    if (hasV6MOps())
      CanAcceptPredicationCode = Mnemonic != "movs";
    else
      CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
  } else
    CanAcceptPredicationCode = true;
}
6495
6496// Some Thumb instructions have two operand forms that are not
6497// available as three operand, convert to two operand form if possible.
6498//
6499// FIXME: We would really like to be able to tablegen'erate this.
6500void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic,
6501 bool CarrySetting,
6503 if (Operands.size() != 6)
6504 return;
6505
6506 const auto &Op3 = static_cast<ARMOperand &>(*Operands[3]);
6507 auto &Op4 = static_cast<ARMOperand &>(*Operands[4]);
6508 if (!Op3.isReg() || !Op4.isReg())
6509 return;
6510
6511 auto Op3Reg = Op3.getReg();
6512 auto Op4Reg = Op4.getReg();
6513
6514 // For most Thumb2 cases we just generate the 3 operand form and reduce
6515 // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
6516 // won't accept SP or PC so we do the transformation here taking care
6517 // with immediate range in the 'add sp, sp #imm' case.
6518 auto &Op5 = static_cast<ARMOperand &>(*Operands[5]);
6519 if (isThumbTwo()) {
6520 if (Mnemonic != "add")
6521 return;
6522 bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
6523 (Op5.isReg() && Op5.getReg() == ARM::PC);
6524 if (!TryTransform) {
6525 TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
6526 (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
6527 !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
6528 Op5.isImm() && !Op5.isImm0_508s4());
6529 }
6530 if (!TryTransform)
6531 return;
6532 } else if (!isThumbOne())
6533 return;
6534
6535 if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
6536 Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
6537 Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
6538 Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
6539 return;
6540
6541 // If first 2 operands of a 3 operand instruction are the same
6542 // then transform to 2 operand version of the same instruction
6543 // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1'
6544 bool Transform = Op3Reg == Op4Reg;
6545
6546 // For communtative operations, we might be able to transform if we swap
6547 // Op4 and Op5. The 'ADD Rdm, SP, Rdm' form is already handled specially
6548 // as tADDrsp.
6549 const ARMOperand *LastOp = &Op5;
6550 bool Swap = false;
6551 if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
6552 ((Mnemonic == "add" && Op4Reg != ARM::SP) ||
6553 Mnemonic == "and" || Mnemonic == "eor" ||
6554 Mnemonic == "adc" || Mnemonic == "orr")) {
6555 Swap = true;
6556 LastOp = &Op4;
6557 Transform = true;
6558 }
6559
6560 // If both registers are the same then remove one of them from
6561 // the operand list, with certain exceptions.
6562 if (Transform) {
6563 // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
6564 // 2 operand forms don't exist.
6565 if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
6566 LastOp->isReg())
6567 Transform = false;
6568
6569 // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
6570 // 3-bits because the ARMARM says not to.
6571 if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
6572 Transform = false;
6573 }
6574
6575 if (Transform) {
6576 if (Swap)
6577 std::swap(Op4, Op5);
6578 Operands.erase(Operands.begin() + 3);
6579 }
6580}
6581
6582// this function returns true if the operand is one of the following
6583// relocations: :upper8_15:, :upper0_7:, :lower8_15: or :lower0_7:
6585 ARMOperand &Op = static_cast<ARMOperand &>(MCOp);
6586 if (!Op.isImm())
6587 return false;
6588 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
6589 if (CE)
6590 return false;
6591 const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
6592 if (!E)
6593 return false;
6594 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
6595 if (ARM16Expr && (ARM16Expr->getKind() == ARMMCExpr::VK_ARM_HI_8_15 ||
6596 ARM16Expr->getKind() == ARMMCExpr::VK_ARM_HI_0_7 ||
6597 ARM16Expr->getKind() == ARMMCExpr::VK_ARM_LO_8_15 ||
6598 ARM16Expr->getKind() == ARMMCExpr::VK_ARM_LO_0_7))
6599 return true;
6600 return false;
6601}
6602
6603bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
6605 // FIXME: This is all horribly hacky. We really need a better way to deal
6606 // with optional operands like this in the matcher table.
6607
6608 // The 'mov' mnemonic is special. One variant has a cc_out operand, while
6609 // another does not. Specifically, the MOVW instruction does not. So we
6610 // special case it here and remove the defaulted (non-setting) cc_out
6611 // operand if that's the instruction we're trying to match.
6612 //
6613 // We do this as post-processing of the explicit operands rather than just
6614 // conditionally adding the cc_out in the first place because we need
6615 // to check the type of the parsed immediate operand.
6616 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
6617 !static_cast<ARMOperand &>(*Operands[4]).isModImm() &&
6618 static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
6619 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
6620 return true;
6621
6622 if (Mnemonic == "movs" && Operands.size() > 3 && isThumb() &&
6624 return true;
6625
6626 // Register-register 'add' for thumb does not have a cc_out operand
6627 // when there are only two register operands.
6628 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
6629 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6630 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6631 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
6632 return true;
6633 // Register-register 'add' for thumb does not have a cc_out operand
6634 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
6635 // have to check the immediate range here since Thumb2 has a variant
6636 // that can handle a different range and has a cc_out operand.
6637 if (((isThumb() && Mnemonic == "add") ||
6638 (isThumbTwo() && Mnemonic == "sub")) &&
6639 Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6640 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6641 static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
6642 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6643 ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
6644 static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
6645 return true;
6646 // For Thumb2, add/sub immediate does not have a cc_out operand for the
6647 // imm0_4095 variant. That's the least-preferred variant when
6648 // selecting via the generic "add" mnemonic, so to know that we
6649 // should remove the cc_out operand, we have to explicitly check that
6650 // it's not one of the other variants. Ugh.
6651 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
6652 Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6653 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6654 static_cast<ARMOperand &>(*Operands[5]).isImm()) {
6655 // Nest conditions rather than one big 'if' statement for readability.
6656 //
6657 // If both registers are low, we're in an IT block, and the immediate is
6658 // in range, we should use encoding T1 instead, which has a cc_out.
6659 if (inITBlock() &&
6660 isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
6661 isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
6662 static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
6663 return false;
6664 // Check against T3. If the second register is the PC, this is an
6665 // alternate form of ADR, which uses encoding T4, so check for that too.
6666 if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
6667 (static_cast<ARMOperand &>(*Operands[5]).isT2SOImm() ||
6668 static_cast<ARMOperand &>(*Operands[5]).isT2SOImmNeg()))
6669 return false;
6670
6671 // Otherwise, we use encoding T4, which does not have a cc_out
6672 // operand.
6673 return true;
6674 }
6675
6676 // The thumb2 multiply instruction doesn't have a CCOut register, so
6677 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
6678 // use the 16-bit encoding or not.
6679 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
6680 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6681 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6682 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6683 static_cast<ARMOperand &>(*Operands[5]).isReg() &&
6684 // If the registers aren't low regs, the destination reg isn't the
6685 // same as one of the source regs, or the cc_out operand is zero
6686 // outside of an IT block, we have to use the 32-bit encoding, so
6687 // remove the cc_out operand.
6688 (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
6689 !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
6690 !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
6691 !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
6692 static_cast<ARMOperand &>(*Operands[5]).getReg() &&
6693 static_cast<ARMOperand &>(*Operands[3]).getReg() !=
6694 static_cast<ARMOperand &>(*Operands[4]).getReg())))
6695 return true;
6696
6697 // Also check the 'mul' syntax variant that doesn't specify an explicit
6698 // destination register.
6699 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
6700 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6701 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6702 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6703 // If the registers aren't low regs or the cc_out operand is zero
6704 // outside of an IT block, we have to use the 32-bit encoding, so
6705 // remove the cc_out operand.
6706 (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
6707 !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
6708 !inITBlock()))
6709 return true;
6710
6711 // Register-register 'add/sub' for thumb does not have a cc_out operand
6712 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
6713 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
6714 // right, this will result in better diagnostics (which operand is off)
6715 // anyway.
6716 if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
6717 (Operands.size() == 5 || Operands.size() == 6) &&
6718 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6719 static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
6720 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6721 (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
6722 (Operands.size() == 6 &&
6723 static_cast<ARMOperand &>(*Operands[5]).isImm()))) {
6724 // Thumb2 (add|sub){s}{p}.w GPRnopc, sp, #{T2SOImm} has cc_out
6725 return (!(isThumbTwo() &&
6726 (static_cast<ARMOperand &>(*Operands[4]).isT2SOImm() ||
6727 static_cast<ARMOperand &>(*Operands[4]).isT2SOImmNeg())));
6728 }
6729 // Fixme: Should join all the thumb+thumb2 (add|sub) in a single if case
6730 // Thumb2 ADD r0, #4095 -> ADDW r0, r0, #4095 (T4)
6731 // Thumb2 SUB r0, #4095 -> SUBW r0, r0, #4095
6732 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
6733 (Operands.size() == 5) &&
6734 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6735 static_cast<ARMOperand &>(*Operands[3]).getReg() != ARM::SP &&
6736 static_cast<ARMOperand &>(*Operands[3]).getReg() != ARM::PC &&
6737 static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6738 static_cast<ARMOperand &>(*Operands[4]).isImm()) {
6739 const ARMOperand &IMM = static_cast<ARMOperand &>(*Operands[4]);
6740 if (IMM.isT2SOImm() || IMM.isT2SOImmNeg())
6741 return false; // add.w / sub.w
6742 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IMM.getImm())) {
6743 const int64_t Value = CE->getValue();
6744 // Thumb1 imm8 sub / add
6745 if ((Value < ((1 << 7) - 1) << 2) && inITBlock() && (!(Value & 3)) &&
6746 isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()))
6747 return false;
6748 return true; // Thumb2 T4 addw / subw
6749 }
6750 }
6751 return false;
6752}
6753
6754bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
6756 // VRINT{Z, X} have a predicate operand in VFP, but not in NEON
6757 unsigned RegIdx = 3;
6758 if ((((Mnemonic == "vrintz" || Mnemonic == "vrintx") && !hasMVE()) ||
6759 Mnemonic == "vrintr") &&
6760 (static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32" ||
6761 static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f16")) {
6762 if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
6763 (static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32" ||
6764 static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f16"))
6765 RegIdx = 4;
6766
6767 if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() &&
6768 (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
6769 static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) ||
6770 ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
6771 static_cast<ARMOperand &>(*Operands[RegIdx]).getReg())))
6772 return true;
6773 }
6774 return false;
6775}
6776
6777bool ARMAsmParser::shouldOmitVectorPredicateOperand(StringRef Mnemonic,
6779 if (!hasMVE() || Operands.size() < 3)
6780 return true;
6781
6782 if (Mnemonic.starts_with("vld2") || Mnemonic.starts_with("vld4") ||
6783 Mnemonic.starts_with("vst2") || Mnemonic.starts_with("vst4"))
6784 return true;
6785
6786 if (Mnemonic.starts_with("vctp") || Mnemonic.starts_with("vpnot"))
6787 return false;
6788
6789 if (Mnemonic.starts_with("vmov") &&
6790 !(Mnemonic.starts_with("vmovl") || Mnemonic.starts_with("vmovn") ||
6791 Mnemonic.starts_with("vmovx"))) {
6792 for (auto &Operand : Operands) {
6793 if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
6794 ((*Operand).isReg() &&
6795 (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
6796 (*Operand).getReg()) ||
6797 ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
6798 (*Operand).getReg())))) {
6799 return true;
6800 }
6801 }
6802 return false;
6803 } else {
6804 for (auto &Operand : Operands) {
6805 // We check the larger class QPR instead of just the legal class
6806 // MQPR, to more accurately report errors when using Q registers
6807 // outside of the allowed range.
6808 if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
6809 (Operand->isReg() &&
6810 (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
6811 Operand->getReg()))))
6812 return false;
6813 }
6814 return true;
6815 }
6816}
6817
6818static bool isDataTypeToken(StringRef Tok) {
6819 return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
6820 Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
6821 Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
6822 Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
6823 Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
6824 Tok == ".f" || Tok == ".d";
6825}
6826
6827// FIXME: This bit should probably be handled via an explicit match class
6828// in the .td files that matches the suffix instead of having it be
6829// a literal string token the way it is now.
6831 return Mnemonic.starts_with("vldm") || Mnemonic.starts_with("vstm");
6832}
6833
6834static void applyMnemonicAliases(StringRef &Mnemonic,
6835 const FeatureBitset &Features,
6836 unsigned VariantID);
6837
6838// The GNU assembler has aliases of ldrd and strd with the second register
6839// omitted. We don't have a way to do that in tablegen, so fix it up here.
6840//
6841// We have to be careful to not emit an invalid Rt2 here, because the rest of
6842// the assembly parser could then generate confusing diagnostics refering to
6843// it. If we do find anything that prevents us from doing the transformation we
6844// bail out, and let the assembly parser report an error on the instruction as
6845// it is written.
6846void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
6848 if (Mnemonic != "ldrd" && Mnemonic != "strd")
6849 return;
6850 if (Operands.size() < 4)
6851 return;
6852
6853 ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
6854 ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
6855
6856 if (!Op2.isReg())
6857 return;
6858 if (!Op3.isGPRMem())
6859 return;
6860
6861 const MCRegisterClass &GPR = MRI->getRegClass(ARM::GPRRegClassID);
6862 if (!GPR.contains(Op2.getReg()))
6863 return;
6864
6865 unsigned RtEncoding = MRI->getEncodingValue(Op2.getReg());
6866 if (!isThumb() && (RtEncoding & 1)) {
6867 // In ARM mode, the registers must be from an aligned pair, this
6868 // restriction does not apply in Thumb mode.
6869 return;
6870 }
6871 if (Op2.getReg() == ARM::PC)
6872 return;
6873 unsigned PairedReg = GPR.getRegister(RtEncoding + 1);
6874 if (!PairedReg || PairedReg == ARM::PC ||
6875 (PairedReg == ARM::SP && !hasV8Ops()))
6876 return;
6877
6878 Operands.insert(
6879 Operands.begin() + 3,
6880 ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
6881}
6882
6883// Dual-register instruction have the following syntax:
6884// <mnemonic> <predicate>? <coproc>, <Rdest>, <Rdest+1>, <Rsrc>, ..., #imm
6885// This function tries to remove <Rdest+1> and replace <Rdest> with a pair
6886// operand. If the conversion fails an error is diagnosed, and the function
6887// returns true.
6888bool ARMAsmParser::CDEConvertDualRegOperand(StringRef Mnemonic,
6890 assert(MS.isCDEDualRegInstr(Mnemonic));
6891 bool isPredicable =
6892 Mnemonic == "cx1da" || Mnemonic == "cx2da" || Mnemonic == "cx3da";
6893 size_t NumPredOps = isPredicable ? 1 : 0;
6894
6895 if (Operands.size() <= 3 + NumPredOps)
6896 return false;
6897
6898 StringRef Op2Diag(
6899 "operand must be an even-numbered register in the range [r0, r10]");
6900
6901 const MCParsedAsmOperand &Op2 = *Operands[2 + NumPredOps];
6902 if (!Op2.isReg())
6903 return Error(Op2.getStartLoc(), Op2Diag);
6904
6905 unsigned RNext;
6906 unsigned RPair;
6907 switch (Op2.getReg()) {
6908 default:
6909 return Error(Op2.getStartLoc(), Op2Diag);
6910 case ARM::R0:
6911 RNext = ARM::R1;
6912 RPair = ARM::R0_R1;
6913 break;
6914 case ARM::R2:
6915 RNext = ARM::R3;
6916 RPair = ARM::R2_R3;
6917 break;
6918 case ARM::R4:
6919 RNext = ARM::R5;
6920 RPair = ARM::R4_R5;
6921 break;
6922 case ARM::R6:
6923 RNext = ARM::R7;
6924 RPair = ARM::R6_R7;
6925 break;
6926 case ARM::R8:
6927 RNext = ARM::R9;
6928 RPair = ARM::R8_R9;
6929 break;
6930 case ARM::R10:
6931 RNext = ARM::R11;
6932 RPair = ARM::R10_R11;
6933 break;
6934 }
6935
6936 const MCParsedAsmOperand &Op3 = *Operands[3 + NumPredOps];
6937 if (!Op3.isReg() || Op3.getReg() != RNext)
6938 return Error(Op3.getStartLoc(), "operand must be a consecutive register");
6939
6940 Operands.erase(Operands.begin() + 3 + NumPredOps);
6941 Operands[2 + NumPredOps] =
6942 ARMOperand::CreateReg(RPair, Op2.getStartLoc(), Op2.getEndLoc());
6943 return false;
6944}
6945
6946/// Parse an arm instruction mnemonic followed by its operands.
6947bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
6948 SMLoc NameLoc, OperandVector &Operands) {
6949 MCAsmParser &Parser = getParser();
6950
6951 // Apply mnemonic aliases before doing anything else, as the destination
6952 // mnemonic may include suffices and we want to handle them normally.
6953 // The generic tblgen'erated code does this later, at the start of
6954 // MatchInstructionImpl(), but that's too late for aliases that include
6955 // any sort of suffix.
6956 const FeatureBitset &AvailableFeatures = getAvailableFeatures();
6957 unsigned AssemblerDialect = getParser().getAssemblerDialect();
6958 applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
6959
6960 // First check for the ARM-specific .req directive.
6961 if (Parser.getTok().is(AsmToken::Identifier) &&
6962 Parser.getTok().getIdentifier().lower() == ".req") {
6963 parseDirectiveReq(Name, NameLoc);
6964 // We always return 'error' for this, as we're done with this
6965 // statement and don't need to match the 'instruction."
6966 return true;
6967 }
6968
6969 // Create the leading tokens for the mnemonic, split by '.' characters.
6970 size_t Start = 0, Next = Name.find('.');
6971 StringRef Mnemonic = Name.slice(Start, Next);
6972 StringRef ExtraToken = Name.slice(Next, Name.find(' ', Next + 1));
6973
6974 // Split out the predication code and carry setting flag from the mnemonic.
6975 ARMCC::CondCodes PredicationCode;
6976 ARMVCC::VPTCodes VPTPredicationCode;
6977 unsigned ProcessorIMod;
6978 bool CarrySetting;
6979 StringRef ITMask;
6980 Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode, VPTPredicationCode,
6981 CarrySetting, ProcessorIMod, ITMask);
6982
6983 // In Thumb1, only the branch (B) instruction can be predicated.
6984 if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
6985 return Error(NameLoc, "conditional execution not supported in Thumb1");
6986 }
6987
6988 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
6989
6990 // Handle the mask for IT and VPT instructions. In ARMOperand and
6991 // MCOperand, this is stored in a format independent of the
6992 // condition code: the lowest set bit indicates the end of the
6993 // encoding, and above that, a 1 bit indicates 'else', and an 0
6994 // indicates 'then'. E.g.
6995 // IT -> 1000
6996 // ITx -> x100 (ITT -> 0100, ITE -> 1100)
6997 // ITxy -> xy10 (e.g. ITET -> 1010)
6998 // ITxyz -> xyz1 (e.g. ITEET -> 1101)
6999 // Note: See the ARM::PredBlockMask enum in
7000 // /lib/Target/ARM/Utils/ARMBaseInfo.h
7001 if (Mnemonic == "it" || Mnemonic.starts_with("vpt") ||
7002 Mnemonic.starts_with("vpst")) {
7003 SMLoc Loc = Mnemonic == "it" ? SMLoc::getFromPointer(NameLoc.getPointer() + 2) :
7004 Mnemonic == "vpt" ? SMLoc::getFromPointer(NameLoc.getPointer() + 3) :
7005 SMLoc::getFromPointer(NameLoc.getPointer() + 4);
7006 if (ITMask.size() > 3) {
7007 if (Mnemonic == "it")
7008 return Error(Loc, "too many conditions on IT instruction");
7009 return Error(Loc, "too many conditions on VPT instruction");
7010 }
7011 unsigned Mask = 8;
7012 for (char Pos : llvm::reverse(ITMask)) {
7013 if (Pos != 't' && Pos != 'e') {
7014 return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
7015 }
7016 Mask >>= 1;
7017 if (Pos == 'e')
7018 Mask |= 8;
7019 }
7020 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
7021 }
7022
7023 // FIXME: This is all a pretty gross hack. We should automatically handle
7024 // optional operands like this via tblgen.
7025
7026 // Next, add the CCOut and ConditionCode operands, if needed.
7027 //
7028 // For mnemonics which can ever incorporate a carry setting bit or predication
7029 // code, our matching model involves us always generating CCOut and
7030 // ConditionCode operands to match the mnemonic "as written" and then we let
7031 // the matcher deal with finding the right instruction or generating an
7032 // appropriate error.
7033 bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode;
7034 getMnemonicAcceptInfo(Mnemonic, ExtraToken, Name, CanAcceptCarrySet,
7035 CanAcceptPredicationCode, CanAcceptVPTPredicationCode);
7036
7037 // If we had a carry-set on an instruction that can't do that, issue an
7038 // error.
7039 if (!CanAcceptCarrySet && CarrySetting) {
7040 return Error(NameLoc, "instruction '" + Mnemonic +
7041 "' can not set flags, but 's' suffix specified");
7042 }
7043 // If we had a predication code on an instruction that can't do that, issue an
7044 // error.
7045 if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
7046 return Error(NameLoc, "instruction '" + Mnemonic +
7047 "' is not predicable, but condition code specified");
7048 }
7049
7050 // If we had a VPT predication code on an instruction that can't do that, issue an
7051 // error.
7052 if (!CanAcceptVPTPredicationCode && VPTPredicationCode != ARMVCC::None) {
7053 return Error(NameLoc, "instruction '" + Mnemonic +
7054 "' is not VPT predicable, but VPT code T/E is specified");
7055 }
7056
7057 // Add the carry setting operand, if necessary.
7058 if (CanAcceptCarrySet) {
7059 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
7060 Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
7061 Loc));
7062 }
7063
7064 // Add the predication code operand, if necessary.
7065 if (CanAcceptPredicationCode) {
7066 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
7067 CarrySetting);
7068 Operands.push_back(ARMOperand::CreateCondCode(
7069 ARMCC::CondCodes(PredicationCode), Loc));
7070 }
7071
7072 // Add the VPT predication code operand, if necessary.
7073 // FIXME: We don't add them for the instructions filtered below as these can
7074 // have custom operands which need special parsing. This parsing requires
7075 // the operand to be in the same place in the OperandVector as their
7076 // definition in tblgen. Since these instructions may also have the
7077 // scalar predication operand we do not add the vector one and leave until
7078 // now to fix it up.
7079 if (CanAcceptVPTPredicationCode && Mnemonic != "vmov" &&
7080 !Mnemonic.starts_with("vcmp") &&
7081 !(Mnemonic.starts_with("vcvt") && Mnemonic != "vcvta" &&
7082 Mnemonic != "vcvtn" && Mnemonic != "vcvtp" && Mnemonic != "vcvtm")) {
7083 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
7084 CarrySetting);
7085 Operands.push_back(ARMOperand::CreateVPTPred(
7086 ARMVCC::VPTCodes(VPTPredicationCode), Loc));
7087 }
7088
7089 // Add the processor imod operand, if necessary.
7090 if (ProcessorIMod) {
7091 Operands.push_back(ARMOperand::CreateImm(
7092 MCConstantExpr::create(ProcessorIMod, getContext()),
7093 NameLoc, NameLoc));
7094 } else if (Mnemonic == "cps" && isMClass()) {
7095 return Error(NameLoc, "instruction 'cps' requires effect for M-class");
7096 }
7097
7098 // Add the remaining tokens in the mnemonic.
7099 while (Next != StringRef::npos) {
7100 Start = Next;
7101 Next = Name.find('.', Start + 1);
7102 ExtraToken = Name.slice(Start, Next);
7103
7104 // Some NEON instructions have an optional datatype suffix that is
7105 // completely ignored. Check for that.
7106 if (isDataTypeToken(ExtraToken) &&
7107 doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
7108 continue;
7109
 7110 // For ARM mode generate an error if the .n qualifier is used.
7111 if (ExtraToken == ".n" && !isThumb()) {
7112 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
7113 return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
7114 "arm mode");
7115 }
7116
7117 // The .n qualifier is always discarded as that is what the tables
7118 // and matcher expect. In ARM mode the .w qualifier has no effect,
7119 // so discard it to avoid errors that can be caused by the matcher.
7120 if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
7121 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
7122 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
7123 }
7124 }
7125
7126 // Read the remaining operands.
7127 if (getLexer().isNot(AsmToken::EndOfStatement)) {
7128 // Read the first operand.
7129 if (parseOperand(Operands, Mnemonic)) {
7130 return true;
7131 }
7132
7133 while (parseOptionalToken(AsmToken::Comma)) {
7134 // Parse and remember the operand.
7135 if (parseOperand(Operands, Mnemonic)) {
7136 return true;
7137 }
7138 }
7139 }
7140
7141 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
7142 return true;
7143
7144 tryConvertingToTwoOperandForm(Mnemonic, CarrySetting, Operands);
7145
7146 if (hasCDE() && MS.isCDEInstr(Mnemonic)) {
7147 // Dual-register instructions use even-odd register pairs as their
7148 // destination operand, in assembly such pair is spelled as two
7149 // consecutive registers, without any special syntax. ConvertDualRegOperand
7150 // tries to convert such operand into register pair, e.g. r2, r3 -> r2_r3.
7151 // It returns true, if an error message has been emitted. If the function
7152 // returns false, the function either succeeded or an error (e.g. missing
7153 // operand) will be diagnosed elsewhere.
7154 if (MS.isCDEDualRegInstr(Mnemonic)) {
7155 bool GotError = CDEConvertDualRegOperand(Mnemonic, Operands);
7156 if (GotError)
7157 return GotError;
7158 }
7159 }
7160
7161 // Some instructions, mostly Thumb, have forms for the same mnemonic that
7162 // do and don't have a cc_out optional-def operand. With some spot-checks
7163 // of the operand list, we can figure out which variant we're trying to
7164 // parse and adjust accordingly before actually matching. We shouldn't ever
7165 // try to remove a cc_out operand that was explicitly set on the
7166 // mnemonic, of course (CarrySetting == true). Reason number #317 the
7167 // table driven matcher doesn't fit well with the ARM instruction set.
7168 if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
7169 Operands.erase(Operands.begin() + 1);
7170
7171 // Some instructions have the same mnemonic, but don't always
7172 // have a predicate. Distinguish them here and delete the
7173 // appropriate predicate if needed. This could be either the scalar
7174 // predication code or the vector predication code.
7175 if (PredicationCode == ARMCC::AL &&
7176 shouldOmitPredicateOperand(Mnemonic, Operands))
7177 Operands.erase(Operands.begin() + 1);
7178
7179
7180 if (hasMVE()) {
7181 if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands) &&
7182 Mnemonic == "vmov" && PredicationCode == ARMCC::LT) {
7183 // Very nasty hack to deal with the vector predicated variant of vmovlt
7184 // the scalar predicated vmov with condition 'lt'. We can not tell them
7185 // apart until we have parsed their operands.
7186 Operands.erase(Operands.begin() + 1);
7187 Operands.erase(Operands.begin());
7188 SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7189 SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7190 Mnemonic.size() - 1 + CarrySetting);
7191 Operands.insert(Operands.begin(),
7192 ARMOperand::CreateVPTPred(ARMVCC::None, PLoc));
7193 Operands.insert(Operands.begin(),
7194 ARMOperand::CreateToken(StringRef("vmovlt"), MLoc));
7195 } else if (Mnemonic == "vcvt" && PredicationCode == ARMCC::NE &&
7196 !shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7197 // Another nasty hack to deal with the ambiguity between vcvt with scalar
7198 // predication 'ne' and vcvtn with vector predication 'e'. As above we
7199 // can only distinguish between the two after we have parsed their
7200 // operands.
7201 Operands.erase(Operands.begin() + 1);
7202 Operands.erase(Operands.begin());
7203 SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7204 SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7205 Mnemonic.size() - 1 + CarrySetting);
7206 Operands.insert(Operands.begin(),
7207 ARMOperand::CreateVPTPred(ARMVCC::Else, PLoc));
7208 Operands.insert(Operands.begin(),
7209 ARMOperand::CreateToken(StringRef("vcvtn"), MLoc));
7210 } else if (Mnemonic == "vmul" && PredicationCode == ARMCC::LT &&
7211 !shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7212 // Another hack, this time to distinguish between scalar predicated vmul
7213 // with 'lt' predication code and the vector instruction vmullt with
7214 // vector predication code "none"
7215 Operands.erase(Operands.begin() + 1);
7216 Operands.erase(Operands.begin());
7217 SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7218 Operands.insert(Operands.begin(),
7219 ARMOperand::CreateToken(StringRef("vmullt"), MLoc));
7220 }
7221 // For vmov and vcmp, as mentioned earlier, we did not add the vector
7222 // predication code, since these may contain operands that require
7223 // special parsing. So now we have to see if they require vector
7224 // predication and replace the scalar one with the vector predication
7225 // operand if that is the case.
7226 else if (Mnemonic == "vmov" || Mnemonic.starts_with("vcmp") ||
7227 (Mnemonic.starts_with("vcvt") && !Mnemonic.starts_with("vcvta") &&
7228 !Mnemonic.starts_with("vcvtn") &&
7229 !Mnemonic.starts_with("vcvtp") &&
7230 !Mnemonic.starts_with("vcvtm"))) {
7231 if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7232 // We could not split the vector predicate off vcvt because it might
7233 // have been the scalar vcvtt instruction. Now we know its a vector
7234 // instruction, we still need to check whether its the vector
7235 // predicated vcvt with 'Then' predication or the vector vcvtt. We can
7236 // distinguish the two based on the suffixes, if it is any of
7237 // ".f16.f32", ".f32.f16", ".f16.f64" or ".f64.f16" then it is the vcvtt.
7238 if (Mnemonic.starts_with("vcvtt") && Operands.size() >= 4) {
7239 auto Sz1 = static_cast<ARMOperand &>(*Operands[2]);
7240 auto Sz2 = static_cast<ARMOperand &>(*Operands[3]);
7241 if (!(Sz1.isToken() && Sz1.getToken().starts_with(".f") &&
7242 Sz2.isToken() && Sz2.getToken().starts_with(".f"))) {
7243 Operands.erase(Operands.begin());
7244 SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7245 VPTPredicationCode = ARMVCC::Then;
7246
7247 Mnemonic = Mnemonic.substr(0, 4);
7248 Operands.insert(Operands.begin(),
7249 ARMOperand::CreateToken(Mnemonic, MLoc));
7250 }
7251 }
7252 Operands.erase(Operands.begin() + 1);
7253 SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7254 Mnemonic.size() + CarrySetting);
7255 Operands.insert(Operands.begin() + 1,
7256 ARMOperand::CreateVPTPred(
7257 ARMVCC::VPTCodes(VPTPredicationCode), PLoc));
7258 }
7259 } else if (CanAcceptVPTPredicationCode) {
7260 // For all other instructions, make sure only one of the two
7261 // predication operands is left behind, depending on whether we should
7262 // use the vector predication.
7263 if (shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
7264 if (CanAcceptPredicationCode)
7265 Operands.erase(Operands.begin() + 2);
7266 else
7267 Operands.erase(Operands.begin() + 1);
7268 } else if (CanAcceptPredicationCode && PredicationCode == ARMCC::AL) {
7269 Operands.erase(Operands.begin() + 1);
7270 }
7271 }
7272 }
7273
7274 if (VPTPredicationCode != ARMVCC::None) {
7275 bool usedVPTPredicationCode = false;
7276 for (unsigned I = 1; I < Operands.size(); ++I)
7277 if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
7278 usedVPTPredicationCode = true;
7279 if (!usedVPTPredicationCode) {
7280 // If we have a VPT predication code and we haven't just turned it
7281 // into an operand, then it was a mistake for splitMnemonic to
7282 // separate it from the rest of the mnemonic in the first place,
7283 // and this may lead to wrong disassembly (e.g. scalar floating
7284 // point VCMPE is actually a different instruction from VCMP, so
7285 // we mustn't treat them the same). In that situation, glue it
7286 // back on.
7287 Mnemonic = Name.slice(0, Mnemonic.size() + 1);
7288 Operands.erase(Operands.begin());
7289 Operands.insert(Operands.begin(),
7290 ARMOperand::CreateToken(Mnemonic, NameLoc));
7291 }
7292 }
7293
7294 // ARM mode 'blx' need special handling, as the register operand version
7295 // is predicable, but the label operand version is not. So, we can't rely
7296 // on the Mnemonic based checking to correctly figure out when to put
7297 // a k_CondCode operand in the list. If we're trying to match the label
7298 // version, remove the k_CondCode operand here.
7299 if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
7300 static_cast<ARMOperand &>(*Operands[2]).isImm())
7301 Operands.erase(Operands.begin() + 1);
7302
7303 // Adjust operands of ldrexd/strexd to MCK_GPRPair.
7304 // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
7305 // a single GPRPair reg operand is used in the .td file to replace the two
7306 // GPRs. However, when parsing from asm, the two GRPs cannot be
7307 // automatically
7308 // expressed as a GPRPair, so we have to manually merge them.
7309 // FIXME: We would really like to be able to tablegen'erate this.
7310 if (!isThumb() && Operands.size() > 4 &&
7311 (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
7312 Mnemonic == "stlexd")) {
7313 bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
7314 unsigned Idx = isLoad ? 2 : 3;
7315 ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
7316 ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
7317
7318 const MCRegisterClass &MRC = MRI->getRegClass(ARM::GPRRegClassID);
7319 // Adjust only if Op1 and Op2 are GPRs.
7320 if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
7321 MRC.contains(Op2.getReg())) {
7322 unsigned Reg1 = Op1.getReg();
7323 unsigned Reg2 = Op2.getReg();
7324 unsigned Rt = MRI->getEncodingValue(Reg1);
7325 unsigned Rt2 = MRI->getEncodingValue(Reg2);
7326
7327 // Rt2 must be Rt + 1 and Rt must be even.
7328 if (Rt + 1 != Rt2 || (Rt & 1)) {
7329 return Error(Op2.getStartLoc(),
7330 isLoad ? "destination operands must be sequential"
7331 : "source operands must be sequential");
7332 }
7333 unsigned NewReg = MRI->getMatchingSuperReg(
7334 Reg1, ARM::gsub_0, &(MRI->getRegClass(ARM::GPRPairRegClassID)));
7335 Operands[Idx] =
7336 ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
7337 Operands.erase(Operands.begin() + Idx + 1);
7338 }
7339 }
7340
7341 // GNU Assembler extension (compatibility).
7342 fixupGNULDRDAlias(Mnemonic, Operands);
7343
7344 // FIXME: As said above, this is all a pretty gross hack. This instruction
7345 // does not fit with other "subs" and tblgen.
7346 // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
7347 // so the Mnemonic is the original name "subs" and delete the predicate
7348 // operand so it will match the table entry.
7349 if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
7350 static_cast<ARMOperand &>(*Operands[3]).isReg() &&
7351 static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
7352 static_cast<ARMOperand &>(*Operands[4]).isReg() &&
7353 static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
7354 static_cast<ARMOperand &>(*Operands[5]).isImm()) {
7355 Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
7356 Operands.erase(Operands.begin() + 1);
7357 }
7358 return false;
7359}
7360
7361// Validate context-sensitive operand constraints.
7362
7363// return 'true' if register list contains non-low GPR registers,
7364// 'false' otherwise. If Reg is in the register list or is HiReg, set
7365// 'containsReg' to true.
7366static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo,
7367 unsigned Reg, unsigned HiReg,
7368 bool &containsReg) {
7369 containsReg = false;
7370 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
7371 unsigned OpReg = Inst.getOperand(i).getReg();
7372 if (OpReg == Reg)
7373 containsReg = true;
7374 // Anything other than a low register isn't legal here.
7375 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
7376 return true;
7377 }
7378 return false;
7379}
7380
7381// Check if the specified regisgter is in the register list of the inst,
7382// starting at the indicated operand number.
7383static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg) {
7384 for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) {
7385 unsigned OpReg = Inst.getOperand(i).getReg();
7386 if (OpReg == Reg)
7387 return true;
7388 }
7389 return false;
7390}
7391
7392// Return true if instruction has the interesting property of being
7393// allowed in IT blocks, but not being predicable.
7394static bool instIsBreakpoint(const MCInst &Inst) {
7395 return Inst.getOpcode() == ARM::tBKPT ||
7396 Inst.getOpcode() == ARM::BKPT ||
7397 Inst.getOpcode() == ARM::tHLT ||
7398 Inst.getOpcode() == ARM::HLT;
7399}
7400
7401bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
7402 const OperandVector &Operands,
7403 unsigned ListNo, bool IsARPop) {
7404 const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
7405 bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
7406
7407 bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
7408 bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR);
7409 bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
7410
7411 if (!IsARPop && ListContainsSP)
7412 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7413 "SP may not be in the register list");
7414 else if (ListContainsPC && ListContainsLR)
7415 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7416 "PC and LR may not be in the register list simultaneously");
7417 return false;
7418}
7419
7420bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
7421 const OperandVector &Operands,
7422 unsigned ListNo) {
7423 const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
7424 bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
7425
7426 bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
7427 bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
7428
7429 if (ListContainsSP && ListContainsPC)
7430 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7431 "SP and PC may not be in the register list");
7432 else if (ListContainsSP)
7433 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7434 "SP may not be in the register list");
7435 else if (ListContainsPC)
7436 return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
7437 "PC may not be in the register list");
7438 return false;
7439}
7440
7441bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst,
7442 const OperandVector &Operands,
7443 bool Load, bool ARMMode, bool Writeback) {
7444 unsigned RtIndex = Load || !Writeback ? 0 : 1;
7445 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(RtIndex).getReg());
7446 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(RtIndex + 1).getReg());
7447
7448 if (ARMMode) {
7449 // Rt can't be R14.
7450 if (Rt == 14)
7451 return Error(Operands[3]->getStartLoc(),
7452 "Rt can't be R14");
7453
7454 // Rt must be even-numbered.
7455 if ((Rt & 1) == 1)
7456 return Error(Operands[3]->getStartLoc(),
7457 "Rt must be even-numbered");
7458
7459 // Rt2 must be Rt + 1.
7460 if (Rt2 != Rt + 1) {
7461 if (Load)
7462 return Error(Operands[3]->getStartLoc(),
7463 "destination operands must be sequential");
7464 else
7465 return Error(Operands[3]->getStartLoc(),
7466 "source operands must be sequential");
7467 }
7468
7469 // FIXME: Diagnose m == 15
7470 // FIXME: Diagnose ldrd with m == t || m == t2.
7471 }
7472
7473 if (!ARMMode && Load) {
7474 if (Rt2 == Rt)
7475 return Error(Operands[3]->getStartLoc(),
7476 "destination operands can't be identical");
7477 }
7478
7479 if (Writeback) {
7480 unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
7481
7482 if (Rn == Rt || Rn == Rt2) {
7483 if (Load)
7484 return Error(Operands[3]->getStartLoc(),
7485 "base register needs to be different from destination "
7486 "registers");
7487 else
7488 return Error(Operands[3]->getStartLoc(),
7489 "source register and base register can't be identical");
7490 }
7491
7492 // FIXME: Diagnose ldrd/strd with writeback and n == 15.
7493 // (Except the immediate form of ldrd?)
7494 }
7495
7496 return false;
7497}
7498
7500 for (unsigned i = 0; i < MCID.NumOperands; ++i) {
7501 if (ARM::isVpred(MCID.operands()[i].OperandType))
7502 return i;
7503 }
7504 return -1;
7505}
7506
7507static bool isVectorPredicable(const MCInstrDesc &MCID) {
7508 return findFirstVectorPredOperandIdx(MCID) != -1;
7509}
7510
7512 ARMOperand &Op = static_cast<ARMOperand &>(MCOp);
7513 if (!Op.isImm())
7514 return false;
7515 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
7516 if (CE)
7517 return false;
7518 const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
7519 if (!E)
7520 return false;
7521 return true;
7522}
7523
7524// FIXME: We would really like to be able to tablegen'erate this.
7525bool ARMAsmParser::validateInstruction(MCInst &Inst,
7526 const OperandVector &Operands) {
7527 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
7528 SMLoc Loc = Operands[0]->getStartLoc();
7529
7530 // Check the IT block state first.
7531 // NOTE: BKPT and HLT instructions have the interesting property of being
7532 // allowed in IT blocks, but not being predicable. They just always execute.
7533 if (inITBlock() && !instIsBreakpoint(Inst)) {
7534 // The instruction must be predicable.
7535 if (!MCID.isPredicable())
7536 return Error(Loc, "instructions in IT block must be predicable");
7539 if (Cond != currentITCond()) {
7540 // Find the condition code Operand to get its SMLoc information.
7541 SMLoc CondLoc;
7542 for (unsigned I = 1; I < Operands.size(); ++I)
7543 if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
7544 CondLoc = Operands[I]->getStartLoc();
7545 return Error(CondLoc, "incorrect condition in IT block; got '" +
7547 "', but expected '" +
7548 ARMCondCodeToString(currentITCond()) + "'");
7549 }
7550 // Check for non-'al' condition codes outside of the IT block.
7551 } else if (isThumbTwo() && MCID.isPredicable() &&
7552 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
7553 ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
7554 Inst.getOpcode() != ARM::t2Bcc &&
7555 Inst.getOpcode() != ARM::t2BFic) {
7556 return Error(Loc, "predicated instructions must be in IT block");
7557 } else if (!isThumb() && !useImplicitITARM() && MCID.isPredicable() &&
7558 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
7559 ARMCC::AL) {
7560 return Warning(Loc, "predicated instructions should be in IT block");
7561 } else if (!MCID.isPredicable()) {
7562 // Check the instruction doesn't have a predicate operand anyway
7563 // that it's not allowed to use. Sometimes this happens in order
7564 // to keep instructions the same shape even though one cannot
7565 // legally be predicated, e.g. vmul.f16 vs vmul.f32.
7566 for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
7567 if (MCID.operands()[i].isPredicate()) {
7568 if (Inst.getOperand(i).getImm() != ARMCC::AL)
7569 return Error(Loc, "instruction is not predicable");
7570 break;
7571 }
7572 }
7573 }
7574
7575 // PC-setting instructions in an IT block, but not the last instruction of
7576 // the block, are UNPREDICTABLE.
7577 if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
7578 return Error(Loc, "instruction must be outside of IT block or the last instruction in an IT block");
7579 }
7580
7581 if (inVPTBlock() && !instIsBreakpoint(Inst)) {
7582 unsigned Bit = extractITMaskBit(VPTState.Mask, VPTState.CurPosition);
7583 if (!isVectorPredicable(MCID))
7584 return Error(Loc, "instruction in VPT block must be predicable");
7585 unsigned Pred = Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm();
7586 unsigned VPTPred = Bit ? ARMVCC::Else : ARMVCC::Then;
7587 if (Pred != VPTPred) {
7588 SMLoc PredLoc;
7589 for (unsigned I = 1; I < Operands.size(); ++I)
7590 if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
7591 PredLoc = Operands[I]->getStartLoc();
7592 return Error(PredLoc, "incorrect predication in VPT block; got '" +
7594 "', but expected '" +
7595 ARMVPTPredToString(ARMVCC::VPTCodes(VPTPred)) + "'");
7596 }
7597 }
7598 else if (isVectorPredicable(MCID) &&
7599 Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm() !=
7601 return Error(Loc, "VPT predicated instructions must be in VPT block");
7602
7603 const unsigned Opcode = Inst.getOpcode();
7604 switch (Opcode) {
7605 case ARM::VLLDM:
7606 case ARM::VLLDM_T2:
7607 case ARM::VLSTM:
7608 case ARM::VLSTM_T2: {
7609 // Since in some cases both T1 and T2 are valid, tablegen can not always
7610 // pick the correct instruction.
7611 if (Operands.size() == 4) { // a register list has been provided
7612 ARMOperand &Op = static_cast<ARMOperand &>(
7613 *Operands[3]); // the register list, a dpr_reglist
7614 assert(Op.isDPRRegList());
7615 auto &RegList = Op.getRegList();
7616 // T2 requires v8.1-M.Main (cannot be handled by tablegen)
7617 if (RegList.size() == 32 && !hasV8_1MMainline()) {
7618 return Error(Op.getEndLoc(), "T2 version requires v8.1-M.Main");
7619 }
7620 // When target has 32 D registers, T1 is undefined.
7621 if (hasD32() && RegList.size() != 32) {
7622 return Error(Op.getEndLoc(), "operand must be exactly {d0-d31}");
7623 }
7624 // When target has 16 D registers, both T1 and T2 are valid.
7625 if (!hasD32() && (RegList.size() != 16 && RegList.size() != 32)) {
7626 return Error(Op.getEndLoc(),
7627 "operand must be exactly {d0-d15} (T1) or {d0-d31} (T2)");
7628 }
7629 }
7630 return false;
7631 }
7632 case ARM::t2IT: {
7633 // Encoding is unpredictable if it ever results in a notional 'NV'
7634 // predicate. Since we don't parse 'NV' directly this means an 'AL'
7635 // predicate with an "else" mask bit.
7636 unsigned Cond = Inst.getOperand(0).getImm();
7637 unsigned Mask = Inst.getOperand(1).getImm();
7638
7639 // Conditions only allowing a 't' are those with no set bit except
7640 // the lowest-order one that indicates the end of the sequence. In
7641 // other words, powers of 2.
7642 if (Cond == ARMCC::AL && llvm::popcount(Mask) != 1)
7643 return Error(Loc, "unpredictable IT predicate sequence");
7644 break;
7645 }
7646 case ARM::LDRD:
7647 if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
7648 /*Writeback*/false))
7649 return true;
7650 break;
7651 case ARM::LDRD_PRE:
7652 case ARM::LDRD_POST:
7653 if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
7654 /*Writeback*/true))
7655 return true;
7656 break;
7657 case ARM::t2LDRDi8:
7658 if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
7659 /*Writeback*/false))
7660 return true;
7661 break;
7662 case ARM::t2LDRD_PRE:
7663 case ARM::t2LDRD_POST:
7664 if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
7665 /*Writeback*/true))
7666 return true;
7667 break;
7668 case ARM::t2BXJ: {
7669 const unsigned RmReg = Inst.getOperand(0).getReg();
7670 // Rm = SP is no longer unpredictable in v8-A
7671 if (RmReg == ARM::SP && !hasV8Ops())
7672 return Error(Operands[2]->getStartLoc(),
7673 "r13 (SP) is an unpredictable operand to BXJ");
7674 return false;
7675 }
7676 case ARM::STRD:
7677 if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
7678 /*Writeback*/false))
7679 return true;
7680 break;
7681 case ARM::STRD_PRE:
7682 case ARM::STRD_POST:
7683 if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
7684 /*Writeback*/true))
7685 return true;
7686 break;
7687 case ARM::t2STRD_PRE:
7688 case ARM::t2STRD_POST:
7689 if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/false,
7690 /*Writeback*/true))
7691 return true;
7692 break;
7693 case ARM::STR_PRE_IMM:
7694 case ARM::STR_PRE_REG:
7695 case ARM::t2STR_PRE:
7696 case ARM::STR_POST_IMM:
7697 case ARM::STR_POST_REG:
7698 case ARM::t2STR_POST:
7699 case ARM::STRH_PRE:
7700 case ARM::t2STRH_PRE:
7701 case ARM::STRH_POST:
7702 case ARM::t2STRH_POST:
7703 case ARM::STRB_PRE_IMM:
7704 case ARM::STRB_PRE_REG:
7705 case ARM::t2STRB_PRE:
7706 case ARM::STRB_POST_IMM:
7707 case ARM::STRB_POST_REG:
7708 case ARM::t2STRB_POST: {
7709 // Rt must be different from Rn.
7710 const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
7711 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7712
7713 if (Rt == Rn)
7714 return Error(Operands[3]->getStartLoc(),
7715 "source register and base register can't be identical");
7716 return false;
7717 }
7718 case ARM::t2LDR_PRE_imm:
7719 case ARM::t2LDR_POST_imm:
7720 case ARM::t2STR_PRE_imm:
7721 case ARM::t2STR_POST_imm: {
7722 // Rt must be different from Rn.
7723 const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
7724 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(1).getReg());
7725
7726 if (Rt == Rn)
7727 return Error(Operands[3]->getStartLoc(),
7728 "destination register and base register can't be identical");
7729 if (Inst.getOpcode() == ARM::t2LDR_POST_imm ||
7730 Inst.getOpcode() == ARM::t2STR_POST_imm) {
7731 int Imm = Inst.getOperand(2).getImm();
7732 if (Imm > 255 || Imm < -255)
7733 return Error(Operands[5]->getStartLoc(),
7734 "operand must be in range [-255, 255]");
7735 }
7736 if (Inst.getOpcode() == ARM::t2STR_PRE_imm ||
7737 Inst.getOpcode() == ARM::t2STR_POST_imm) {
7738 if (Inst.getOperand(0).getReg() == ARM::PC) {
7739 return Error(Operands[3]->getStartLoc(),
7740 "operand must be a register in range [r0, r14]");
7741 }
7742 }
7743 return false;
7744 }
7745
7746 case ARM::t2LDRB_OFFSET_imm:
7747 case ARM::t2LDRB_PRE_imm:
7748 case ARM::t2LDRB_POST_imm:
7749 case ARM::t2STRB_OFFSET_imm:
7750 case ARM::t2STRB_PRE_imm:
7751 case ARM::t2STRB_POST_imm: {
7752 if (Inst.getOpcode() == ARM::t2LDRB_POST_imm ||
7753 Inst.getOpcode() == ARM::t2STRB_POST_imm ||
7754 Inst.getOpcode() == ARM::t2LDRB_PRE_imm ||
7755 Inst.getOpcode() == ARM::t2STRB_PRE_imm) {
7756 int Imm = Inst.getOperand(2).getImm();
7757 if (Imm > 255 || Imm < -255)
7758 return Error(Operands[5]->getStartLoc(),
7759 "operand must be in range [-255, 255]");
7760 } else if (Inst.getOpcode() == ARM::t2LDRB_OFFSET_imm ||
7761 Inst.getOpcode() == ARM::t2STRB_OFFSET_imm) {
7762 int Imm = Inst.getOperand(2).getImm();
7763 if (Imm > 0 || Imm < -255)
7764 return Error(Operands[5]->getStartLoc(),
7765 "operand must be in range [0, 255] with a negative sign");
7766 }
7767 if (Inst.getOperand(0).getReg() == ARM::PC) {
7768 return Error(Operands[3]->getStartLoc(),
7769 "if operand is PC, should call the LDRB (literal)");
7770 }
7771 return false;
7772 }
7773
7774 case ARM::t2LDRH_OFFSET_imm:
7775 case ARM::t2LDRH_PRE_imm:
7776 case ARM::t2LDRH_POST_imm:
7777 case ARM::t2STRH_OFFSET_imm:
7778 case ARM::t2STRH_PRE_imm:
7779 case ARM::t2STRH_POST_imm: {
7780 if (Inst.getOpcode() == ARM::t2LDRH_POST_imm ||
7781 Inst.getOpcode() == ARM::t2STRH_POST_imm ||
7782 Inst.getOpcode() == ARM::t2LDRH_PRE_imm ||
7783 Inst.getOpcode() == ARM::t2STRH_PRE_imm) {
7784 int Imm = Inst.getOperand(2).getImm();
7785 if (Imm > 255 || Imm < -255)
7786 return Error(Operands[5]->getStartLoc(),
7787 "operand must be in range [-255, 255]");
7788 } else if (Inst.getOpcode() == ARM::t2LDRH_OFFSET_imm ||
7789 Inst.getOpcode() == ARM::t2STRH_OFFSET_imm) {
7790 int Imm = Inst.getOperand(2).getImm();
7791 if (Imm > 0 || Imm < -255)
7792 return Error(Operands[5]->getStartLoc(),
7793 "operand must be in range [0, 255] with a negative sign");
7794 }
7795 if (Inst.getOperand(0).getReg() == ARM::PC) {
7796 return Error(Operands[3]->getStartLoc(),
7797 "if operand is PC, should call the LDRH (literal)");
7798 }
7799 return false;
7800 }
7801
7802 case ARM::t2LDRSB_OFFSET_imm:
7803 case ARM::t2LDRSB_PRE_imm:
7804 case ARM::t2LDRSB_POST_imm: {
7805 if (Inst.getOpcode() == ARM::t2LDRSB_POST_imm ||
7806 Inst.getOpcode() == ARM::t2LDRSB_PRE_imm) {
7807 int Imm = Inst.getOperand(2).getImm();
7808 if (Imm > 255 || Imm < -255)
7809 return Error(Operands[5]->getStartLoc(),
7810 "operand must be in range [-255, 255]");
7811 } else if (Inst.getOpcode() == ARM::t2LDRSB_OFFSET_imm) {
7812 int Imm = Inst.getOperand(2).getImm();
7813 if (Imm > 0 || Imm < -255)
7814 return Error(Operands[5]->getStartLoc(),
7815 "operand must be in range [0, 255] with a negative sign");
7816 }
7817 if (Inst.getOperand(0).getReg() == ARM::PC) {
7818 return Error(Operands[3]->getStartLoc(),
7819 "if operand is PC, should call the LDRH (literal)");
7820 }
7821 return false;
7822 }
7823
7824 case ARM::t2LDRSH_OFFSET_imm:
7825 case ARM::t2LDRSH_PRE_imm:
7826 case ARM::t2LDRSH_POST_imm: {
7827 if (Inst.getOpcode() == ARM::t2LDRSH_POST_imm ||
7828 Inst.getOpcode() == ARM::t2LDRSH_PRE_imm) {
7829 int Imm = Inst.getOperand(2).getImm();
7830 if (Imm > 255 || Imm < -255)
7831 return Error(Operands[5]->getStartLoc(),
7832 "operand must be in range [-255, 255]");
7833 } else if (Inst.getOpcode() == ARM::t2LDRSH_OFFSET_imm) {
7834 int Imm = Inst.getOperand(2).getImm();
7835 if (Imm > 0 || Imm < -255)
7836 return Error(Operands[5]->getStartLoc(),
7837 "operand must be in range [0, 255] with a negative sign");
7838 }
7839 if (Inst.getOperand(0).getReg() == ARM::PC) {
7840 return Error(Operands[3]->getStartLoc(),
7841 "if operand is PC, should call the LDRH (literal)");
7842 }
7843 return false;
7844 }
7845
7846 case ARM::LDR_PRE_IMM:
7847 case ARM::LDR_PRE_REG:
7848 case ARM::t2LDR_PRE:
7849 case ARM::LDR_POST_IMM:
7850 case ARM::LDR_POST_REG:
7851 case ARM::t2LDR_POST:
7852 case ARM::LDRH_PRE:
7853 case ARM::t2LDRH_PRE:
7854 case ARM::LDRH_POST:
7855 case ARM::t2LDRH_POST:
7856 case ARM::LDRSH_PRE:
7857 case ARM::t2LDRSH_PRE:
7858 case ARM::LDRSH_POST:
7859 case ARM::t2LDRSH_POST:
7860 case ARM::LDRB_PRE_IMM:
7861 case ARM::LDRB_PRE_REG:
7862 case ARM::t2LDRB_PRE:
7863 case ARM::LDRB_POST_IMM:
7864 case ARM::LDRB_POST_REG:
7865 case ARM::t2LDRB_POST:
7866 case ARM::LDRSB_PRE:
7867 case ARM::t2LDRSB_PRE:
7868 case ARM::LDRSB_POST:
7869 case ARM::t2LDRSB_POST: {
7870 // Rt must be different from Rn.
7871 const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
7872 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7873
7874 if (Rt == Rn)
7875 return Error(Operands[3]->getStartLoc(),
7876 "destination register and base register can't be identical");
7877 return false;
7878 }
7879
7880 case ARM::MVE_VLDRBU8_rq:
7881 case ARM::MVE_VLDRBU16_rq:
7882 case ARM::MVE_VLDRBS16_rq:
7883 case ARM::MVE_VLDRBU32_rq:
7884 case ARM::MVE_VLDRBS32_rq:
7885 case ARM::MVE_VLDRHU16_rq:
7886 case ARM::MVE_VLDRHU16_rq_u:
7887 case ARM::MVE_VLDRHU32_rq:
7888 case ARM::MVE_VLDRHU32_rq_u:
7889 case ARM::MVE_VLDRHS32_rq:
7890 case ARM::MVE_VLDRHS32_rq_u:
7891 case ARM::MVE_VLDRWU32_rq:
7892 case ARM::MVE_VLDRWU32_rq_u:
7893 case ARM::MVE_VLDRDU64_rq:
7894 case ARM::MVE_VLDRDU64_rq_u:
7895 case ARM::MVE_VLDRWU32_qi:
7896 case ARM::MVE_VLDRWU32_qi_pre:
7897 case ARM::MVE_VLDRDU64_qi:
7898 case ARM::MVE_VLDRDU64_qi_pre: {
7899 // Qd must be different from Qm.
7900 unsigned QdIdx = 0, QmIdx = 2;
7901 bool QmIsPointer = false;
7902 switch (Opcode) {
7903 case ARM::MVE_VLDRWU32_qi:
7904 case ARM::MVE_VLDRDU64_qi:
7905 QmIdx = 1;
7906 QmIsPointer = true;
7907 break;
7908 case ARM::MVE_VLDRWU32_qi_pre:
7909 case ARM::MVE_VLDRDU64_qi_pre:
7910 QdIdx = 1;
7911 QmIsPointer = true;
7912 break;
7913 }
7914
7915 const unsigned Qd = MRI->getEncodingValue(Inst.getOperand(QdIdx).getReg());
7916 const unsigned Qm = MRI->getEncodingValue(Inst.getOperand(QmIdx).getReg());
7917
7918 if (Qd == Qm) {
7919 return Error(Operands[3]->getStartLoc(),
7920 Twine("destination vector register and vector ") +
7921 (QmIsPointer ? "pointer" : "offset") +
7922 " register can't be identical");
7923 }
7924 return false;
7925 }
7926
7927 case ARM::SBFX:
7928 case ARM::t2SBFX:
7929 case ARM::UBFX:
7930 case ARM::t2UBFX: {
7931 // Width must be in range [1, 32-lsb].
7932 unsigned LSB = Inst.getOperand(2).getImm();
7933 unsigned Widthm1 = Inst.getOperand(3).getImm();
7934 if (Widthm1 >= 32 - LSB)
7935 return Error(Operands[5]->getStartLoc(),
7936 "bitfield width must be in range [1,32-lsb]");
7937 return false;
7938 }
7939 // Notionally handles ARM::tLDMIA_UPD too.
7940 case ARM::tLDMIA: {
7941 // If we're parsing Thumb2, the .w variant is available and handles
7942 // most cases that are normally illegal for a Thumb1 LDM instruction.
7943 // We'll make the transformation in processInstruction() if necessary.
7944 //
7945 // Thumb LDM instructions are writeback iff the base register is not
7946 // in the register list.
7947 unsigned Rn = Inst.getOperand(0).getReg();
7948 bool HasWritebackToken =
7949 (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
7950 static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
7951 bool ListContainsBase;
7952 if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
7953 return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
7954 "registers must be in range r0-r7");
7955 // If we should have writeback, then there should be a '!' token.
7956 if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
7957 return Error(Operands[2]->getStartLoc(),
7958 "writeback operator '!' expected");
7959 // If we should not have writeback, there must not be a '!'. This is
7960 // true even for the 32-bit wide encodings.
7961 if (ListContainsBase && HasWritebackToken)
7962 return Error(Operands[3]->getStartLoc(),
7963 "writeback operator '!' not allowed when base register "
7964 "in register list");
7965
7966 if (validatetLDMRegList(Inst, Operands, 3))
7967 return true;
7968 break;
7969 }
7970 case ARM::LDMIA_UPD:
7971 case ARM::LDMDB_UPD:
7972 case ARM::LDMIB_UPD:
7973 case ARM::LDMDA_UPD:
7974 // ARM variants loading and updating the same register are only officially
7975 // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
7976 if (!hasV7Ops())
7977 break;
7978 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
7979 return Error(Operands.back()->getStartLoc(),
7980 "writeback register not allowed in register list");
7981 break;
7982 case ARM::t2LDMIA:
7983 case ARM::t2LDMDB:
7984 if (validatetLDMRegList(Inst, Operands, 3))
7985 return true;
7986 break;
7987 case ARM::t2STMIA:
7988 case ARM::t2STMDB:
7989 if (validatetSTMRegList(Inst, Operands, 3))
7990 return true;
7991 break;
7992 case ARM::t2LDMIA_UPD:
7993 case ARM::t2LDMDB_UPD:
7994 case ARM::t2STMIA_UPD:
7995 case ARM::t2STMDB_UPD:
7996 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
7997 return Error(Operands.back()->getStartLoc(),
7998 "writeback register not allowed in register list");
7999
8000 if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
8001 if (validatetLDMRegList(Inst, Operands, 3))
8002 return true;
8003 } else {
8004 if (validatetSTMRegList(Inst, Operands, 3))
8005 return true;
8006 }
8007 break;
8008
8009 case ARM::sysLDMIA_UPD:
8010 case ARM::sysLDMDA_UPD:
8011 case ARM::sysLDMDB_UPD:
8012 case ARM::sysLDMIB_UPD:
8013 if (!listContainsReg(Inst, 3, ARM::PC))
8014 return Error(Operands[4]->getStartLoc(),
8015 "writeback register only allowed on system LDM "
8016 "if PC in register-list");
8017 break;
8018 case ARM::sysSTMIA_UPD:
8019 case ARM::sysSTMDA_UPD:
8020 case ARM::sysSTMDB_UPD:
8021 case ARM::sysSTMIB_UPD:
8022 return Error(Operands[2]->getStartLoc(),
8023 "system STM cannot have writeback register");
8024 case ARM::tMUL:
8025 // The second source operand must be the same register as the destination
8026 // operand.
8027 //
8028 // In this case, we must directly check the parsed operands because the
8029 // cvtThumbMultiply() function is written in such a way that it guarantees
8030 // this first statement is always true for the new Inst. Essentially, the
8031 // destination is unconditionally copied into the second source operand
8032 // without checking to see if it matches what we actually parsed.
8033 if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() !=
8034 ((ARMOperand &)*Operands[5]).getReg()) &&
8035 (((ARMOperand &)*Operands[3]).getReg() !=
8036 ((ARMOperand &)*Operands[4]).getReg())) {
8037 return Error(Operands[3]->getStartLoc(),
8038 "destination register must match source register");
8039 }
8040 break;
8041
8042 // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
8043 // so only issue a diagnostic for thumb1. The instructions will be
8044 // switched to the t2 encodings in processInstruction() if necessary.
8045 case ARM::tPOP: {
8046 bool ListContainsBase;
8047 if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
8048 !isThumbTwo())
8049 return Error(Operands[2]->getStartLoc(),
8050 "registers must be in range r0-r7 or pc");
8051 if (validatetLDMRegList(Inst, Operands, 2, !isMClass()))
8052 return true;
8053 break;
8054 }
8055 case ARM::tPUSH: {
8056 bool ListContainsBase;
8057 if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
8058 !isThumbTwo())
8059 return Error(Operands[2]->getStartLoc(),
8060 "registers must be in range r0-r7 or lr");
8061 if (validatetSTMRegList(Inst, Operands, 2))
8062 return true;
8063 break;
8064 }
8065 case ARM::tSTMIA_UPD: {
8066 bool ListContainsBase, InvalidLowList;
8067 InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
8068 0, ListContainsBase);
8069 if (InvalidLowList && !isThumbTwo())
8070 return Error(Operands[4]->getStartLoc(),
8071 "registers must be in range r0-r7");
8072
8073 // This would be converted to a 32-bit stm, but that's not valid if the
8074 // writeback register is in the list.
8075 if (InvalidLowList && ListContainsBase)
8076 return Error(Operands[4]->getStartLoc(),
8077 "writeback operator '!' not allowed when base register "
8078 "in register list");
8079
8080 if (validatetSTMRegList(Inst, Operands, 4))
8081 return true;
8082 break;
8083 }
8084 case ARM::tADDrSP:
8085 // If the non-SP source operand and the destination operand are not the
8086 // same, we need thumb2 (for the wide encoding), or we have an error.
8087 if (!isThumbTwo() &&
8088 Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
8089 return Error(Operands[4]->getStartLoc(),
8090 "source register must be the same as destination");
8091 }
8092 break;
8093
8094 case ARM::t2ADDrr:
8095 case ARM::t2ADDrs:
8096 case ARM::t2SUBrr:
8097 case ARM::t2SUBrs:
8098 if (Inst.getOperand(0).getReg() == ARM::SP &&
8099 Inst.getOperand(1).getReg() != ARM::SP)
8100 return Error(Operands[4]->getStartLoc(),
8101 "source register must be sp if destination is sp");
8102 break;
8103
8104 // Final range checking for Thumb unconditional branch instructions.
8105 case ARM::tB:
8106 if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
8107 return Error(Operands[2]->getStartLoc(), "branch target out of range");
8108 break;
8109 case ARM::t2B: {
8110 int op = (Operands[2]->isImm()) ? 2 : 3;
8111 ARMOperand &Operand = static_cast<ARMOperand &>(*Operands[op]);
8112 // Delay the checks of symbolic expressions until they are resolved.
8113 if (!isa<MCBinaryExpr>(Operand.getImm()) &&
8114 !Operand.isSignedOffset<24, 1>())
8115 return Error(Operands[op]->getStartLoc(), "branch target out of range");
8116 break;
8117 }
8118 // Final range checking for Thumb conditional branch instructions.
8119 case ARM::tBcc:
8120 if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>())
8121 return Error(Operands[2]->getStartLoc(), "branch target out of range");
8122 break;
8123 case ARM::t2Bcc: {
8124 int Op = (Operands[2]->isImm()) ? 2 : 3;
8125 if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
8126 return Error(Operands[Op]->getStartLoc(), "branch target out of range");
8127 break;
8128 }
8129 case ARM::tCBZ:
8130 case ARM::tCBNZ: {
8131 if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<6, 1>())
8132 return Error(Operands[2]->getStartLoc(), "branch target out of range");
8133 break;
8134 }
8135 case ARM::MOVi16:
8136 case ARM::MOVTi16:
8137 case ARM::t2MOVi16:
8138 case ARM::t2MOVTi16:
8139 {
8140 // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
8141 // especially when we turn it into a movw and the expression <symbol> does
8142 // not have a :lower16: or :upper16 as part of the expression. We don't
8143 // want the behavior of silently truncating, which can be unexpected and
8144 // lead to bugs that are difficult to find since this is an easy mistake
8145 // to make.
8146 int i = (Operands[3]->isImm()) ? 3 : 4;
8147 ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
8148 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
8149 if (CE) break;
8150 const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
8151 if (!E) break;
8152 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
8153 if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
8154 ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
8155 return Error(
8156 Op.getStartLoc(),
8157 "immediate expression for mov requires :lower16: or :upper16");
8158 break;
8159 }
8160 case ARM::tADDi8: {
8163 return Error(Op.getStartLoc(),
8164 "Immediate expression for Thumb adds requires :lower0_7:,"
8165 " :lower8_15:, :upper0_7: or :upper8_15:");
8166 break;
8167 }
8168 case ARM::tMOVi8: {
8171 return Error(Op.getStartLoc(),
8172 "Immediate expression for Thumb movs requires :lower0_7:,"
8173 " :lower8_15:, :upper0_7: or :upper8_15:");
8174 break;
8175 }
8176 case ARM::HINT:
8177 case ARM::t2HINT: {
8178 unsigned Imm8 = Inst.getOperand(0).getImm();
8179 unsigned Pred = Inst.getOperand(1).getImm();
8180 // ESB is not predicable (pred must be AL). Without the RAS extension, this
8181 // behaves as any other unallocated hint.
8182 if (Imm8 == 0x10 && Pred != ARMCC::AL && hasRAS())
8183 return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
8184 "predicable, but condition "
8185 "code specified");
8186 if (Imm8 == 0x14 && Pred != ARMCC::AL)
8187 return Error(Operands[1]->getStartLoc(), "instruction 'csdb' is not "
8188 "predicable, but condition "
8189 "code specified");
8190 break;
8191 }
8192 case ARM::t2BFi:
8193 case ARM::t2BFr:
8194 case ARM::t2BFLi:
8195 case ARM::t2BFLr: {
8196 if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<4, 1>() ||
8197 (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
8198 return Error(Operands[2]->getStartLoc(),
8199 "branch location out of range or not a multiple of 2");
8200
8201 if (Opcode == ARM::t2BFi) {
8202 if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<16, 1>())
8203 return Error(Operands[3]->getStartLoc(),
8204 "branch target out of range or not a multiple of 2");
8205 } else if (Opcode == ARM::t2BFLi) {
8206 if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<18, 1>())
8207 return Error(Operands[3]->getStartLoc(),
8208 "branch target out of range or not a multiple of 2");
8209 }
8210 break;
8211 }
8212 case ARM::t2BFic: {
8213 if (!static_cast<ARMOperand &>(*Operands[1]).isUnsignedOffset<4, 1>() ||
8214 (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
8215 return Error(Operands[1]->getStartLoc(),
8216 "branch location out of range or not a multiple of 2");
8217
8218 if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<16, 1>())
8219 return Error(Operands[2]->getStartLoc(),
8220 "branch target out of range or not a multiple of 2");
8221
8222 assert(Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() &&
8223 "branch location and else branch target should either both be "
8224 "immediates or both labels");
8225
8226 if (Inst.getOperand(0).isImm() && Inst.getOperand(2).isImm()) {
8227 int Diff = Inst.getOperand(2).getImm() - Inst.getOperand(0).getImm();
8228 if (Diff != 4 && Diff != 2)
8229 return Error(
8230 Operands[3]->getStartLoc(),
8231 "else branch target must be 2 or 4 greater than the branch location");
8232 }
8233 break;
8234 }
8235 case ARM::t2CLRM: {
8236 for (unsigned i = 2; i < Inst.getNumOperands(); i++) {
8237 if (Inst.getOperand(i).isReg() &&
8238 !ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
8239 Inst.getOperand(i).getReg())) {
8240 return Error(Operands[2]->getStartLoc(),
8241 "invalid register in register list. Valid registers are "
8242 "r0-r12, lr/r14 and APSR.");
8243 }
8244 }
8245 break;
8246 }
8247 case ARM::DSB:
8248 case ARM::t2DSB: {
8249
8250 if (Inst.getNumOperands() < 2)
8251 break;
8252
8253 unsigned Option = Inst.getOperand(0).getImm();
8254 unsigned Pred = Inst.getOperand(1).getImm();
8255
8256 // SSBB and PSSBB (DSB #0|#4) are not predicable (pred must be AL).
8257 if (Option == 0 && Pred != ARMCC::AL)
8258 return Error(Operands[1]->getStartLoc(),
8259 "instruction 'ssbb' is not predicable, but condition code "
8260 "specified");
8261 if (Option == 4 && Pred != ARMCC::AL)
8262 return Error(Operands[1]->getStartLoc(),
8263 "instruction 'pssbb' is not predicable, but condition code "
8264 "specified");
8265 break;
8266 }
8267 case ARM::VMOVRRS: {
8268 // Source registers must be sequential.
8269 const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(2).getReg());
8270 const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(3).getReg());
8271 if (Sm1 != Sm + 1)
8272 return Error(Operands[5]->getStartLoc(),
8273 "source operands must be sequential");
8274 break;
8275 }
8276 case ARM::VMOVSRR: {
8277 // Destination registers must be sequential.
8278 const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(0).getReg());
8279 const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
8280 if (Sm1 != Sm + 1)
8281 return Error(Operands[3]->getStartLoc(),
8282 "destination operands must be sequential");
8283 break;
8284 }
8285 case ARM::VLDMDIA:
8286 case ARM::VSTMDIA: {
8287 ARMOperand &Op = static_cast<ARMOperand&>(*Operands[3]);
8288 auto &RegList = Op.getRegList();
8289 if (RegList.size() < 1 || RegList.size() > 16)
8290 return Error(Operands[3]->getStartLoc(),
8291 "list of registers must be at least 1 and at most 16");
8292 break;
8293 }
8294 case ARM::MVE_VQDMULLs32bh:
8295 case ARM::MVE_VQDMULLs32th:
8296 case ARM::MVE_VCMULf32:
8297 case ARM::MVE_VMULLBs32:
8298 case ARM::MVE_VMULLTs32:
8299 case ARM::MVE_VMULLBu32:
8300 case ARM::MVE_VMULLTu32: {
8301 if (Operands[3]->getReg() == Operands[4]->getReg()) {
8302 return Error (Operands[3]->getStartLoc(),
8303 "Qd register and Qn register can't be identical");
8304 }
8305 if (Operands[3]->getReg() == Operands[5]->getReg()) {
8306 return Error (Operands[3]->getStartLoc(),
8307 "Qd register and Qm register can't be identical");
8308 }
8309 break;
8310 }
8311 case ARM::MVE_VREV64_8:
8312 case ARM::MVE_VREV64_16:
8313 case ARM::MVE_VREV64_32:
8314 case ARM::MVE_VQDMULL_qr_s32bh:
8315 case ARM::MVE_VQDMULL_qr_s32th: {
8316 if (Operands[3]->getReg() == Operands[4]->getReg()) {
8317 return Error (Operands[3]->getStartLoc(),
8318 "Qd register and Qn register can't be identical");
8319 }
8320 break;
8321 }
8322 case ARM::MVE_VCADDi32:
8323 case ARM::MVE_VCADDf32:
8324 case ARM::MVE_VHCADDs32: {
8325 if (Operands[3]->getReg() == Operands[5]->getReg()) {
8326 return Error (Operands[3]->getStartLoc(),
8327 "Qd register and Qm register can't be identical");
8328 }
8329 break;
8330 }
8331 case ARM::MVE_VMOV_rr_q: {
8332 if (Operands[4]->getReg() != Operands[6]->getReg())
8333 return Error (Operands[4]->getStartLoc(), "Q-registers must be the same");
8334 if (static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() !=
8335 static_cast<ARMOperand &>(*Operands[7]).getVectorIndex() + 2)
8336 return Error (Operands[5]->getStartLoc(), "Q-register indexes must be 2 and 0 or 3 and 1");
8337 break;
8338 }
8339 case ARM::MVE_VMOV_q_rr: {
8340 if (Operands[2]->getReg() != Operands[4]->getReg())
8341 return Error (Operands[2]->getStartLoc(), "Q-registers must be the same");
8342 if (static_cast<ARMOperand &>(*Operands[3]).getVectorIndex() !=
8343 static_cast<ARMOperand &>(*Operands[5]).getVectorIndex() + 2)
8344 return Error (Operands[3]->getStartLoc(), "Q-register indexes must be 2 and 0 or 3 and 1");
8345 break;
8346 }
8347 case ARM::MVE_SQRSHR:
8348 case ARM::MVE_UQRSHL: {
8349 if (Operands[2]->getReg() == Operands[3]->getReg()) {
8350 return Error(Operands[2]->getStartLoc(),
8351 "Rda register and Rm register can't be identical");
8352 }
8353 break;
8354 }
8355 case ARM::UMAAL:
8356 case ARM::UMLAL:
8357 case ARM::UMULL:
8358 case ARM::t2UMAAL:
8359 case ARM::t2UMLAL:
8360 case ARM::t2UMULL:
8361 case ARM::SMLAL:
8362 case ARM::SMLALBB:
8363 case ARM::SMLALBT:
8364 case ARM::SMLALD:
8365 case ARM::SMLALDX:
8366 case ARM::SMLALTB:
8367 case ARM::SMLALTT:
8368 case ARM::SMLSLD:
8369 case ARM::SMLSLDX:
8370 case ARM::SMULL:
8371 case ARM::t2SMLAL:
8372 case ARM::t2SMLALBB:
8373 case ARM::t2SMLALBT:
8374 case ARM::t2SMLALD:
8375 case ARM::t2SMLALDX:
8376 case ARM::t2SMLALTB:
8377 case ARM::t2SMLALTT:
8378 case ARM::t2SMLSLD:
8379 case ARM::t2SMLSLDX:
8380 case ARM::t2SMULL: {
8381 unsigned RdHi = Inst.getOperand(0).getReg();
8382 unsigned RdLo = Inst.getOperand(1).getReg();
8383 if(RdHi == RdLo) {
8384 return Error(Loc,
8385 "unpredictable instruction, RdHi and RdLo must be different");
8386 }
8387 break;
8388 }
8389
8390 case ARM::CDE_CX1:
8391 case ARM::CDE_CX1A:
8392 case ARM::CDE_CX1D:
8393 case ARM::CDE_CX1DA:
8394 case ARM::CDE_CX2:
8395 case ARM::CDE_CX2A:
8396 case ARM::CDE_CX2D:
8397 case ARM::CDE_CX2DA:
8398 case ARM::CDE_CX3:
8399 case ARM::CDE_CX3A:
8400 case ARM::CDE_CX3D:
8401 case ARM::CDE_CX3DA:
8402 case ARM::CDE_VCX1_vec:
8403 case ARM::CDE_VCX1_fpsp:
8404 case ARM::CDE_VCX1_fpdp:
8405 case ARM::CDE_VCX1A_vec:
8406 case ARM::CDE_VCX1A_fpsp:
8407 case ARM::CDE_VCX1A_fpdp:
8408 case ARM::CDE_VCX2_vec:
8409 case ARM::CDE_VCX2_fpsp:
8410 case ARM::CDE_VCX2_fpdp:
8411 case ARM::CDE_VCX2A_vec:
8412 case ARM::CDE_VCX2A_fpsp:
8413 case ARM::CDE_VCX2A_fpdp:
8414 case ARM::CDE_VCX3_vec:
8415 case ARM::CDE_VCX3_fpsp:
8416 case ARM::CDE_VCX3_fpdp:
8417 case ARM::CDE_VCX3A_vec:
8418 case ARM::CDE_VCX3A_fpsp:
8419 case ARM::CDE_VCX3A_fpdp: {
8420 assert(Inst.getOperand(1).isImm() &&
8421 "CDE operand 1 must be a coprocessor ID");
8422 int64_t Coproc = Inst.getOperand(1).getImm();
8423 if (Coproc < 8 && !ARM::isCDECoproc(Coproc, *STI))
8424 return Error(Operands[1]->getStartLoc(),
8425 "coprocessor must be configured as CDE");
8426 else if (Coproc >= 8)
8427 return Error(Operands[1]->getStartLoc(),
8428 "coprocessor must be in the range [p0, p7]");
8429 break;
8430 }
8431
8432 case ARM::t2CDP:
8433 case ARM::t2CDP2:
8434 case ARM::t2LDC2L_OFFSET:
8435 case ARM::t2LDC2L_OPTION:
8436 case ARM::t2LDC2L_POST:
8437 case ARM::t2LDC2L_PRE:
8438 case ARM::t2LDC2_OFFSET:
8439 case ARM::t2LDC2_OPTION:
8440 case ARM::t2LDC2_POST:
8441 case ARM::t2LDC2_PRE:
8442 case ARM::t2LDCL_OFFSET:
8443 case ARM::t2LDCL_OPTION:
8444 case ARM::t2LDCL_POST:
8445 case ARM::t2LDCL_PRE:
8446 case ARM::t2LDC_OFFSET:
8447 case ARM::t2LDC_OPTION:
8448 case ARM::t2LDC_POST:
8449 case ARM::t2LDC_PRE:
8450 case ARM::t2MCR:
8451 case ARM::t2MCR2:
8452 case ARM::t2MCRR:
8453 case ARM::t2MCRR2:
8454 case ARM::t2MRC:
8455 case ARM::t2MRC2:
8456 case ARM::t2MRRC:
8457 case ARM::t2MRRC2:
8458 case ARM::t2STC2L_OFFSET:
8459 case ARM::t2STC2L_OPTION:
8460 case ARM::t2STC2L_POST:
8461 case ARM::t2STC2L_PRE:
8462 case ARM::t2STC2_OFFSET:
8463 case ARM::t2STC2_OPTION:
8464 case ARM::t2STC2_POST:
8465 case ARM::t2STC2_PRE:
8466 case ARM::t2STCL_OFFSET:
8467 case ARM::t2STCL_OPTION:
8468 case ARM::t2STCL_POST:
8469 case ARM::t2STCL_PRE:
8470 case ARM::t2STC_OFFSET:
8471 case ARM::t2STC_OPTION:
8472 case ARM::t2STC_POST:
8473 case ARM::t2STC_PRE: {
8474 unsigned Opcode = Inst.getOpcode();
8475 // Inst.getOperand indexes operands in the (oops ...) and (iops ...) dags,
8476 // CopInd is the index of the coprocessor operand.
8477 size_t CopInd = 0;
8478 if (Opcode == ARM::t2MRRC || Opcode == ARM::t2MRRC2)
8479 CopInd = 2;
8480 else if (Opcode == ARM::t2MRC || Opcode == ARM::t2MRC2)
8481 CopInd = 1;
8482 assert(Inst.getOperand(CopInd).isImm() &&
8483 "Operand must be a coprocessor ID");
8484 int64_t Coproc = Inst.getOperand(CopInd).getImm();
8485 // Operands[2] is the coprocessor operand at syntactic level
8486 if (ARM::isCDECoproc(Coproc, *STI))
8487 return Error(Operands[2]->getStartLoc(),
8488 "coprocessor must be configured as GCP");
8489 break;
8490 }
8491 }
8492
8493 return false;
8494}
8495
8496static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
8497 switch(Opc) {
8498 default: llvm_unreachable("unexpected opcode!");
8499 // VST1LN
8500 case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
8501 case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
8502 case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
8503 case ARM::VST1LNdWB_register_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
8504 case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
8505 case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
8506 case ARM::VST1LNdAsm_8: Spacing = 1; return ARM::VST1LNd8;
8507 case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
8508 case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
8509
8510 // VST2LN
8511 case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
8512 case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
8513 case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
8514 case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
8515 case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
8516
8517 case ARM::VST2LNdWB_register_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
8518 case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
8519 case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
8520 case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
8521 case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
8522
8523 case ARM::VST2LNdAsm_8: Spacing = 1; return ARM::VST2LNd8;
8524 case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
8525 case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
8526 case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
8527 case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
8528
8529 // VST3LN
8530 case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
8531 case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
8532 case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
8533 case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
8534 case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
8535 case ARM::VST3LNdWB_register_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
8536 case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
8537 case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
8538 case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
8539 case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
8540 case ARM::VST3LNdAsm_8: Spacing = 1; return ARM::VST3LNd8;
8541 case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
8542 case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
8543 case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
8544 case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
8545
8546 // VST3
8547 case ARM::VST3dWB_fixed_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
8548 case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
8549 case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
8550 case ARM::VST3qWB_fixed_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
8551 case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
8552 case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
8553 case ARM::VST3dWB_register_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
8554 case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
8555 case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
8556 case ARM::VST3qWB_register_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
8557 case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
8558 case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
8559 case ARM::VST3dAsm_8: Spacing = 1; return ARM::VST3d8;
8560 case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
8561 case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
8562 case ARM::VST3qAsm_8: Spacing = 2; return ARM::VST3q8;
8563 case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
8564 case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
8565
8566 // VST4LN
8567 case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
8568 case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
8569 case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
8570 case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
8571 case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
8572 case ARM::VST4LNdWB_register_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
8573 case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
8574 case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
8575 case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
8576 case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
8577 case ARM::VST4LNdAsm_8: Spacing = 1; return ARM::VST4LNd8;
8578 case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
8579 case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
8580 case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
8581 case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
8582
8583 // VST4
8584 case ARM::VST4dWB_fixed_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
8585 case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
8586 case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
8587 case ARM::VST4qWB_fixed_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
8588 case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
8589 case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
8590 case ARM::VST4dWB_register_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
8591 case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
8592 case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
8593 case ARM::VST4qWB_register_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
8594 case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
8595 case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
8596 case ARM::VST4dAsm_8: Spacing = 1; return ARM::VST4d8;
8597 case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
8598 case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
8599 case ARM::VST4qAsm_8: Spacing = 2; return ARM::VST4q8;
8600 case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
8601 case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
8602 }
8603}
8604
8605static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
8606 switch(Opc) {
8607 default: llvm_unreachable("unexpected opcode!");
8608 // VLD1LN
8609 case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
8610 case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
8611 case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
8612 case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
8613 case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
8614 case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
8615 case ARM::VLD1LNdAsm_8: Spacing = 1; return ARM::VLD1LNd8;
8616 case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
8617 case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
8618
8619 // VLD2LN
8620 case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
8621 case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
8622 case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
8623 case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
8624 case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
8625 case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
8626 case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
8627 case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
8628 case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
8629 case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
8630 case ARM::VLD2LNdAsm_8: Spacing = 1; return ARM::VLD2LNd8;
8631 case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
8632 case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
8633 case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
8634 case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
8635
8636 // VLD3DUP
8637 case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
8638 case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
8639 case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
8640 case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
8641 case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
8642 case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
8643 case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
8644 case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
8645 case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
8646 case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
8647 case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
8648 case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
8649 case ARM::VLD3DUPdAsm_8: Spacing = 1; return ARM::VLD3DUPd8;
8650 case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
8651 case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
8652 case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
8653 case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
8654 case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
8655
8656 // VLD3LN
8657 case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
8658 case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
8659 case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
8660 case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
8661 case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
8662 case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
8663 case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
8664 case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
8665 case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
8666 case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
8667 case ARM::VLD3LNdAsm_8: Spacing = 1; return ARM::VLD3LNd8;
8668 case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
8669 case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
8670 case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
8671 case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
8672
8673 // VLD3
8674 case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
8675 case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
8676 case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
8677 case ARM::VLD3qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
8678 case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
8679 case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
8680 case ARM::VLD3dWB_register_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
8681 case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
8682 case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
8683 case ARM::VLD3qWB_register_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
8684 case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
8685 case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
8686 case ARM::VLD3dAsm_8: Spacing = 1; return ARM::VLD3d8;
8687 case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
8688 case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
8689 case ARM::VLD3qAsm_8: Spacing = 2; return ARM::VLD3q8;
8690 case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
8691 case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
8692
8693 // VLD4LN
8694 case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
8695 case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
8696 case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
8697 case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
8698 case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
8699 case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
8700 case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
8701 case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
8702 case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
8703 case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
8704 case ARM::VLD4LNdAsm_8: Spacing = 1; return ARM::VLD4LNd8;
8705 case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
8706 case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
8707 case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
8708 case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
8709
8710 // VLD4DUP
8711 case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
8712 case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
8713 case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
8714 case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
8715 case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
8716 case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
8717 case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
8718 case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
8719 case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
8720 case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
8721 case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
8722 case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
8723 case ARM::VLD4DUPdAsm_8: Spacing = 1; return ARM::VLD4DUPd8;
8724 case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
8725 case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
8726 case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
8727 case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
8728 case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
8729
8730 // VLD4
8731 case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
8732 case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
8733 case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
8734 case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
8735 case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
8736 case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
8737 case ARM::VLD4dWB_register_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
8738 case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
8739 case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
8740 case ARM::VLD4qWB_register_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
8741 case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
8742 case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
8743 case ARM::VLD4dAsm_8: Spacing = 1; return ARM::VLD4d8;
8744 case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
8745 case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
8746 case ARM::VLD4qAsm_8: Spacing = 2; return ARM::VLD4q8;
8747 case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
8748 case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
8749 }
8750}
8751
8752bool ARMAsmParser::processInstruction(MCInst &Inst,
8753 const OperandVector &Operands,
8754 MCStreamer &Out) {
8755 // Check if we have the wide qualifier, because if it's present we
8756 // must avoid selecting a 16-bit thumb instruction.
8757 bool HasWideQualifier = false;
8758 for (auto &Op : Operands) {
8759 ARMOperand &ARMOp = static_cast<ARMOperand&>(*Op);
8760 if (ARMOp.isToken() && ARMOp.getToken() == ".w") {
8761 HasWideQualifier = true;
8762 break;
8763 }
8764 }
8765
8766 switch (Inst.getOpcode()) {
8767 case ARM::VLLDM:
8768 case ARM::VLSTM: {
8769 // In some cases both T1 and T2 are valid, causing tablegen to pick T1
8770 // instead of T2
8771 if (Operands.size() == 4) { // a register list has been provided
8772 ARMOperand &Op = static_cast<ARMOperand &>(
8773 *Operands[3]); // the register list, a dpr_reglist
8774 assert(Op.isDPRRegList());
8775 auto &RegList = Op.getRegList();
8776 // When the register list is {d0-d31} the instruction has to be the T2
8777 // variant
8778 if (RegList.size() == 32) {
8779 const unsigned Opcode =
8780 (Inst.getOpcode() == ARM::VLLDM) ? ARM::VLLDM_T2 : ARM::VLSTM_T2;
8781 MCInst TmpInst;
8782 TmpInst.setOpcode(Opcode);
8783 TmpInst.addOperand(Inst.getOperand(0));
8784 TmpInst.addOperand(Inst.getOperand(1));
8785 TmpInst.addOperand(Inst.getOperand(2));
8786 TmpInst.addOperand(Inst.getOperand(3));
8787 Inst = TmpInst;
8788 return true;
8789 }
8790 }
8791 return false;
8792 }
8793 // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
8794 case ARM::LDRT_POST:
8795 case ARM::LDRBT_POST: {
8796 const unsigned Opcode =
8797 (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
8798 : ARM::LDRBT_POST_IMM;
8799 MCInst TmpInst;
8800 TmpInst.setOpcode(Opcode);
8801 TmpInst.addOperand(Inst.getOperand(0));
8802 TmpInst.addOperand(Inst.getOperand(1));
8803 TmpInst.addOperand(Inst.getOperand(1));
8804 TmpInst.addOperand(MCOperand::createReg(0));
8805 TmpInst.addOperand(MCOperand::createImm(0));
8806 TmpInst.addOperand(Inst.getOperand(2));
8807 TmpInst.addOperand(Inst.getOperand(3));
8808 Inst = TmpInst;
8809 return true;
8810 }
8811 // Alias for 'ldr{sb,h,sh}t Rt, [Rn] {, #imm}' for omitted immediate.
8812 case ARM::LDRSBTii:
8813 case ARM::LDRHTii:
8814 case ARM::LDRSHTii: {
8815 MCInst TmpInst;
8816
8817 if (Inst.getOpcode() == ARM::LDRSBTii)
8818 TmpInst.setOpcode(ARM::LDRSBTi);
8819 else if (Inst.getOpcode() == ARM::LDRHTii)
8820 TmpInst.setOpcode(ARM::LDRHTi);
8821 else if (Inst.getOpcode() == ARM::LDRSHTii)
8822 TmpInst.setOpcode(ARM::LDRSHTi);
8823 TmpInst.addOperand(Inst.getOperand(0));
8824 TmpInst.addOperand(Inst.getOperand(1));
8825 TmpInst.addOperand(Inst.getOperand(1));
8826 TmpInst.addOperand(MCOperand::createImm(256));
8827 TmpInst.addOperand(Inst.getOperand(2));
8828 Inst = TmpInst;
8829 return true;
8830 }
8831 // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
8832 case ARM::STRT_POST:
8833 case ARM::STRBT_POST: {
8834 const unsigned Opcode =
8835 (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
8836 : ARM::STRBT_POST_IMM;
8837 MCInst TmpInst;
8838 TmpInst.setOpcode(Opcode);
8839 TmpInst.addOperand(Inst.getOperand(1));
8840 TmpInst.addOperand(Inst.getOperand(0));
8841 TmpInst.addOperand(Inst.getOperand(1));
8842 TmpInst.addOperand(MCOperand::createReg(0));
8843 TmpInst.addOperand(MCOperand::createImm(0));
8844 TmpInst.addOperand(Inst.getOperand(2));
8845 TmpInst.addOperand(Inst.getOperand(3));
8846 Inst = TmpInst;
8847 return true;
8848 }
8849 // Alias for alternate form of 'ADR Rd, #imm' instruction.
8850 case ARM::ADDri: {
8851 if (Inst.getOperand(1).getReg() != ARM::PC ||
8852 Inst.getOperand(5).getReg() != 0 ||
8853 !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
8854 return false;
8855 MCInst TmpInst;
8856 TmpInst.setOpcode(ARM::ADR);
8857 TmpInst.addOperand(Inst.getOperand(0));
8858 if (Inst.getOperand(2).isImm()) {
8859 // Immediate (mod_imm) will be in its encoded form, we must unencode it
8860 // before passing it to the ADR instruction.
8861 unsigned Enc = Inst.getOperand(2).getImm();
8863 llvm::rotr<uint32_t>(Enc & 0xFF, (Enc & 0xF00) >> 7)));
8864 } else {
8865 // Turn PC-relative expression into absolute expression.
8866 // Reading PC provides the start of the current instruction + 8 and
8867 // the transform to adr is biased by that.
8868 MCSymbol *Dot = getContext().createTempSymbol();
8869 Out.emitLabel(Dot);
8870 const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
8871 const MCExpr *InstPC = MCSymbolRefExpr::create(Dot,
8873 getContext());
8874 const MCExpr *Const8 = MCConstantExpr::create(8, getContext());
8875 const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8,
8876 getContext());
8877 const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr,
8878 getContext());
8879 TmpInst.addOperand(MCOperand::createExpr(FixupAddr));
8880 }
8881 TmpInst.addOperand(Inst.getOperand(3));
8882 TmpInst.addOperand(Inst.getOperand(4));
8883 Inst = TmpInst;
8884 return true;
8885 }
8886 // Aliases for imm syntax of LDR instructions.
8887 case ARM::t2LDR_PRE_imm:
8888 case ARM::t2LDR_POST_imm: {
8889 MCInst TmpInst;
8890 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDR_PRE_imm ? ARM::t2LDR_PRE
8891 : ARM::t2LDR_POST);
8892 TmpInst.addOperand(Inst.getOperand(0)); // Rt
8893 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8894 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8895 TmpInst.addOperand(Inst.getOperand(2)); // imm
8896 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8897 Inst = TmpInst;
8898 return true;
8899 }
8900 // Aliases for imm syntax of STR instructions.
8901 case ARM::t2STR_PRE_imm:
8902 case ARM::t2STR_POST_imm: {
8903 MCInst TmpInst;
8904 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STR_PRE_imm ? ARM::t2STR_PRE
8905 : ARM::t2STR_POST);
8906 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8907 TmpInst.addOperand(Inst.getOperand(0)); // Rt
8908 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8909 TmpInst.addOperand(Inst.getOperand(2)); // imm
8910 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8911 Inst = TmpInst;
8912 return true;
8913 }
8914 // Aliases for imm syntax of LDRB instructions.
8915 case ARM::t2LDRB_OFFSET_imm: {
8916 MCInst TmpInst;
8917 TmpInst.setOpcode(ARM::t2LDRBi8);
8918 TmpInst.addOperand(Inst.getOperand(0)); // Rt
8919 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8920 TmpInst.addOperand(Inst.getOperand(2)); // imm
8921 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8922 Inst = TmpInst;
8923 return true;
8924 }
8925 case ARM::t2LDRB_PRE_imm:
8926 case ARM::t2LDRB_POST_imm: {
8927 MCInst TmpInst;
8928 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRB_PRE_imm
8929 ? ARM::t2LDRB_PRE
8930 : ARM::t2LDRB_POST);
8931 TmpInst.addOperand(Inst.getOperand(0)); // Rt
8932 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8933 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8934 TmpInst.addOperand(Inst.getOperand(2)); // imm
8935 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8936 Inst = TmpInst;
8937 return true;
8938 }
8939 // Aliases for imm syntax of STRB instructions.
8940 case ARM::t2STRB_OFFSET_imm: {
8941 MCInst TmpInst;
8942 TmpInst.setOpcode(ARM::t2STRBi8);
8943 TmpInst.addOperand(Inst.getOperand(0)); // Rt
8944 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8945 TmpInst.addOperand(Inst.getOperand(2)); // imm
8946 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8947 Inst = TmpInst;
8948 return true;
8949 }
8950 case ARM::t2STRB_PRE_imm:
8951 case ARM::t2STRB_POST_imm: {
8952 MCInst TmpInst;
8953 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STRB_PRE_imm
8954 ? ARM::t2STRB_PRE
8955 : ARM::t2STRB_POST);
8956 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8957 TmpInst.addOperand(Inst.getOperand(0)); // Rt
8958 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8959 TmpInst.addOperand(Inst.getOperand(2)); // imm
8960 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8961 Inst = TmpInst;
8962 return true;
8963 }
8964 // Aliases for imm syntax of LDRH instructions.
8965 case ARM::t2LDRH_OFFSET_imm: {
8966 MCInst TmpInst;
8967 TmpInst.setOpcode(ARM::t2LDRHi8);
8968 TmpInst.addOperand(Inst.getOperand(0)); // Rt
8969 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8970 TmpInst.addOperand(Inst.getOperand(2)); // imm
8971 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8972 Inst = TmpInst;
8973 return true;
8974 }
8975 case ARM::t2LDRH_PRE_imm:
8976 case ARM::t2LDRH_POST_imm: {
8977 MCInst TmpInst;
8978 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRH_PRE_imm
8979 ? ARM::t2LDRH_PRE
8980 : ARM::t2LDRH_POST);
8981 TmpInst.addOperand(Inst.getOperand(0)); // Rt
8982 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8983 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8984 TmpInst.addOperand(Inst.getOperand(2)); // imm
8985 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8986 Inst = TmpInst;
8987 return true;
8988 }
8989 // Aliases for imm syntax of STRH instructions.
8990 case ARM::t2STRH_OFFSET_imm: {
8991 MCInst TmpInst;
8992 TmpInst.setOpcode(ARM::t2STRHi8);
8993 TmpInst.addOperand(Inst.getOperand(0)); // Rt
8994 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8995 TmpInst.addOperand(Inst.getOperand(2)); // imm
8996 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8997 Inst = TmpInst;
8998 return true;
8999 }
9000 case ARM::t2STRH_PRE_imm:
9001 case ARM::t2STRH_POST_imm: {
9002 MCInst TmpInst;
9003 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STRH_PRE_imm
9004 ? ARM::t2STRH_PRE
9005 : ARM::t2STRH_POST);
9006 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9007 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9008 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9009 TmpInst.addOperand(Inst.getOperand(2)); // imm
9010 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9011 Inst = TmpInst;
9012 return true;
9013 }
9014 // Aliases for imm syntax of LDRSB instructions.
9015 case ARM::t2LDRSB_OFFSET_imm: {
9016 MCInst TmpInst;
9017 TmpInst.setOpcode(ARM::t2LDRSBi8);
9018 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9019 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9020 TmpInst.addOperand(Inst.getOperand(2)); // imm
9021 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9022 Inst = TmpInst;
9023 return true;
9024 }
9025 case ARM::t2LDRSB_PRE_imm:
9026 case ARM::t2LDRSB_POST_imm: {
9027 MCInst TmpInst;
9028 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRSB_PRE_imm
9029 ? ARM::t2LDRSB_PRE
9030 : ARM::t2LDRSB_POST);
9031 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9032 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9033 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9034 TmpInst.addOperand(Inst.getOperand(2)); // imm
9035 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9036 Inst = TmpInst;
9037 return true;
9038 }
9039 // Aliases for imm syntax of LDRSH instructions.
9040 case ARM::t2LDRSH_OFFSET_imm: {
9041 MCInst TmpInst;
9042 TmpInst.setOpcode(ARM::t2LDRSHi8);
9043 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9044 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9045 TmpInst.addOperand(Inst.getOperand(2)); // imm
9046 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9047 Inst = TmpInst;
9048 return true;
9049 }
9050 case ARM::t2LDRSH_PRE_imm:
9051 case ARM::t2LDRSH_POST_imm: {
9052 MCInst TmpInst;
9053 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRSH_PRE_imm
9054 ? ARM::t2LDRSH_PRE
9055 : ARM::t2LDRSH_POST);
9056 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9057 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9058 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9059 TmpInst.addOperand(Inst.getOperand(2)); // imm
9060 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9061 Inst = TmpInst;
9062 return true;
9063 }
9064 // Aliases for alternate PC+imm syntax of LDR instructions.
9065 case ARM::t2LDRpcrel:
9066 // Select the narrow version if the immediate will fit.
9067 if (Inst.getOperand(1).getImm() > 0 &&
9068 Inst.getOperand(1).getImm() <= 0xff &&
9069 !HasWideQualifier)
9070 Inst.setOpcode(ARM::tLDRpci);
9071 else
9072 Inst.setOpcode(ARM::t2LDRpci);
9073 return true;
9074 case ARM::t2LDRBpcrel:
9075 Inst.setOpcode(ARM::t2LDRBpci);
9076 return true;
9077 case ARM::t2LDRHpcrel:
9078 Inst.setOpcode(ARM::t2LDRHpci);
9079 return true;
9080 case ARM::t2LDRSBpcrel:
9081 Inst.setOpcode(ARM::t2LDRSBpci);
9082 return true;
9083 case ARM::t2LDRSHpcrel:
9084 Inst.setOpcode(ARM::t2LDRSHpci);
9085 return true;
9086 case ARM::LDRConstPool:
9087 case ARM::tLDRConstPool:
9088 case ARM::t2LDRConstPool: {
9089 // Pseudo instruction ldr rt, =immediate is converted to a
9090 // MOV rt, immediate if immediate is known and representable
9091 // otherwise we create a constant pool entry that we load from.
9092 MCInst TmpInst;
9093 if (Inst.getOpcode() == ARM::LDRConstPool)
9094 TmpInst.setOpcode(ARM::LDRi12);
9095 else if (Inst.getOpcode() == ARM::tLDRConstPool)
9096 TmpInst.setOpcode(ARM::tLDRpci);
9097 else if (Inst.getOpcode() == ARM::t2LDRConstPool)
9098 TmpInst.setOpcode(ARM::t2LDRpci);
9099 const ARMOperand &PoolOperand =
9100 (HasWideQualifier ?
9101 static_cast<ARMOperand &>(*Operands[4]) :
9102 static_cast<ARMOperand &>(*Operands[3]));
9103 const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
9104 // If SubExprVal is a constant we may be able to use a MOV
9105 if (isa<MCConstantExpr>(SubExprVal) &&
9106 Inst.getOperand(0).getReg() != ARM::PC &&
9107 Inst.getOperand(0).getReg() != ARM::SP) {
9108 int64_t Value =
9109 (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
9110 bool UseMov = true;
9111 bool MovHasS = true;
9112 if (Inst.getOpcode() == ARM::LDRConstPool) {
9113 // ARM Constant
9114 if (ARM_AM::getSOImmVal(Value) != -1) {
9116 TmpInst.setOpcode(ARM::MOVi);
9117 }
9118 else if (ARM_AM::getSOImmVal(~Value) != -1) {
9120 TmpInst.setOpcode(ARM::MVNi);
9121 }
9122 else if (hasV6T2Ops() &&
9123 Value >=0 && Value < 65536) {
9124 TmpInst.setOpcode(ARM::MOVi16);
9125 MovHasS = false;
9126 }
9127 else
9128 UseMov = false;
9129 }
9130 else {
9131 // Thumb/Thumb2 Constant
9132 if (hasThumb2() &&
9134 TmpInst.setOpcode(ARM::t2MOVi);
9135 else if (hasThumb2() &&
9136 ARM_AM::getT2SOImmVal(~Value) != -1) {
9137 TmpInst.setOpcode(ARM::t2MVNi);
9138 Value = ~Value;
9139 }
9140 else if (hasV8MBaseline() &&
9141 Value >=0 && Value < 65536) {
9142 TmpInst.setOpcode(ARM::t2MOVi16);
9143 MovHasS = false;
9144 }
9145 else
9146 UseMov = false;
9147 }
9148 if (UseMov) {
9149 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9150 TmpInst.addOperand(MCOperand::createImm(Value)); // Immediate
9151 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
9152 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9153 if (MovHasS)
9154 TmpInst.addOperand(MCOperand::createReg(0)); // S
9155 Inst = TmpInst;
9156 return true;
9157 }
9158 }
9159 // No opportunity to use MOV/MVN; create a constant pool entry instead.
9160 const MCExpr *CPLoc =
9161 getTargetStreamer().addConstantPoolEntry(SubExprVal,
9162 PoolOperand.getStartLoc());
9163 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9164 TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool
9165 if (TmpInst.getOpcode() == ARM::LDRi12)
9166 TmpInst.addOperand(MCOperand::createImm(0)); // unused offset
9167 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
9168 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9169 Inst = TmpInst;
9170 return true;
9171 }
9172 // Handle NEON VST complex aliases.
9173 case ARM::VST1LNdWB_register_Asm_8:
9174 case ARM::VST1LNdWB_register_Asm_16:
9175 case ARM::VST1LNdWB_register_Asm_32: {
9176 MCInst TmpInst;
9177 // Shuffle the operands around so the lane index operand is in the
9178 // right place.
9179 unsigned Spacing;
9180 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9181 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9182 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9183 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9184 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9185 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9186 TmpInst.addOperand(Inst.getOperand(1)); // lane
9187 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9188 TmpInst.addOperand(Inst.getOperand(6));
9189 Inst = TmpInst;
9190 return true;
9191 }
9192
9193 case ARM::VST2LNdWB_register_Asm_8:
9194 case ARM::VST2LNdWB_register_Asm_16:
9195 case ARM::VST2LNdWB_register_Asm_32:
9196 case ARM::VST2LNqWB_register_Asm_16:
9197 case ARM::VST2LNqWB_register_Asm_32: {
9198 MCInst TmpInst;
9199 // Shuffle the operands around so the lane index operand is in the
9200 // right place.
9201 unsigned Spacing;
9202 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9203 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9204 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9205 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9206 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9207 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9209 Spacing));
9210 TmpInst.addOperand(Inst.getOperand(1)); // lane
9211 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9212 TmpInst.addOperand(Inst.getOperand(6));
9213 Inst = TmpInst;
9214 return true;
9215 }
9216
9217 case ARM::VST3LNdWB_register_Asm_8:
9218 case ARM::VST3LNdWB_register_Asm_16:
9219 case ARM::VST3LNdWB_register_Asm_32:
9220 case ARM::VST3LNqWB_register_Asm_16:
9221 case ARM::VST3LNqWB_register_Asm_32: {
9222 MCInst TmpInst;
9223 // Shuffle the operands around so the lane index operand is in the
9224 // right place.
9225 unsigned Spacing;
9226 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9227 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9228 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9229 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9230 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9231 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9233 Spacing));
9235 Spacing * 2));
9236 TmpInst.addOperand(Inst.getOperand(1)); // lane
9237 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9238 TmpInst.addOperand(Inst.getOperand(6));
9239 Inst = TmpInst;
9240 return true;
9241 }
9242
9243 case ARM::VST4LNdWB_register_Asm_8:
9244 case ARM::VST4LNdWB_register_Asm_16:
9245 case ARM::VST4LNdWB_register_Asm_32:
9246 case ARM::VST4LNqWB_register_Asm_16:
9247 case ARM::VST4LNqWB_register_Asm_32: {
9248 MCInst TmpInst;
9249 // Shuffle the operands around so the lane index operand is in the
9250 // right place.
9251 unsigned Spacing;
9252 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9253 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9254 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9255 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9256 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9257 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9259 Spacing));
9261 Spacing * 2));
9263 Spacing * 3));
9264 TmpInst.addOperand(Inst.getOperand(1)); // lane
9265 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9266 TmpInst.addOperand(Inst.getOperand(6));
9267 Inst = TmpInst;
9268 return true;
9269 }
9270
9271 case ARM::VST1LNdWB_fixed_Asm_8:
9272 case ARM::VST1LNdWB_fixed_Asm_16:
9273 case ARM::VST1LNdWB_fixed_Asm_32: {
9274 MCInst TmpInst;
9275 // Shuffle the operands around so the lane index operand is in the
9276 // right place.
9277 unsigned Spacing;
9278 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9279 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9280 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9281 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9282 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9283 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9284 TmpInst.addOperand(Inst.getOperand(1)); // lane
9285 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9286 TmpInst.addOperand(Inst.getOperand(5));
9287 Inst = TmpInst;
9288 return true;
9289 }
9290
9291 case ARM::VST2LNdWB_fixed_Asm_8:
9292 case ARM::VST2LNdWB_fixed_Asm_16:
9293 case ARM::VST2LNdWB_fixed_Asm_32:
9294 case ARM::VST2LNqWB_fixed_Asm_16:
9295 case ARM::VST2LNqWB_fixed_Asm_32: {
9296 MCInst TmpInst;
9297 // Shuffle the operands around so the lane index operand is in the
9298 // right place.
9299 unsigned Spacing;
9300 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9301 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9302 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9303 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9304 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9305 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9307 Spacing));
9308 TmpInst.addOperand(Inst.getOperand(1)); // lane
9309 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9310 TmpInst.addOperand(Inst.getOperand(5));
9311 Inst = TmpInst;
9312 return true;
9313 }
9314
9315 case ARM::VST3LNdWB_fixed_Asm_8:
9316 case ARM::VST3LNdWB_fixed_Asm_16:
9317 case ARM::VST3LNdWB_fixed_Asm_32:
9318 case ARM::VST3LNqWB_fixed_Asm_16:
9319 case ARM::VST3LNqWB_fixed_Asm_32: {
9320 MCInst TmpInst;
9321 // Shuffle the operands around so the lane index operand is in the
9322 // right place.
9323 unsigned Spacing;
9324 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9325 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9326 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9327 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9328 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9329 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9331 Spacing));
9333 Spacing * 2));
9334 TmpInst.addOperand(Inst.getOperand(1)); // lane
9335 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9336 TmpInst.addOperand(Inst.getOperand(5));
9337 Inst = TmpInst;
9338 return true;
9339 }
9340
9341 case ARM::VST4LNdWB_fixed_Asm_8:
9342 case ARM::VST4LNdWB_fixed_Asm_16:
9343 case ARM::VST4LNdWB_fixed_Asm_32:
9344 case ARM::VST4LNqWB_fixed_Asm_16:
9345 case ARM::VST4LNqWB_fixed_Asm_32: {
9346 MCInst TmpInst;
9347 // Shuffle the operands around so the lane index operand is in the
9348 // right place.
9349 unsigned Spacing;
9350 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9351 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9352 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9353 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9354 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9355 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9357 Spacing));
9359 Spacing * 2));
9361 Spacing * 3));
9362 TmpInst.addOperand(Inst.getOperand(1)); // lane
9363 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9364 TmpInst.addOperand(Inst.getOperand(5));
9365 Inst = TmpInst;
9366 return true;
9367 }
9368
9369 case ARM::VST1LNdAsm_8:
9370 case ARM::VST1LNdAsm_16:
9371 case ARM::VST1LNdAsm_32: {
9372 MCInst TmpInst;
9373 // Shuffle the operands around so the lane index operand is in the
9374 // right place.
9375 unsigned Spacing;
9376 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9377 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9378 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9379 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9380 TmpInst.addOperand(Inst.getOperand(1)); // lane
9381 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9382 TmpInst.addOperand(Inst.getOperand(5));
9383 Inst = TmpInst;
9384 return true;
9385 }
9386
9387 case ARM::VST2LNdAsm_8:
9388 case ARM::VST2LNdAsm_16:
9389 case ARM::VST2LNdAsm_32:
9390 case ARM::VST2LNqAsm_16:
9391 case ARM::VST2LNqAsm_32: {
9392 MCInst TmpInst;
9393 // Shuffle the operands around so the lane index operand is in the
9394 // right place.
9395 unsigned Spacing;
9396 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9397 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9398 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9399 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9401 Spacing));
9402 TmpInst.addOperand(Inst.getOperand(1)); // lane
9403 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9404 TmpInst.addOperand(Inst.getOperand(5));
9405 Inst = TmpInst;
9406 return true;
9407 }
9408
9409 case ARM::VST3LNdAsm_8:
9410 case ARM::VST3LNdAsm_16:
9411 case ARM::VST3LNdAsm_32:
9412 case ARM::VST3LNqAsm_16:
9413 case ARM::VST3LNqAsm_32: {
9414 MCInst TmpInst;
9415 // Shuffle the operands around so the lane index operand is in the
9416 // right place.
9417 unsigned Spacing;
9418 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9419 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9420 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9421 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9423 Spacing));
9425 Spacing * 2));
9426 TmpInst.addOperand(Inst.getOperand(1)); // lane
9427 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9428 TmpInst.addOperand(Inst.getOperand(5));
9429 Inst = TmpInst;
9430 return true;
9431 }
9432
9433 case ARM::VST4LNdAsm_8:
9434 case ARM::VST4LNdAsm_16:
9435 case ARM::VST4LNdAsm_32:
9436 case ARM::VST4LNqAsm_16:
9437 case ARM::VST4LNqAsm_32: {
9438 MCInst TmpInst;
9439 // Shuffle the operands around so the lane index operand is in the
9440 // right place.
9441 unsigned Spacing;
9442 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9443 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9444 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9445 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9447 Spacing));
9449 Spacing * 2));
9451 Spacing * 3));
9452 TmpInst.addOperand(Inst.getOperand(1)); // lane
9453 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9454 TmpInst.addOperand(Inst.getOperand(5));
9455 Inst = TmpInst;
9456 return true;
9457 }
9458
9459 // Handle NEON VLD complex aliases.
9460 case ARM::VLD1LNdWB_register_Asm_8:
9461 case ARM::VLD1LNdWB_register_Asm_16:
9462 case ARM::VLD1LNdWB_register_Asm_32: {
9463 MCInst TmpInst;
9464 // Shuffle the operands around so the lane index operand is in the
9465 // right place.
9466 unsigned Spacing;
9467 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9468 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9469 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9470 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9471 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9472 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9473 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9474 TmpInst.addOperand(Inst.getOperand(1)); // lane
9475 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9476 TmpInst.addOperand(Inst.getOperand(6));
9477 Inst = TmpInst;
9478 return true;
9479 }
9480
9481 case ARM::VLD2LNdWB_register_Asm_8:
9482 case ARM::VLD2LNdWB_register_Asm_16:
9483 case ARM::VLD2LNdWB_register_Asm_32:
9484 case ARM::VLD2LNqWB_register_Asm_16:
9485 case ARM::VLD2LNqWB_register_Asm_32: {
9486 MCInst TmpInst;
9487 // Shuffle the operands around so the lane index operand is in the
9488 // right place.
9489 unsigned Spacing;
9490 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9491 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9493 Spacing));
9494 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9495 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9496 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9497 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9498 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9500 Spacing));
9501 TmpInst.addOperand(Inst.getOperand(1)); // lane
9502 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9503 TmpInst.addOperand(Inst.getOperand(6));
9504 Inst = TmpInst;
9505 return true;
9506 }
9507
9508 case ARM::VLD3LNdWB_register_Asm_8:
9509 case ARM::VLD3LNdWB_register_Asm_16:
9510 case ARM::VLD3LNdWB_register_Asm_32:
9511 case ARM::VLD3LNqWB_register_Asm_16:
9512 case ARM::VLD3LNqWB_register_Asm_32: {
9513 MCInst TmpInst;
9514 // Shuffle the operands around so the lane index operand is in the
9515 // right place.
9516 unsigned Spacing;
9517 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9518 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9520 Spacing));
9522 Spacing * 2));
9523 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9524 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9525 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9526 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9527 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9529 Spacing));
9531 Spacing * 2));
9532 TmpInst.addOperand(Inst.getOperand(1)); // lane
9533 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9534 TmpInst.addOperand(Inst.getOperand(6));
9535 Inst = TmpInst;
9536 return true;
9537 }
9538
9539 case ARM::VLD4LNdWB_register_Asm_8:
9540 case ARM::VLD4LNdWB_register_Asm_16:
9541 case ARM::VLD4LNdWB_register_Asm_32:
9542 case ARM::VLD4LNqWB_register_Asm_16:
9543 case ARM::VLD4LNqWB_register_Asm_32: {
9544 MCInst TmpInst;
9545 // Shuffle the operands around so the lane index operand is in the
9546 // right place.
9547 unsigned Spacing;
9548 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9549 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9551 Spacing));
9553 Spacing * 2));
9555 Spacing * 3));
9556 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9557 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9558 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9559 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9560 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9562 Spacing));
9564 Spacing * 2));
9566 Spacing * 3));
9567 TmpInst.addOperand(Inst.getOperand(1)); // lane
9568 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9569 TmpInst.addOperand(Inst.getOperand(6));
9570 Inst = TmpInst;
9571 return true;
9572 }
9573
9574 case ARM::VLD1LNdWB_fixed_Asm_8:
9575 case ARM::VLD1LNdWB_fixed_Asm_16:
9576 case ARM::VLD1LNdWB_fixed_Asm_32: {
9577 MCInst TmpInst;
9578 // Shuffle the operands around so the lane index operand is in the
9579 // right place.
9580 unsigned Spacing;
9581 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9582 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9583 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9584 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9585 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9586 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9587 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9588 TmpInst.addOperand(Inst.getOperand(1)); // lane
9589 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9590 TmpInst.addOperand(Inst.getOperand(5));
9591 Inst = TmpInst;
9592 return true;
9593 }
9594
9595 case ARM::VLD2LNdWB_fixed_Asm_8:
9596 case ARM::VLD2LNdWB_fixed_Asm_16:
9597 case ARM::VLD2LNdWB_fixed_Asm_32:
9598 case ARM::VLD2LNqWB_fixed_Asm_16:
9599 case ARM::VLD2LNqWB_fixed_Asm_32: {
9600 MCInst TmpInst;
9601 // Shuffle the operands around so the lane index operand is in the
9602 // right place.
9603 unsigned Spacing;
9604 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9605 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9607 Spacing));
9608 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9609 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9610 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9611 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9612 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9614 Spacing));
9615 TmpInst.addOperand(Inst.getOperand(1)); // lane
9616 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9617 TmpInst.addOperand(Inst.getOperand(5));
9618 Inst = TmpInst;
9619 return true;
9620 }
9621
9622 case ARM::VLD3LNdWB_fixed_Asm_8:
9623 case ARM::VLD3LNdWB_fixed_Asm_16:
9624 case ARM::VLD3LNdWB_fixed_Asm_32:
9625 case ARM::VLD3LNqWB_fixed_Asm_16:
9626 case ARM::VLD3LNqWB_fixed_Asm_32: {
9627 MCInst TmpInst;
9628 // Shuffle the operands around so the lane index operand is in the
9629 // right place.
9630 unsigned Spacing;
9631 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9632 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9634 Spacing));
9636 Spacing * 2));
9637 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9638 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9639 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9640 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9641 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9643 Spacing));
9645 Spacing * 2));
9646 TmpInst.addOperand(Inst.getOperand(1)); // lane
9647 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9648 TmpInst.addOperand(Inst.getOperand(5));
9649 Inst = TmpInst;
9650 return true;
9651 }
9652
9653 case ARM::VLD4LNdWB_fixed_Asm_8:
9654 case ARM::VLD4LNdWB_fixed_Asm_16:
9655 case ARM::VLD4LNdWB_fixed_Asm_32:
9656 case ARM::VLD4LNqWB_fixed_Asm_16:
9657 case ARM::VLD4LNqWB_fixed_Asm_32: {
9658 MCInst TmpInst;
9659 // Shuffle the operands around so the lane index operand is in the
9660 // right place.
9661 unsigned Spacing;
9662 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9663 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9665 Spacing));
9667 Spacing * 2));
9669 Spacing * 3));
9670 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9671 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9672 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9673 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9674 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9676 Spacing));
9678 Spacing * 2));
9680 Spacing * 3));
9681 TmpInst.addOperand(Inst.getOperand(1)); // lane
9682 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9683 TmpInst.addOperand(Inst.getOperand(5));
9684 Inst = TmpInst;
9685 return true;
9686 }
9687
9688 case ARM::VLD1LNdAsm_8:
9689 case ARM::VLD1LNdAsm_16:
9690 case ARM::VLD1LNdAsm_32: {
9691 MCInst TmpInst;
9692 // Shuffle the operands around so the lane index operand is in the
9693 // right place.
9694 unsigned Spacing;
9695 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9696 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9697 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9698 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9699 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9700 TmpInst.addOperand(Inst.getOperand(1)); // lane
9701 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9702 TmpInst.addOperand(Inst.getOperand(5));
9703 Inst = TmpInst;
9704 return true;
9705 }
9706
9707 case ARM::VLD2LNdAsm_8:
9708 case ARM::VLD2LNdAsm_16:
9709 case ARM::VLD2LNdAsm_32:
9710 case ARM::VLD2LNqAsm_16:
9711 case ARM::VLD2LNqAsm_32: {
9712 MCInst TmpInst;
9713 // Shuffle the operands around so the lane index operand is in the
9714 // right place.
9715 unsigned Spacing;
9716 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9717 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9719 Spacing));
9720 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9721 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9722 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9724 Spacing));
9725 TmpInst.addOperand(Inst.getOperand(1)); // lane
9726 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9727 TmpInst.addOperand(Inst.getOperand(5));
9728 Inst = TmpInst;
9729 return true;
9730 }
9731
9732 case ARM::VLD3LNdAsm_8:
9733 case ARM::VLD3LNdAsm_16:
9734 case ARM::VLD3LNdAsm_32:
9735 case ARM::VLD3LNqAsm_16:
9736 case ARM::VLD3LNqAsm_32: {
9737 MCInst TmpInst;
9738 // Shuffle the operands around so the lane index operand is in the
9739 // right place.
9740 unsigned Spacing;
9741 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9742 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9744 Spacing));
9746 Spacing * 2));
9747 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9748 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9749 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9751 Spacing));
9753 Spacing * 2));
9754 TmpInst.addOperand(Inst.getOperand(1)); // lane
9755 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9756 TmpInst.addOperand(Inst.getOperand(5));
9757 Inst = TmpInst;
9758 return true;
9759 }
9760
9761 case ARM::VLD4LNdAsm_8:
9762 case ARM::VLD4LNdAsm_16:
9763 case ARM::VLD4LNdAsm_32:
9764 case ARM::VLD4LNqAsm_16:
9765 case ARM::VLD4LNqAsm_32: {
9766 MCInst TmpInst;
9767 // Shuffle the operands around so the lane index operand is in the
9768 // right place.
9769 unsigned Spacing;
9770 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9771 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9773 Spacing));
9775 Spacing * 2));
9777 Spacing * 3));
9778 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9779 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9780 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9782 Spacing));
9784 Spacing * 2));
9786 Spacing * 3));
9787 TmpInst.addOperand(Inst.getOperand(1)); // lane
9788 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9789 TmpInst.addOperand(Inst.getOperand(5));
9790 Inst = TmpInst;
9791 return true;
9792 }
9793
9794 // VLD3DUP single 3-element structure to all lanes instructions.
9795 case ARM::VLD3DUPdAsm_8:
9796 case ARM::VLD3DUPdAsm_16:
9797 case ARM::VLD3DUPdAsm_32:
9798 case ARM::VLD3DUPqAsm_8:
9799 case ARM::VLD3DUPqAsm_16:
9800 case ARM::VLD3DUPqAsm_32: {
9801 MCInst TmpInst;
9802 unsigned Spacing;
9803 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9804 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9806 Spacing));
9808 Spacing * 2));
9809 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9810 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9811 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9812 TmpInst.addOperand(Inst.getOperand(4));
9813 Inst = TmpInst;
9814 return true;
9815 }
9816
9817 case ARM::VLD3DUPdWB_fixed_Asm_8:
9818 case ARM::VLD3DUPdWB_fixed_Asm_16:
9819 case ARM::VLD3DUPdWB_fixed_Asm_32:
9820 case ARM::VLD3DUPqWB_fixed_Asm_8:
9821 case ARM::VLD3DUPqWB_fixed_Asm_16:
9822 case ARM::VLD3DUPqWB_fixed_Asm_32: {
9823 MCInst TmpInst;
9824 unsigned Spacing;
9825 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9826 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9828 Spacing));
9830 Spacing * 2));
9831 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9832 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9833 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9834 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9835 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9836 TmpInst.addOperand(Inst.getOperand(4));
9837 Inst = TmpInst;
9838 return true;
9839 }
9840
9841 case ARM::VLD3DUPdWB_register_Asm_8:
9842 case ARM::VLD3DUPdWB_register_Asm_16:
9843 case ARM::VLD3DUPdWB_register_Asm_32:
9844 case ARM::VLD3DUPqWB_register_Asm_8:
9845 case ARM::VLD3DUPqWB_register_Asm_16:
9846 case ARM::VLD3DUPqWB_register_Asm_32: {
9847 MCInst TmpInst;
9848 unsigned Spacing;
9849 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9850 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9852 Spacing));
9854 Spacing * 2));
9855 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9856 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9857 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9858 TmpInst.addOperand(Inst.getOperand(3)); // Rm
9859 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9860 TmpInst.addOperand(Inst.getOperand(5));
9861 Inst = TmpInst;
9862 return true;
9863 }
9864
9865 // VLD3 multiple 3-element structure instructions.
9866 case ARM::VLD3dAsm_8:
9867 case ARM::VLD3dAsm_16:
9868 case ARM::VLD3dAsm_32:
9869 case ARM::VLD3qAsm_8:
9870 case ARM::VLD3qAsm_16:
9871 case ARM::VLD3qAsm_32: {
9872 MCInst TmpInst;
9873 unsigned Spacing;
9874 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9875 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9877 Spacing));
9879 Spacing * 2));
9880 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9881 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9882 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9883 TmpInst.addOperand(Inst.getOperand(4));
9884 Inst = TmpInst;
9885 return true;
9886 }
9887
9888 case ARM::VLD3dWB_fixed_Asm_8:
9889 case ARM::VLD3dWB_fixed_Asm_16:
9890 case ARM::VLD3dWB_fixed_Asm_32:
9891 case ARM::VLD3qWB_fixed_Asm_8:
9892 case ARM::VLD3qWB_fixed_Asm_16:
9893 case ARM::VLD3qWB_fixed_Asm_32: {
9894 MCInst TmpInst;
9895 unsigned Spacing;
9896 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9897 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9899 Spacing));
9901 Spacing * 2));
9902 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9903 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9904 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9905 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9906 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9907 TmpInst.addOperand(Inst.getOperand(4));
9908 Inst = TmpInst;
9909 return true;
9910 }
9911
9912 case ARM::VLD3dWB_register_Asm_8:
9913 case ARM::VLD3dWB_register_Asm_16:
9914 case ARM::VLD3dWB_register_Asm_32:
9915 case ARM::VLD3qWB_register_Asm_8:
9916 case ARM::VLD3qWB_register_Asm_16:
9917 case ARM::VLD3qWB_register_Asm_32: {
9918 MCInst TmpInst;
9919 unsigned Spacing;
9920 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9921 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9923 Spacing));
9925 Spacing * 2));
9926 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9927 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9928 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9929 TmpInst.addOperand(Inst.getOperand(3)); // Rm
9930 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9931 TmpInst.addOperand(Inst.getOperand(5));
9932 Inst = TmpInst;
9933 return true;
9934 }
9935
9936 // VLD4DUP single 4-element structure to all lanes instructions.
9937 case ARM::VLD4DUPdAsm_8:
9938 case ARM::VLD4DUPdAsm_16:
9939 case ARM::VLD4DUPdAsm_32:
9940 case ARM::VLD4DUPqAsm_8:
9941 case ARM::VLD4DUPqAsm_16:
9942 case ARM::VLD4DUPqAsm_32: {
9943 MCInst TmpInst;
9944 unsigned Spacing;
9945 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9946 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9948 Spacing));
9950 Spacing * 2));
9952 Spacing * 3));
9953 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9954 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9955 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9956 TmpInst.addOperand(Inst.getOperand(4));
9957 Inst = TmpInst;
9958 return true;
9959 }
9960
9961 case ARM::VLD4DUPdWB_fixed_Asm_8:
9962 case ARM::VLD4DUPdWB_fixed_Asm_16:
9963 case ARM::VLD4DUPdWB_fixed_Asm_32:
9964 case ARM::VLD4DUPqWB_fixed_Asm_8:
9965 case ARM::VLD4DUPqWB_fixed_Asm_16:
9966 case ARM::VLD4DUPqWB_fixed_Asm_32: {
9967 MCInst TmpInst;
9968 unsigned Spacing;
9969 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9970 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9972 Spacing));
9974 Spacing * 2));
9976 Spacing * 3));
9977 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9978 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9979 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9980 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9981 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9982 TmpInst.addOperand(Inst.getOperand(4));
9983 Inst = TmpInst;
9984 return true;
9985 }
9986
9987 case ARM::VLD4DUPdWB_register_Asm_8:
9988 case ARM::VLD4DUPdWB_register_Asm_16:
9989 case ARM::VLD4DUPdWB_register_Asm_32:
9990 case ARM::VLD4DUPqWB_register_Asm_8:
9991 case ARM::VLD4DUPqWB_register_Asm_16:
9992 case ARM::VLD4DUPqWB_register_Asm_32: {
9993 MCInst TmpInst;
9994 unsigned Spacing;
9995 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9996 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9998 Spacing));
10000 Spacing * 2));
10002 Spacing * 3));
10003 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10004 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10005 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10006 TmpInst.addOperand(Inst.getOperand(3)); // Rm
10007 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10008 TmpInst.addOperand(Inst.getOperand(5));
10009 Inst = TmpInst;
10010 return true;
10011 }
10012
10013 // VLD4 multiple 4-element structure instructions.
10014 case ARM::VLD4dAsm_8:
10015 case ARM::VLD4dAsm_16:
10016 case ARM::VLD4dAsm_32:
10017 case ARM::VLD4qAsm_8:
10018 case ARM::VLD4qAsm_16:
10019 case ARM::VLD4qAsm_32: {
10020 MCInst TmpInst;
10021 unsigned Spacing;
10022 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10023 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10025 Spacing));
10027 Spacing * 2));
10029 Spacing * 3));
10030 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10031 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10032 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10033 TmpInst.addOperand(Inst.getOperand(4));
10034 Inst = TmpInst;
10035 return true;
10036 }
10037
10038 case ARM::VLD4dWB_fixed_Asm_8:
10039 case ARM::VLD4dWB_fixed_Asm_16:
10040 case ARM::VLD4dWB_fixed_Asm_32:
10041 case ARM::VLD4qWB_fixed_Asm_8:
10042 case ARM::VLD4qWB_fixed_Asm_16:
10043 case ARM::VLD4qWB_fixed_Asm_32: {
10044 MCInst TmpInst;
10045 unsigned Spacing;
10046 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10047 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10049 Spacing));
10051 Spacing * 2));
10053 Spacing * 3));
10054 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10055 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10056 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10057 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
10058 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10059 TmpInst.addOperand(Inst.getOperand(4));
10060 Inst = TmpInst;
10061 return true;
10062 }
10063
10064 case ARM::VLD4dWB_register_Asm_8:
10065 case ARM::VLD4dWB_register_Asm_16:
10066 case ARM::VLD4dWB_register_Asm_32:
10067 case ARM::VLD4qWB_register_Asm_8:
10068 case ARM::VLD4qWB_register_Asm_16:
10069 case ARM::VLD4qWB_register_Asm_32: {
10070 MCInst TmpInst;
10071 unsigned Spacing;
10072 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10073 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10075 Spacing));
10077 Spacing * 2));
10079 Spacing * 3));
10080 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10081 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10082 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10083 TmpInst.addOperand(Inst.getOperand(3)); // Rm
10084 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10085 TmpInst.addOperand(Inst.getOperand(5));
10086 Inst = TmpInst;
10087 return true;
10088 }
10089
10090 // VST3 multiple 3-element structure instructions.
10091 case ARM::VST3dAsm_8:
10092 case ARM::VST3dAsm_16:
10093 case ARM::VST3dAsm_32:
10094 case ARM::VST3qAsm_8:
10095 case ARM::VST3qAsm_16:
10096 case ARM::VST3qAsm_32: {
10097 MCInst TmpInst;
10098 unsigned Spacing;
10099 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10100 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10101 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10102 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10104 Spacing));
10106 Spacing * 2));
10107 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10108 TmpInst.addOperand(Inst.getOperand(4));
10109 Inst = TmpInst;
10110 return true;
10111 }
10112
10113 case ARM::VST3dWB_fixed_Asm_8:
10114 case ARM::VST3dWB_fixed_Asm_16:
10115 case ARM::VST3dWB_fixed_Asm_32:
10116 case ARM::VST3qWB_fixed_Asm_8:
10117 case ARM::VST3qWB_fixed_Asm_16:
10118 case ARM::VST3qWB_fixed_Asm_32: {
10119 MCInst TmpInst;
10120 unsigned Spacing;
10121 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10122 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10123 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10124 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10125 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
10126 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10128 Spacing));
10130 Spacing * 2));
10131 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10132 TmpInst.addOperand(Inst.getOperand(4));
10133 Inst = TmpInst;
10134 return true;
10135 }
10136
10137 case ARM::VST3dWB_register_Asm_8:
10138 case ARM::VST3dWB_register_Asm_16:
10139 case ARM::VST3dWB_register_Asm_32:
10140 case ARM::VST3qWB_register_Asm_8:
10141 case ARM::VST3qWB_register_Asm_16:
10142 case ARM::VST3qWB_register_Asm_32: {
10143 MCInst TmpInst;
10144 unsigned Spacing;
10145 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10146 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10147 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10148 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10149 TmpInst.addOperand(Inst.getOperand(3)); // Rm
10150 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10152 Spacing));
10154 Spacing * 2));
10155 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10156 TmpInst.addOperand(Inst.getOperand(5));
10157 Inst = TmpInst;
10158 return true;
10159 }
10160
10161 // VST4 multiple 4-element structure instructions.
10162 case ARM::VST4dAsm_8:
10163 case ARM::VST4dAsm_16:
10164 case ARM::VST4dAsm_32:
10165 case ARM::VST4qAsm_8:
10166 case ARM::VST4qAsm_16:
10167 case ARM::VST4qAsm_32: {
10168 MCInst TmpInst;
10169 unsigned Spacing;
10170 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10171 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10172 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10173 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10175 Spacing));
10177 Spacing * 2));
10179 Spacing * 3));
10180 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10181 TmpInst.addOperand(Inst.getOperand(4));
10182 Inst = TmpInst;
10183 return true;
10184 }
10185
10186 case ARM::VST4dWB_fixed_Asm_8:
10187 case ARM::VST4dWB_fixed_Asm_16:
10188 case ARM::VST4dWB_fixed_Asm_32:
10189 case ARM::VST4qWB_fixed_Asm_8:
10190 case ARM::VST4qWB_fixed_Asm_16:
10191 case ARM::VST4qWB_fixed_Asm_32: {
10192 MCInst TmpInst;
10193 unsigned Spacing;
10194 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10195 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10196 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10197 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10198 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
10199 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10201 Spacing));
10203 Spacing * 2));
10205 Spacing * 3));
10206 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10207 TmpInst.addOperand(Inst.getOperand(4));
10208 Inst = TmpInst;
10209 return true;
10210 }
10211
10212 case ARM::VST4dWB_register_Asm_8:
10213 case ARM::VST4dWB_register_Asm_16:
10214 case ARM::VST4dWB_register_Asm_32:
10215 case ARM::VST4qWB_register_Asm_8:
10216 case ARM::VST4qWB_register_Asm_16:
10217 case ARM::VST4qWB_register_Asm_32: {
10218 MCInst TmpInst;
10219 unsigned Spacing;
10220 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10221 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10222 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10223 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10224 TmpInst.addOperand(Inst.getOperand(3)); // Rm
10225 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10227 Spacing));
10229 Spacing * 2));
10231 Spacing * 3));
10232 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10233 TmpInst.addOperand(Inst.getOperand(5));
10234 Inst = TmpInst;
10235 return true;
10236 }
10237
10238 // Handle encoding choice for the shift-immediate instructions.
10239 case ARM::t2LSLri:
10240 case ARM::t2LSRri:
10241 case ARM::t2ASRri:
10242 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10243 isARMLowRegister(Inst.getOperand(1).getReg()) &&
10244 Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10245 !HasWideQualifier) {
10246 unsigned NewOpc;
10247 switch (Inst.getOpcode()) {
10248 default: llvm_unreachable("unexpected opcode");
10249 case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
10250 case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
10251 case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
10252 }
10253 // The Thumb1 operands aren't in the same order. Awesome, eh?
10254 MCInst TmpInst;
10255 TmpInst.setOpcode(NewOpc);
10256 TmpInst.addOperand(Inst.getOperand(0));
10257 TmpInst.addOperand(Inst.getOperand(5));
10258 TmpInst.addOperand(Inst.getOperand(1));
10259 TmpInst.addOperand(Inst.getOperand(2));
10260 TmpInst.addOperand(Inst.getOperand(3));
10261 TmpInst.addOperand(Inst.getOperand(4));
10262 Inst = TmpInst;
10263 return true;
10264 }
10265 return false;
10266
10267 // Handle the Thumb2 mode MOV complex aliases.
10268 case ARM::t2MOVsr:
10269 case ARM::t2MOVSsr: {
10270 // Which instruction to expand to depends on the CCOut operand and
10271 // whether we're in an IT block if the register operands are low
10272 // registers.
10273 bool isNarrow = false;
10274 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10275 isARMLowRegister(Inst.getOperand(1).getReg()) &&
10276 isARMLowRegister(Inst.getOperand(2).getReg()) &&
10277 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
10278 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr) &&
10279 !HasWideQualifier)
10280 isNarrow = true;
10281 MCInst TmpInst;
10282 unsigned newOpc;
10283 switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
10284 default: llvm_unreachable("unexpected opcode!");
10285 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
10286 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
10287 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
10288 case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr; break;
10289 }
10290 TmpInst.setOpcode(newOpc);
10291 TmpInst.addOperand(Inst.getOperand(0)); // Rd
10292 if (isNarrow)
10294 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
10295 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10296 TmpInst.addOperand(Inst.getOperand(2)); // Rm
10297 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10298 TmpInst.addOperand(Inst.getOperand(5));
10299 if (!isNarrow)
10301 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
10302 Inst = TmpInst;
10303 return true;
10304 }
10305 case ARM::t2MOVsi:
10306 case ARM::t2MOVSsi: {
10307 // Which instruction to expand to depends on the CCOut operand and
10308 // whether we're in an IT block if the register operands are low
10309 // registers.
10310 bool isNarrow = false;
10311 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10312 isARMLowRegister(Inst.getOperand(1).getReg()) &&
10313 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi) &&
10314 !HasWideQualifier)
10315 isNarrow = true;
10316 MCInst TmpInst;
10317 unsigned newOpc;
10318 unsigned Shift = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
10319 unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
10320 bool isMov = false;
10321 // MOV rd, rm, LSL #0 is actually a MOV instruction
10322 if (Shift == ARM_AM::lsl && Amount == 0) {
10323 isMov = true;
10324 // The 16-bit encoding of MOV rd, rm, LSL #N is explicitly encoding T2 of
10325 // MOV (register) in the ARMv8-A and ARMv8-M manuals, and immediate 0 is
10326 // unpredictable in an IT block so the 32-bit encoding T3 has to be used
10327 // instead.
10328 if (inITBlock()) {
10329 isNarrow = false;
10330 }
10331 newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr;
10332 } else {
10333 switch(Shift) {
10334 default: llvm_unreachable("unexpected opcode!");
10335 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
10336 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
10337 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
10338 case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
10339 case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
10340 }
10341 }
10342 if (Amount == 32) Amount = 0;
10343 TmpInst.setOpcode(newOpc);
10344 TmpInst.addOperand(Inst.getOperand(0)); // Rd
10345 if (isNarrow && !isMov)
10347 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
10348 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10349 if (newOpc != ARM::t2RRX && !isMov)
10350 TmpInst.addOperand(MCOperand::createImm(Amount));
10351 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10352 TmpInst.addOperand(Inst.getOperand(4));
10353 if (!isNarrow)
10355 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
10356 Inst = TmpInst;
10357 return true;
10358 }
10359 // Handle the ARM mode MOV complex aliases.
10360 case ARM::ASRr:
10361 case ARM::LSRr:
10362 case ARM::LSLr:
10363 case ARM::RORr: {
10364 ARM_AM::ShiftOpc ShiftTy;
10365 switch(Inst.getOpcode()) {
10366 default: llvm_unreachable("unexpected opcode!");
10367 case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
10368 case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
10369 case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
10370 case ARM::RORr: ShiftTy = ARM_AM::ror; break;
10371 }
10372 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
10373 MCInst TmpInst;
10374 TmpInst.setOpcode(ARM::MOVsr);
10375 TmpInst.addOperand(Inst.getOperand(0)); // Rd
10376 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10377 TmpInst.addOperand(Inst.getOperand(2)); // Rm
10378 TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10379 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10380 TmpInst.addOperand(Inst.getOperand(4));
10381 TmpInst.addOperand(Inst.getOperand(5)); // cc_out
10382 Inst = TmpInst;
10383 return true;
10384 }
10385 case ARM::ASRi:
10386 case ARM::LSRi:
10387 case ARM::LSLi:
10388 case ARM::RORi: {
10389 ARM_AM::ShiftOpc ShiftTy;
10390 switch(Inst.getOpcode()) {
10391 default: llvm_unreachable("unexpected opcode!");
10392 case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
10393 case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
10394 case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
10395 case ARM::RORi: ShiftTy = ARM_AM::ror; break;
10396 }
10397 // A shift by zero is a plain MOVr, not a MOVsi.
10398 unsigned Amt = Inst.getOperand(2).getImm();
10399 unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
10400 // A shift by 32 should be encoded as 0 when permitted
10401 if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
10402 Amt = 0;
10403 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
10404 MCInst TmpInst;
10405 TmpInst.setOpcode(Opc);
10406 TmpInst.addOperand(Inst.getOperand(0)); // Rd
10407 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10408 if (Opc == ARM::MOVsi)
10409 TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10410 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10411 TmpInst.addOperand(Inst.getOperand(4));
10412 TmpInst.addOperand(Inst.getOperand(5)); // cc_out
10413 Inst = TmpInst;
10414 return true;
10415 }
10416 case ARM::RRXi: {
10417 unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
10418 MCInst TmpInst;
10419 TmpInst.setOpcode(ARM::MOVsi);
10420 TmpInst.addOperand(Inst.getOperand(0)); // Rd
10421 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10422 TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10423 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10424 TmpInst.addOperand(Inst.getOperand(3));
10425 TmpInst.addOperand(Inst.getOperand(4)); // cc_out
10426 Inst = TmpInst;
10427 return true;
10428 }
10429 case ARM::t2LDMIA_UPD: {
10430 // If this is a load of a single register, then we should use
10431 // a post-indexed LDR instruction instead, per the ARM ARM.
10432 if (Inst.getNumOperands() != 5)
10433 return false;
10434 MCInst TmpInst;
10435 TmpInst.setOpcode(ARM::t2LDR_POST);
10436 TmpInst.addOperand(Inst.getOperand(4)); // Rt
10437 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10438 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10439 TmpInst.addOperand(MCOperand::createImm(4));
10440 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10441 TmpInst.addOperand(Inst.getOperand(3));
10442 Inst = TmpInst;
10443 return true;
10444 }
10445 case ARM::t2STMDB_UPD: {
10446 // If this is a store of a single register, then we should use
10447 // a pre-indexed STR instruction instead, per the ARM ARM.
10448 if (Inst.getNumOperands() != 5)
10449 return false;
10450 MCInst TmpInst;
10451 TmpInst.setOpcode(ARM::t2STR_PRE);
10452 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10453 TmpInst.addOperand(Inst.getOperand(4)); // Rt
10454 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10455 TmpInst.addOperand(MCOperand::createImm(-4));
10456 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10457 TmpInst.addOperand(Inst.getOperand(3));
10458 Inst = TmpInst;
10459 return true;
10460 }
10461 case ARM::LDMIA_UPD:
10462 // If this is a load of a single register via a 'pop', then we should use
10463 // a post-indexed LDR instruction instead, per the ARM ARM.
10464 if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
10465 Inst.getNumOperands() == 5) {
10466 MCInst TmpInst;
10467 TmpInst.setOpcode(ARM::LDR_POST_IMM);
10468 TmpInst.addOperand(Inst.getOperand(4)); // Rt
10469 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10470 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10471 TmpInst.addOperand(MCOperand::createReg(0)); // am2offset
10472 TmpInst.addOperand(MCOperand::createImm(4));
10473 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10474 TmpInst.addOperand(Inst.getOperand(3));
10475 Inst = TmpInst;
10476 return true;
10477 }
10478 break;
10479 case ARM::STMDB_UPD:
10480 // If this is a store of a single register via a 'push', then we should use
10481 // a pre-indexed STR instruction instead, per the ARM ARM.
10482 if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
10483 Inst.getNumOperands() == 5) {
10484 MCInst TmpInst;
10485 TmpInst.setOpcode(ARM::STR_PRE_IMM);
10486 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10487 TmpInst.addOperand(Inst.getOperand(4)); // Rt
10488 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
10489 TmpInst.addOperand(MCOperand::createImm(-4));
10490 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10491 TmpInst.addOperand(Inst.getOperand(3));
10492 Inst = TmpInst;
10493 }
10494 break;
10495 case ARM::t2ADDri12:
10496 case ARM::t2SUBri12:
10497 case ARM::t2ADDspImm12:
10498 case ARM::t2SUBspImm12: {
10499 // If the immediate fits for encoding T3 and the generic
10500 // mnemonic was used, encoding T3 is preferred.
10501 const StringRef Token = static_cast<ARMOperand &>(*Operands[0]).getToken();
10502 if ((Token != "add" && Token != "sub") ||
10503 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
10504 break;
10505 switch (Inst.getOpcode()) {
10506 case ARM::t2ADDri12:
10507 Inst.setOpcode(ARM::t2ADDri);
10508 break;
10509 case ARM::t2SUBri12:
10510 Inst.setOpcode(ARM::t2SUBri);
10511 break;
10512 case ARM::t2ADDspImm12:
10513 Inst.setOpcode(ARM::t2ADDspImm);
10514 break;
10515 case ARM::t2SUBspImm12:
10516 Inst.setOpcode(ARM::t2SUBspImm);
10517 break;
10518 }
10519
10520 Inst.addOperand(MCOperand::createReg(0)); // cc_out
10521 return true;
10522 }
10523 case ARM::tADDi8:
10524 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
10525 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
10526 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
10527 // to encoding T1 if <Rd> is omitted."
10528 if (Inst.getOperand(3).isImm() &&
10529 (unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
10530 Inst.setOpcode(ARM::tADDi3);
10531 return true;
10532 }
10533 break;
10534 case ARM::tSUBi8:
10535 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
10536 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
10537 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
10538 // to encoding T1 if <Rd> is omitted."
10539 if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
10540 Inst.setOpcode(ARM::tSUBi3);
10541 return true;
10542 }
10543 break;
10544 case ARM::t2ADDri:
10545 case ARM::t2SUBri: {
10546 // If the destination and first source operand are the same, and
10547 // the flags are compatible with the current IT status, use encoding T2
10548 // instead of T3. For compatibility with the system 'as'. Make sure the
10549 // wide encoding wasn't explicit.
10550 if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
10551 !isARMLowRegister(Inst.getOperand(0).getReg()) ||
10552 (Inst.getOperand(2).isImm() &&
10553 (unsigned)Inst.getOperand(2).getImm() > 255) ||
10554 Inst.getOperand(5).getReg() != (inITBlock() ? 0 : ARM::CPSR) ||
10555 HasWideQualifier)
10556 break;
10557 MCInst TmpInst;
10558 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
10559 ARM::tADDi8 : ARM::tSUBi8);
10560 TmpInst.addOperand(Inst.getOperand(0));
10561 TmpInst.addOperand(Inst.getOperand(5));
10562 TmpInst.addOperand(Inst.getOperand(0));
10563 TmpInst.addOperand(Inst.getOperand(2));
10564 TmpInst.addOperand(Inst.getOperand(3));
10565 TmpInst.addOperand(Inst.getOperand(4));
10566 Inst = TmpInst;
10567 return true;
10568 }
10569 case ARM::t2ADDspImm:
10570 case ARM::t2SUBspImm: {
10571 // Prefer T1 encoding if possible
10572 if (Inst.getOperand(5).getReg() != 0 || HasWideQualifier)
10573 break;
10574 unsigned V = Inst.getOperand(2).getImm();
10575 if (V & 3 || V > ((1 << 7) - 1) << 2)
10576 break;
10577 MCInst TmpInst;
10578 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDspImm ? ARM::tADDspi
10579 : ARM::tSUBspi);
10580 TmpInst.addOperand(MCOperand::createReg(ARM::SP)); // destination reg
10581 TmpInst.addOperand(MCOperand::createReg(ARM::SP)); // source reg
10582 TmpInst.addOperand(MCOperand::createImm(V / 4)); // immediate
10583 TmpInst.addOperand(Inst.getOperand(3)); // pred
10584 TmpInst.addOperand(Inst.getOperand(4));
10585 Inst = TmpInst;
10586 return true;
10587 }
10588 case ARM::t2ADDrr: {
10589 // If the destination and first source operand are the same, and
10590 // there's no setting of the flags, use encoding T2 instead of T3.
10591 // Note that this is only for ADD, not SUB. This mirrors the system
10592 // 'as' behaviour. Also take advantage of ADD being commutative.
10593 // Make sure the wide encoding wasn't explicit.
10594 bool Swap = false;
10595 auto DestReg = Inst.getOperand(0).getReg();
10596 bool Transform = DestReg == Inst.getOperand(1).getReg();
10597 if (!Transform && DestReg == Inst.getOperand(2).getReg()) {
10598 Transform = true;
10599 Swap = true;
10600 }
10601 if (!Transform ||
10602 Inst.getOperand(5).getReg() != 0 ||
10603 HasWideQualifier)
10604 break;
10605 MCInst TmpInst;
10606 TmpInst.setOpcode(ARM::tADDhirr);
10607 TmpInst.addOperand(Inst.getOperand(0));
10608 TmpInst.addOperand(Inst.getOperand(0));
10609 TmpInst.addOperand(Inst.getOperand(Swap ? 1 : 2));
10610 TmpInst.addOperand(Inst.getOperand(3));
10611 TmpInst.addOperand(Inst.getOperand(4));
10612 Inst = TmpInst;
10613 return true;
10614 }
10615 case ARM::tADDrSP:
10616 // If the non-SP source operand and the destination operand are not the
10617 // same, we need to use the 32-bit encoding if it's available.
10618 if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
10619 Inst.setOpcode(ARM::t2ADDrr);
10620 Inst.addOperand(MCOperand::createReg(0)); // cc_out
10621 return true;
10622 }
10623 break;
10624 case ARM::tB:
10625 // A Thumb conditional branch outside of an IT block is a tBcc.
10626 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
10627 Inst.setOpcode(ARM::tBcc);
10628 return true;
10629 }
10630 break;
10631 case ARM::t2B:
10632 // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
10633 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
10634 Inst.setOpcode(ARM::t2Bcc);
10635 return true;
10636 }
10637 break;
10638 case ARM::t2Bcc:
10639 // If the conditional is AL or we're in an IT block, we really want t2B.
10640 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
10641 Inst.setOpcode(ARM::t2B);
10642 return true;
10643 }
10644 break;
10645 case ARM::tBcc:
10646 // If the conditional is AL, we really want tB.
10647 if (Inst.getOperand(1).getImm() == ARMCC::AL) {
10648 Inst.setOpcode(ARM::tB);
10649 return true;
10650 }
10651 break;
10652 case ARM::tLDMIA: {
10653 // If the register list contains any high registers, or if the writeback
10654 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
10655 // instead if we're in Thumb2. Otherwise, this should have generated
10656 // an error in validateInstruction().
10657 unsigned Rn = Inst.getOperand(0).getReg();
10658 bool hasWritebackToken =
10659 (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
10660 static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
10661 bool listContainsBase;
10662 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
10663 (!listContainsBase && !hasWritebackToken) ||
10664 (listContainsBase && hasWritebackToken)) {
10665 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
10666 assert(isThumbTwo());
10667 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
10668 // If we're switching to the updating version, we need to insert
10669 // the writeback tied operand.
10670 if (hasWritebackToken)
10671 Inst.insert(Inst.begin(),
10673 return true;
10674 }
10675 break;
10676 }
10677 case ARM::tSTMIA_UPD: {
10678 // If the register list contains any high registers, we need to use
10679 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
10680 // should have generated an error in validateInstruction().
10681 unsigned Rn = Inst.getOperand(0).getReg();
10682 bool listContainsBase;
10683 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
10684 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
10685 assert(isThumbTwo());
10686 Inst.setOpcode(ARM::t2STMIA_UPD);
10687 return true;
10688 }
10689 break;
10690 }
10691 case ARM::tPOP: {
10692 bool listContainsBase;
10693 // If the register list contains any high registers, we need to use
10694 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
10695 // should have generated an error in validateInstruction().
10696 if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
10697 return false;
10698 assert(isThumbTwo());
10699 Inst.setOpcode(ARM::t2LDMIA_UPD);
10700 // Add the base register and writeback operands.
10701 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10702 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10703 return true;
10704 }
10705 case ARM::tPUSH: {
10706 bool listContainsBase;
10707 if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
10708 return false;
10709 assert(isThumbTwo());
10710 Inst.setOpcode(ARM::t2STMDB_UPD);
10711 // Add the base register and writeback operands.
10712 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10713 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10714 return true;
10715 }
10716 case ARM::t2MOVi:
10717 // If we can use the 16-bit encoding and the user didn't explicitly
10718 // request the 32-bit variant, transform it here.
10719 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10720 (Inst.getOperand(1).isImm() &&
10721 (unsigned)Inst.getOperand(1).getImm() <= 255) &&
10722 Inst.getOperand(4).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10723 !HasWideQualifier) {
10724 // The operands aren't in the same order for tMOVi8...
10725 MCInst TmpInst;
10726 TmpInst.setOpcode(ARM::tMOVi8);
10727 TmpInst.addOperand(Inst.getOperand(0));
10728 TmpInst.addOperand(Inst.getOperand(4));
10729 TmpInst.addOperand(Inst.getOperand(1));
10730 TmpInst.addOperand(Inst.getOperand(2));
10731 TmpInst.addOperand(Inst.getOperand(3));
10732 Inst = TmpInst;
10733 return true;
10734 }
10735 break;
10736
10737 case ARM::t2MOVr:
10738 // If we can use the 16-bit encoding and the user didn't explicitly
10739 // request the 32-bit variant, transform it here.
10740 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10741 isARMLowRegister(Inst.getOperand(1).getReg()) &&
10742 Inst.getOperand(2).getImm() == ARMCC::AL &&
10743 Inst.getOperand(4).getReg() == ARM::CPSR &&
10744 !HasWideQualifier) {
10745 // The operands aren't the same for tMOV[S]r... (no cc_out)
10746 MCInst TmpInst;
10747 unsigned Op = Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr;
10748 TmpInst.setOpcode(Op);
10749 TmpInst.addOperand(Inst.getOperand(0));
10750 TmpInst.addOperand(Inst.getOperand(1));
10751 if (Op == ARM::tMOVr) {
10752 TmpInst.addOperand(Inst.getOperand(2));
10753 TmpInst.addOperand(Inst.getOperand(3));
10754 }
10755 Inst = TmpInst;
10756 return true;
10757 }
10758 break;
10759
10760 case ARM::t2SXTH:
10761 case ARM::t2SXTB:
10762 case ARM::t2UXTH:
10763 case ARM::t2UXTB:
10764 // If we can use the 16-bit encoding and the user didn't explicitly
10765 // request the 32-bit variant, transform it here.
10766 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10767 isARMLowRegister(Inst.getOperand(1).getReg()) &&
10768 Inst.getOperand(2).getImm() == 0 &&
10769 !HasWideQualifier) {
10770 unsigned NewOpc;
10771 switch (Inst.getOpcode()) {
10772 default: llvm_unreachable("Illegal opcode!");
10773 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
10774 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
10775 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
10776 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
10777 }
10778 // The operands aren't the same for thumb1 (no rotate operand).
10779 MCInst TmpInst;
10780 TmpInst.setOpcode(NewOpc);
10781 TmpInst.addOperand(Inst.getOperand(0));
10782 TmpInst.addOperand(Inst.getOperand(1));
10783 TmpInst.addOperand(Inst.getOperand(3));
10784 TmpInst.addOperand(Inst.getOperand(4));
10785 Inst = TmpInst;
10786 return true;
10787 }
10788 break;
10789
10790 case ARM::MOVsi: {
10792 // rrx shifts and asr/lsr of #32 is encoded as 0
10793 if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
10794 return false;
10795 if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
10796 // Shifting by zero is accepted as a vanilla 'MOVr'
10797 MCInst TmpInst;
10798 TmpInst.setOpcode(ARM::MOVr);
10799 TmpInst.addOperand(Inst.getOperand(0));
10800 TmpInst.addOperand(Inst.getOperand(1));
10801 TmpInst.addOperand(Inst.getOperand(3));
10802 TmpInst.addOperand(Inst.getOperand(4));
10803 TmpInst.addOperand(Inst.getOperand(5));
10804 Inst = TmpInst;
10805 return true;
10806 }
10807 return false;
10808 }
10809 case ARM::ANDrsi:
10810 case ARM::ORRrsi:
10811 case ARM::EORrsi:
10812 case ARM::BICrsi:
10813 case ARM::SUBrsi:
10814 case ARM::ADDrsi: {
10815 unsigned newOpc;
10817 if (SOpc == ARM_AM::rrx) return false;
10818 switch (Inst.getOpcode()) {
10819 default: llvm_unreachable("unexpected opcode!");
10820 case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
10821 case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
10822 case ARM::EORrsi: newOpc = ARM::EORrr; break;
10823 case ARM::BICrsi: newOpc = ARM::BICrr; break;
10824 case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
10825 case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
10826 }
10827 // If the shift is by zero, use the non-shifted instruction definition.
10828 // The exception is for right shifts, where 0 == 32
10829 if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
10830 !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
10831 MCInst TmpInst;
10832 TmpInst.setOpcode(newOpc);
10833 TmpInst.addOperand(Inst.getOperand(0));
10834 TmpInst.addOperand(Inst.getOperand(1));
10835 TmpInst.addOperand(Inst.getOperand(2));
10836 TmpInst.addOperand(Inst.getOperand(4));
10837 TmpInst.addOperand(Inst.getOperand(5));
10838 TmpInst.addOperand(Inst.getOperand(6));
10839 Inst = TmpInst;
10840 return true;
10841 }
10842 return false;
10843 }
10844 case ARM::ITasm:
10845 case ARM::t2IT: {
10846 // Set up the IT block state according to the IT instruction we just
10847 // matched.
10848 assert(!inITBlock() && "nested IT blocks?!");
10849 startExplicitITBlock(ARMCC::CondCodes(Inst.getOperand(0).getImm()),
10850 Inst.getOperand(1).getImm());
10851 break;
10852 }
10853 case ARM::t2LSLrr:
10854 case ARM::t2LSRrr:
10855 case ARM::t2ASRrr:
10856 case ARM::t2SBCrr:
10857 case ARM::t2RORrr:
10858 case ARM::t2BICrr:
10859 // Assemblers should use the narrow encodings of these instructions when permissible.
10860 if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
10861 isARMLowRegister(Inst.getOperand(2).getReg())) &&
10862 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
10863 Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10864 !HasWideQualifier) {
10865 unsigned NewOpc;
10866 switch (Inst.getOpcode()) {
10867 default: llvm_unreachable("unexpected opcode");
10868 case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
10869 case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
10870 case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
10871 case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
10872 case ARM::t2RORrr: NewOpc = ARM::tROR; break;
10873 case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
10874 }
10875 MCInst TmpInst;
10876 TmpInst.setOpcode(NewOpc);
10877 TmpInst.addOperand(Inst.getOperand(0));
10878 TmpInst.addOperand(Inst.getOperand(5));
10879 TmpInst.addOperand(Inst.getOperand(1));
10880 TmpInst.addOperand(Inst.getOperand(2));
10881 TmpInst.addOperand(Inst.getOperand(3));
10882 TmpInst.addOperand(Inst.getOperand(4));
10883 Inst = TmpInst;
10884 return true;
10885 }
10886 return false;
10887
10888 case ARM::t2ANDrr:
10889 case ARM::t2EORrr:
10890 case ARM::t2ADCrr:
10891 case ARM::t2ORRrr:
10892 // Assemblers should use the narrow encodings of these instructions when permissible.
10893 // These instructions are special in that they are commutable, so shorter encodings
10894 // are available more often.
10895 if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
10896 isARMLowRegister(Inst.getOperand(2).getReg())) &&
10897 (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
10898 Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
10899 Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10900 !HasWideQualifier) {
10901 unsigned NewOpc;
10902 switch (Inst.getOpcode()) {
10903 default: llvm_unreachable("unexpected opcode");
10904 case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
10905 case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
10906 case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
10907 case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
10908 }
10909 MCInst TmpInst;
10910 TmpInst.setOpcode(NewOpc);
10911 TmpInst.addOperand(Inst.getOperand(0));
10912 TmpInst.addOperand(Inst.getOperand(5));
10913 if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
10914 TmpInst.addOperand(Inst.getOperand(1));
10915 TmpInst.addOperand(Inst.getOperand(2));
10916 } else {
10917 TmpInst.addOperand(Inst.getOperand(2));
10918 TmpInst.addOperand(Inst.getOperand(1));
10919 }
10920 TmpInst.addOperand(Inst.getOperand(3));
10921 TmpInst.addOperand(Inst.getOperand(4));
10922 Inst = TmpInst;
10923 return true;
10924 }
10925 return false;
10926 case ARM::MVE_VPST:
10927 case ARM::MVE_VPTv16i8:
10928 case ARM::MVE_VPTv8i16:
10929 case ARM::MVE_VPTv4i32:
10930 case ARM::MVE_VPTv16u8:
10931 case ARM::MVE_VPTv8u16:
10932 case ARM::MVE_VPTv4u32:
10933 case ARM::MVE_VPTv16s8:
10934 case ARM::MVE_VPTv8s16:
10935 case ARM::MVE_VPTv4s32:
10936 case ARM::MVE_VPTv4f32:
10937 case ARM::MVE_VPTv8f16:
10938 case ARM::MVE_VPTv16i8r:
10939 case ARM::MVE_VPTv8i16r:
10940 case ARM::MVE_VPTv4i32r:
10941 case ARM::MVE_VPTv16u8r:
10942 case ARM::MVE_VPTv8u16r:
10943 case ARM::MVE_VPTv4u32r:
10944 case ARM::MVE_VPTv16s8r:
10945 case ARM::MVE_VPTv8s16r:
10946 case ARM::MVE_VPTv4s32r:
10947 case ARM::MVE_VPTv4f32r:
10948 case ARM::MVE_VPTv8f16r: {
10949 assert(!inVPTBlock() && "Nested VPT blocks are not allowed");
10950 MCOperand &MO = Inst.getOperand(0);
10951 VPTState.Mask = MO.getImm();
10952 VPTState.CurPosition = 0;
10953 break;
10954 }
10955 }
10956 return false;
10957}
10958
10959unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
10960 // 16-bit thumb arithmetic instructions either require or preclude the 'S'
10961 // suffix depending on whether they're in an IT block or not.
10962 unsigned Opc = Inst.getOpcode();
10963 const MCInstrDesc &MCID = MII.get(Opc);
10965 assert(MCID.hasOptionalDef() &&
10966 "optionally flag setting instruction missing optional def operand");
10967 assert(MCID.NumOperands == Inst.getNumOperands() &&
10968 "operand count mismatch!");
10969 // Find the optional-def operand (cc_out).
10970 unsigned OpNo;
10971 for (OpNo = 0;
10972 OpNo < MCID.NumOperands && !MCID.operands()[OpNo].isOptionalDef();
10973 ++OpNo)
10974 ;
10975 // If we're parsing Thumb1, reject it completely.
10976 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
10977 return Match_RequiresFlagSetting;
10978 // If we're parsing Thumb2, which form is legal depends on whether we're
10979 // in an IT block.
10980 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
10981 !inITBlock())
10982 return Match_RequiresITBlock;
10983 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
10984 inITBlock())
10985 return Match_RequiresNotITBlock;
10986 // LSL with zero immediate is not allowed in an IT block
10987 if (Opc == ARM::tLSLri && Inst.getOperand(3).getImm() == 0 && inITBlock())
10988 return Match_RequiresNotITBlock;
10989 } else if (isThumbOne()) {
10990 // Some high-register supporting Thumb1 encodings only allow both registers
10991 // to be from r0-r7 when in Thumb2.
10992 if (Opc == ARM::tADDhirr && !hasV6MOps() &&
10993 isARMLowRegister(Inst.getOperand(1).getReg()) &&
10995 return Match_RequiresThumb2;
10996 // Others only require ARMv6 or later.
10997 else if (Opc == ARM::tMOVr && !hasV6Ops() &&
10998 isARMLowRegister(Inst.getOperand(0).getReg()) &&
11000 return Match_RequiresV6;
11001 }
11002
11003 // Before ARMv8 the rules for when SP is allowed in t2MOVr are more complex
11004 // than the loop below can handle, so it uses the GPRnopc register class and
11005 // we do SP handling here.
11006 if (Opc == ARM::t2MOVr && !hasV8Ops())
11007 {
11008 // SP as both source and destination is not allowed
11009 if (Inst.getOperand(0).getReg() == ARM::SP &&
11010 Inst.getOperand(1).getReg() == ARM::SP)
11011 return Match_RequiresV8;
11012 // When flags-setting SP as either source or destination is not allowed
11013 if (Inst.getOperand(4).getReg() == ARM::CPSR &&
11014 (Inst.getOperand(0).getReg() == ARM::SP ||
11015 Inst.getOperand(1).getReg() == ARM::SP))
11016 return Match_RequiresV8;
11017 }
11018
11019 switch (Inst.getOpcode()) {
11020 case ARM::VMRS:
11021 case ARM::VMSR:
11022 case ARM::VMRS_FPCXTS:
11023 case ARM::VMRS_FPCXTNS:
11024 case ARM::VMSR_FPCXTS:
11025 case ARM::VMSR_FPCXTNS:
11026 case ARM::VMRS_FPSCR_NZCVQC:
11027 case ARM::VMSR_FPSCR_NZCVQC:
11028 case ARM::FMSTAT:
11029 case ARM::VMRS_VPR:
11030 case ARM::VMRS_P0:
11031 case ARM::VMSR_VPR:
11032 case ARM::VMSR_P0:
11033 // Use of SP for VMRS/VMSR is only allowed in ARM mode with the exception of
11034 // ARMv8-A.
11035 if (Inst.getOperand(0).isReg() && Inst.getOperand(0).getReg() == ARM::SP &&
11036 (isThumb() && !hasV8Ops()))
11037 return Match_InvalidOperand;
11038 break;
11039 case ARM::t2TBB:
11040 case ARM::t2TBH:
11041 // Rn = sp is only allowed with ARMv8-A
11042 if (!hasV8Ops() && (Inst.getOperand(0).getReg() == ARM::SP))
11043 return Match_RequiresV8;
11044 break;
11045 default:
11046 break;
11047 }
11048
11049 for (unsigned I = 0; I < MCID.NumOperands; ++I)
11050 if (MCID.operands()[I].RegClass == ARM::rGPRRegClassID) {
11051 // rGPRRegClass excludes PC, and also excluded SP before ARMv8
11052 const auto &Op = Inst.getOperand(I);
11053 if (!Op.isReg()) {
11054 // This can happen in awkward cases with tied operands, e.g. a
11055 // writeback load/store with a complex addressing mode in
11056 // which there's an output operand corresponding to the
11057 // updated written-back base register: the Tablegen-generated
11058 // AsmMatcher will have written a placeholder operand to that
11059 // slot in the form of an immediate 0, because it can't
11060 // generate the register part of the complex addressing-mode
11061 // operand ahead of time.
11062 continue;
11063 }
11064
11065 unsigned Reg = Op.getReg();
11066 if ((Reg == ARM::SP) && !hasV8Ops())
11067 return Match_RequiresV8;
11068 else if (Reg == ARM::PC)
11069 return Match_InvalidOperand;
11070 }
11071
11072 return Match_Success;
11073}
11074
11075namespace llvm {
11076
11077template <> inline bool IsCPSRDead<MCInst>(const MCInst *Instr) {
11078 return true; // In an assembly source, no need to second-guess
11079}
11080
11081} // end namespace llvm
11082
11083// Returns true if Inst is unpredictable if it is in and IT block, but is not
11084// the last instruction in the block.
11085bool ARMAsmParser::isITBlockTerminator(MCInst &Inst) const {
11086 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11087
11088 // All branch & call instructions terminate IT blocks with the exception of
11089 // SVC.
11090 if (MCID.isTerminator() || (MCID.isCall() && Inst.getOpcode() != ARM::tSVC) ||
11091 MCID.isReturn() || MCID.isBranch() || MCID.isIndirectBranch())
11092 return true;
11093
11094 // Any arithmetic instruction which writes to the PC also terminates the IT
11095 // block.
11096 if (MCID.hasDefOfPhysReg(Inst, ARM::PC, *MRI))
11097 return true;
11098
11099 return false;
11100}
11101
11102unsigned ARMAsmParser::MatchInstruction(OperandVector &Operands, MCInst &Inst,
11104 bool MatchingInlineAsm,
11105 bool &EmitInITBlock,
11106 MCStreamer &Out) {
11107 // If we can't use an implicit IT block here, just match as normal.
11108 if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
11109 return MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
11110
11111 // Try to match the instruction in an extension of the current IT block (if
11112 // there is one).
11113 if (inImplicitITBlock()) {
11114 extendImplicitITBlock(ITState.Cond);
11115 if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
11116 Match_Success) {
11117 // The match succeded, but we still have to check that the instruction is
11118 // valid in this implicit IT block.
11119 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11120 if (MCID.isPredicable()) {
11121 ARMCC::CondCodes InstCond =
11123 .getImm();
11124 ARMCC::CondCodes ITCond = currentITCond();
11125 if (InstCond == ITCond) {
11126 EmitInITBlock = true;
11127 return Match_Success;
11128 } else if (InstCond == ARMCC::getOppositeCondition(ITCond)) {
11129 invertCurrentITCondition();
11130 EmitInITBlock = true;
11131 return Match_Success;
11132 }
11133 }
11134 }
11135 rewindImplicitITPosition();
11136 }
11137
11138 // Finish the current IT block, and try to match outside any IT block.
11139 flushPendingInstructions(Out);
11140 unsigned PlainMatchResult =
11141 MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
11142 if (PlainMatchResult == Match_Success) {
11143 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11144 if (MCID.isPredicable()) {
11145 ARMCC::CondCodes InstCond =
11147 .getImm();
11148 // Some forms of the branch instruction have their own condition code
11149 // fields, so can be conditionally executed without an IT block.
11150 if (Inst.getOpcode() == ARM::tBcc || Inst.getOpcode() == ARM::t2Bcc) {
11151 EmitInITBlock = false;
11152 return Match_Success;
11153 }
11154 if (InstCond == ARMCC::AL) {
11155 EmitInITBlock = false;
11156 return Match_Success;
11157 }
11158 } else {
11159 EmitInITBlock = false;
11160 return Match_Success;
11161 }
11162 }
11163
11164 // Try to match in a new IT block. The matcher doesn't check the actual
11165 // condition, so we create an IT block with a dummy condition, and fix it up
11166 // once we know the actual condition.
11167 startImplicitITBlock();
11168 if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
11169 Match_Success) {
11170 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11171 if (MCID.isPredicable()) {
11172 ITState.Cond =
11174 .getImm();
11175 EmitInITBlock = true;
11176 return Match_Success;
11177 }
11178 }
11179 discardImplicitITBlock();
11180
11181 // If none of these succeed, return the error we got when trying to match
11182 // outside any IT blocks.
11183 EmitInITBlock = false;
11184 return PlainMatchResult;
11185}
11186
11187static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS,
11188 unsigned VariantID = 0);
11189
11190static const char *getSubtargetFeatureName(uint64_t Val);
11191bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
11194 bool MatchingInlineAsm) {
11195 MCInst Inst;
11196 unsigned MatchResult;
11197 bool PendConditionalInstruction = false;
11198
11200 MatchResult = MatchInstruction(Operands, Inst, NearMisses, MatchingInlineAsm,
11201 PendConditionalInstruction, Out);
11202
11203 switch (MatchResult) {
11204 case Match_Success:
11205 LLVM_DEBUG(dbgs() << "Parsed as: ";
11206 Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
11207 dbgs() << "\n");
11208
11209 // Context sensitive operand constraints aren't handled by the matcher,
11210 // so check them here.
11211 if (validateInstruction(Inst, Operands)) {
11212 // Still progress the IT block, otherwise one wrong condition causes
11213 // nasty cascading errors.
11214 forwardITPosition();
11215 forwardVPTPosition();
11216 return true;
11217 }
11218
11219 {
11220 // Some instructions need post-processing to, for example, tweak which
11221 // encoding is selected. Loop on it while changes happen so the
11222 // individual transformations can chain off each other. E.g.,
11223 // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
11224 while (processInstruction(Inst, Operands, Out))
11225 LLVM_DEBUG(dbgs() << "Changed to: ";
11226 Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
11227 dbgs() << "\n");
11228 }
11229
11230 // Only move forward at the very end so that everything in validate
11231 // and process gets a consistent answer about whether we're in an IT
11232 // block.
11233 forwardITPosition();
11234 forwardVPTPosition();
11235
11236 // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
11237 // doesn't actually encode.
11238 if (Inst.getOpcode() == ARM::ITasm)
11239 return false;
11240
11241 Inst.setLoc(IDLoc);
11242 if (PendConditionalInstruction) {
11243 PendingConditionalInsts.push_back(Inst);
11244 if (isITBlockFull() || isITBlockTerminator(Inst))
11245 flushPendingInstructions(Out);
11246 } else {
11247 Out.emitInstruction(Inst, getSTI());
11248 }
11249 return false;
11250 case Match_NearMisses:
11251 ReportNearMisses(NearMisses, IDLoc, Operands);
11252 return true;
11253 case Match_MnemonicFail: {
11254 FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
11255 std::string Suggestion = ARMMnemonicSpellCheck(
11256 ((ARMOperand &)*Operands[0]).getToken(), FBS);
11257 return Error(IDLoc, "invalid instruction" + Suggestion,
11258 ((ARMOperand &)*Operands[0]).getLocRange());
11259 }
11260 }
11261
11262 llvm_unreachable("Implement any new match types added!");
11263}
11264
/// parseDirective parses the arm specific directives
/// Returns false when the directive has been handled here, true to hand the
/// statement back to the generic directive parser.
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
  // Some directives are only legal for a particular object-file container,
  // so determine which one is being targeted first.
  const MCContext::Environment Format = getContext().getObjectFileType();
  bool IsMachO = Format == MCContext::IsMachO;
  bool IsCOFF = Format == MCContext::IsCOFF;

  // Directive names are matched case-insensitively.
  std::string IDVal = DirectiveID.getIdentifier().lower();
  // Directives accepted for every container format.
  if (IDVal == ".word")
    parseLiteralValues(4, DirectiveID.getLoc());
  else if (IDVal == ".short" || IDVal == ".hword")
    parseLiteralValues(2, DirectiveID.getLoc());
  else if (IDVal == ".thumb")
    parseDirectiveThumb(DirectiveID.getLoc());
  else if (IDVal == ".arm")
    parseDirectiveARM(DirectiveID.getLoc());
  else if (IDVal == ".thumb_func")
    parseDirectiveThumbFunc(DirectiveID.getLoc());
  else if (IDVal == ".code")
    parseDirectiveCode(DirectiveID.getLoc());
  else if (IDVal == ".syntax")
    parseDirectiveSyntax(DirectiveID.getLoc());
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(DirectiveID.getLoc());
  else if (IDVal == ".fnend")
    parseDirectiveFnEnd(DirectiveID.getLoc());
  else if (IDVal == ".cantunwind")
    parseDirectiveCantUnwind(DirectiveID.getLoc());
  else if (IDVal == ".personality")
    parseDirectivePersonality(DirectiveID.getLoc());
  else if (IDVal == ".handlerdata")
    parseDirectiveHandlerData(DirectiveID.getLoc());
  else if (IDVal == ".setfp")
    parseDirectiveSetFP(DirectiveID.getLoc());
  else if (IDVal == ".pad")
    parseDirectivePad(DirectiveID.getLoc());
  else if (IDVal == ".save")
    parseDirectiveRegSave(DirectiveID.getLoc(), false);
  else if (IDVal == ".vsave")
    parseDirectiveRegSave(DirectiveID.getLoc(), true);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(DirectiveID.getLoc());
  else if (IDVal == ".even")
    parseDirectiveEven(DirectiveID.getLoc());
  else if (IDVal == ".personalityindex")
    parseDirectivePersonalityIndex(DirectiveID.getLoc());
  else if (IDVal == ".unwind_raw")
    parseDirectiveUnwindRaw(DirectiveID.getLoc());
  else if (IDVal == ".movsp")
    parseDirectiveMovSP(DirectiveID.getLoc());
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(DirectiveID.getLoc());
  else if (IDVal == ".align")
    return parseDirectiveAlign(DirectiveID.getLoc()); // Use Generic on failure.
  else if (IDVal == ".thumb_set")
    parseDirectiveThumbSet(DirectiveID.getLoc());
  else if (IDVal == ".inst")
    parseDirectiveInst(DirectiveID.getLoc());
  else if (IDVal == ".inst.n")
    parseDirectiveInst(DirectiveID.getLoc(), 'n');
  else if (IDVal == ".inst.w")
    parseDirectiveInst(DirectiveID.getLoc(), 'w');
  else if (!IsMachO && !IsCOFF) {
    // Directives only accepted for ELF (neither MachO nor COFF).
    if (IDVal == ".arch")
      parseDirectiveArch(DirectiveID.getLoc());
    else if (IDVal == ".cpu")
      parseDirectiveCPU(DirectiveID.getLoc());
    else if (IDVal == ".eabi_attribute")
      parseDirectiveEabiAttr(DirectiveID.getLoc());
    else if (IDVal == ".fpu")
      parseDirectiveFPU(DirectiveID.getLoc());
    else if (IDVal == ".fnstart")
      parseDirectiveFnStart(DirectiveID.getLoc());
    else if (IDVal == ".object_arch")
      parseDirectiveObjectArch(DirectiveID.getLoc());
    else if (IDVal == ".tlsdescseq")
      parseDirectiveTLSDescSeq(DirectiveID.getLoc());
    else
      return true; // Not ours; defer to the generic directive parser.
  } else if (IsCOFF) {
    // COFF-only (Windows) structured exception handling directives.
    if (IDVal == ".seh_stackalloc")
      parseDirectiveSEHAllocStack(DirectiveID.getLoc(), /*Wide=*/false);
    else if (IDVal == ".seh_stackalloc_w")
      parseDirectiveSEHAllocStack(DirectiveID.getLoc(), /*Wide=*/true);
    else if (IDVal == ".seh_save_regs")
      parseDirectiveSEHSaveRegs(DirectiveID.getLoc(), /*Wide=*/false);
    else if (IDVal == ".seh_save_regs_w")
      parseDirectiveSEHSaveRegs(DirectiveID.getLoc(), /*Wide=*/true);
    else if (IDVal == ".seh_save_sp")
      parseDirectiveSEHSaveSP(DirectiveID.getLoc());
    else if (IDVal == ".seh_save_fregs")
      parseDirectiveSEHSaveFRegs(DirectiveID.getLoc());
    else if (IDVal == ".seh_save_lr")
      parseDirectiveSEHSaveLR(DirectiveID.getLoc());
    else if (IDVal == ".seh_endprologue")
      parseDirectiveSEHPrologEnd(DirectiveID.getLoc(), /*Fragment=*/false);
    else if (IDVal == ".seh_endprologue_fragment")
      parseDirectiveSEHPrologEnd(DirectiveID.getLoc(), /*Fragment=*/true);
    else if (IDVal == ".seh_nop")
      parseDirectiveSEHNop(DirectiveID.getLoc(), /*Wide=*/false);
    else if (IDVal == ".seh_nop_w")
      parseDirectiveSEHNop(DirectiveID.getLoc(), /*Wide=*/true);
    else if (IDVal == ".seh_startepilogue")
      parseDirectiveSEHEpilogStart(DirectiveID.getLoc(), /*Condition=*/false);
    else if (IDVal == ".seh_startepilogue_cond")
      parseDirectiveSEHEpilogStart(DirectiveID.getLoc(), /*Condition=*/true);
    else if (IDVal == ".seh_endepilogue")
      parseDirectiveSEHEpilogEnd(DirectiveID.getLoc());
    else if (IDVal == ".seh_custom")
      parseDirectiveSEHCustom(DirectiveID.getLoc());
    else
      return true; // Not ours; defer to the generic directive parser.
  } else
    return true; // MachO directive we don't handle here.
  return false;
}
11380
11381/// parseLiteralValues
11382/// ::= .hword expression [, expression]*
11383/// ::= .short expression [, expression]*
11384/// ::= .word expression [, expression]*
11385bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
11386 auto parseOne = [&]() -> bool {
11387 const MCExpr *Value;
11388 if (getParser().parseExpression(Value))
11389 return true;
11390 getParser().getStreamer().emitValue(Value, Size, L);
11391 return false;
11392 };
11393 return (parseMany(parseOne));
11394}
11395
11396/// parseDirectiveThumb
11397/// ::= .thumb
11398bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
11399 if (parseEOL() || check(!hasThumb(), L, "target does not support Thumb mode"))
11400 return true;
11401
11402 if (!isThumb())
11403 SwitchMode();
11404
11405 getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11406 getParser().getStreamer().emitCodeAlignment(Align(2), &getSTI(), 0);
11407 return false;
11408}
11409
11410/// parseDirectiveARM
11411/// ::= .arm
11412bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
11413 if (parseEOL() || check(!hasARM(), L, "target does not support ARM mode"))
11414 return true;
11415
11416 if (isThumb())
11417 SwitchMode();
11418 getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
11419 getParser().getStreamer().emitCodeAlignment(Align(4), &getSTI(), 0);
11420 return false;
11421}
11422
// Maps a symbol-reference modifier name to the corresponding MCSymbolRefExpr
// variant kind (e.g. "secrel32" -> VK_SECREL), per the visible .Case entries.
// NOTE(review): this excerpt is missing the function's return-type line, the
// StringSwitch header/default and a number of .Case entries — consult the
// full file before modifying.
ARMAsmParser::getVariantKindForName(StringRef Name) const {
      .Case("gotfuncdesc", MCSymbolRefExpr::VK_GOTFUNCDESC)
      .Case("gotofffuncdesc", MCSymbolRefExpr::VK_GOTOFFFUNCDESC)
      .Case("gottpoff_fdpic", MCSymbolRefExpr::VK_GOTTPOFF_FDPIC)
      .Case("secrel32", MCSymbolRefExpr::VK_SECREL)
      .Case("tlsgd_fdpic", MCSymbolRefExpr::VK_TLSGD_FDPIC)
      .Case("tlsldm_fdpic", MCSymbolRefExpr::VK_TLSLDM_FDPIC)
}
11453
void ARMAsmParser::doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) {
  // We need to flush the current implicit IT block on a label, because it is
  // not legal to branch into an IT block.
  // Symbol and IDLoc are intentionally unused: the flush happens
  // unconditionally, regardless of which label is being emitted.
  flushPendingInstructions(getStreamer());
}
11459
11460void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
11461 if (NextSymbolIsThumb) {
11462 getParser().getStreamer().emitThumbFunc(Symbol);
11463 NextSymbolIsThumb = false;
11464 }
11465}
11466
11467/// parseDirectiveThumbFunc
11468/// ::= .thumbfunc symbol_name
11469bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
11470 MCAsmParser &Parser = getParser();
11471 const auto Format = getContext().getObjectFileType();
11472 bool IsMachO = Format == MCContext::IsMachO;
11473
11474 // Darwin asm has (optionally) function name after .thumb_func direction
11475 // ELF doesn't
11476
11477 if (IsMachO) {
11478 if (Parser.getTok().is(AsmToken::Identifier) ||
11479 Parser.getTok().is(AsmToken::String)) {
11480 MCSymbol *Func = getParser().getContext().getOrCreateSymbol(
11481 Parser.getTok().getIdentifier());
11482 getParser().getStreamer().emitThumbFunc(Func);
11483 Parser.Lex();
11484 if (parseEOL())
11485 return true;
11486 return false;
11487 }
11488 }
11489
11490 if (parseEOL())
11491 return true;
11492
11493 // .thumb_func implies .thumb
11494 if (!isThumb())
11495 SwitchMode();
11496
11497 getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11498
11499 NextSymbolIsThumb = true;
11500 return false;
11501}
11502
11503/// parseDirectiveSyntax
11504/// ::= .syntax unified | divided
11505bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
11506 MCAsmParser &Parser = getParser();
11507 const AsmToken &Tok = Parser.getTok();
11508 if (Tok.isNot(AsmToken::Identifier)) {
11509 Error(L, "unexpected token in .syntax directive");
11510 return false;
11511 }
11512
11513 StringRef Mode = Tok.getString();
11514 Parser.Lex();
11515 if (check(Mode == "divided" || Mode == "DIVIDED", L,
11516 "'.syntax divided' arm assembly not supported") ||
11517 check(Mode != "unified" && Mode != "UNIFIED", L,
11518 "unrecognized syntax mode in .syntax directive") ||
11519 parseEOL())
11520 return true;
11521
11522 // TODO tell the MC streamer the mode
11523 // getParser().getStreamer().Emit???();
11524 return false;
11525}
11526
11527/// parseDirectiveCode
11528/// ::= .code 16 | 32
11529bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
11530 MCAsmParser &Parser = getParser();
11531 const AsmToken &Tok = Parser.getTok();
11532 if (Tok.isNot(AsmToken::Integer))
11533 return Error(L, "unexpected token in .code directive");
11534 int64_t Val = Parser.getTok().getIntVal();
11535 if (Val != 16 && Val != 32) {
11536 Error(L, "invalid operand to .code directive");
11537 return false;
11538 }
11539 Parser.Lex();
11540
11541 if (parseEOL())
11542 return true;
11543
11544 if (Val == 16) {
11545 if (!hasThumb())
11546 return Error(L, "target does not support Thumb mode");
11547
11548 if (!isThumb())
11549 SwitchMode();
11550 getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11551 } else {
11552 if (!hasARM())
11553 return Error(L, "target does not support ARM mode");
11554
11555 if (isThumb())
11556 SwitchMode();
11557 getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
11558 }
11559
11560 return false;
11561}
11562
11563/// parseDirectiveReq
11564/// ::= name .req registername
11565bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
11566 MCAsmParser &Parser = getParser();
11567 Parser.Lex(); // Eat the '.req' token.
11569 SMLoc SRegLoc, ERegLoc;
11570 if (check(parseRegister(Reg, SRegLoc, ERegLoc), SRegLoc,
11571 "register name expected") ||
11572 parseEOL())
11573 return true;
11574
11575 if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg)
11576 return Error(SRegLoc,
11577 "redefinition of '" + Name + "' does not match original.");
11578
11579 return false;
11580}
11581
11582/// parseDirectiveUneq
11583/// ::= .unreq registername
11584bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
11585 MCAsmParser &Parser = getParser();
11586 if (Parser.getTok().isNot(AsmToken::Identifier))
11587 return Error(L, "unexpected input in .unreq directive.");
11588 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
11589 Parser.Lex(); // Eat the identifier.
11590 return parseEOL();
11591}
11592
// After changing arch/CPU, try to put the ARM/Thumb mode back to what it was
// before, if supported by the new target, or emit mapping symbols for the mode
// switch.
void ARMAsmParser::FixModeAfterArchChange(bool WasThumb, SMLoc Loc) {
  // Only act if the feature change flipped the current ARM/Thumb mode.
  if (WasThumb != isThumb()) {
    if (WasThumb && hasThumb()) {
      // Stay in Thumb mode
      SwitchMode();
    } else if (!WasThumb && hasARM()) {
      // Stay in ARM mode
      SwitchMode();
    } else {
      // Mode switch forced, because the new arch doesn't support the old mode.
      getParser().getStreamer().emitAssemblerFlag(isThumb() ? MCAF_Code16
                                                            : MCAF_Code32);
      // Warn about the implicit mode switch. GAS does not switch modes here,
      // but instead stays in the old mode, reporting an error on any following
      // instructions as the mode does not exist on the target.
      Warning(Loc, Twine("new target does not support ") +
                       (WasThumb ? "thumb" : "arm") + " mode, switching to " +
                       (!WasThumb ? "thumb" : "arm") + " mode");
    }
  }
}
11617
11618/// parseDirectiveArch
11619/// ::= .arch token
11620bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
11621 StringRef Arch = getParser().parseStringToEndOfStatement().trim();
11623
11624 if (ID == ARM::ArchKind::INVALID)
11625 return Error(L, "Unknown arch name");
11626
11627 bool WasThumb = isThumb();
11628 Triple T;
11629 MCSubtargetInfo &STI = copySTI();
11630 STI.setDefaultFeatures("", /*TuneCPU*/ "",
11631 ("+" + ARM::getArchName(ID)).str());
11632 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11633 FixModeAfterArchChange(WasThumb, L);
11634
11635 getTargetStreamer().emitArch(ID);
11636 return false;
11637}
11638
11639/// parseDirectiveEabiAttr
11640/// ::= .eabi_attribute int, int [, "str"]
11641/// ::= .eabi_attribute Tag_name, int [, "str"]
11642bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
11643 MCAsmParser &Parser = getParser();
11644 int64_t Tag;
11645 SMLoc TagLoc;
11646 TagLoc = Parser.getTok().getLoc();
11647 if (Parser.getTok().is(AsmToken::Identifier)) {
11648 StringRef Name = Parser.getTok().getIdentifier();
11649 std::optional<unsigned> Ret = ELFAttrs::attrTypeFromString(
11651 if (!Ret) {
11652 Error(TagLoc, "attribute name not recognised: " + Name);
11653 return false;
11654 }
11655 Tag = *Ret;
11656 Parser.Lex();
11657 } else {
11658 const MCExpr *AttrExpr;
11659
11660 TagLoc = Parser.getTok().getLoc();
11661 if (Parser.parseExpression(AttrExpr))
11662 return true;
11663
11664 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
11665 if (check(!CE, TagLoc, "expected numeric constant"))
11666 return true;
11667
11668 Tag = CE->getValue();
11669 }
11670
11671 if (Parser.parseComma())
11672 return true;
11673
11674 StringRef StringValue = "";
11675 bool IsStringValue = false;
11676
11677 int64_t IntegerValue = 0;
11678 bool IsIntegerValue = false;
11679
11681 IsStringValue = true;
11682 else if (Tag == ARMBuildAttrs::compatibility) {
11683 IsStringValue = true;
11684 IsIntegerValue = true;
11685 } else if (Tag < 32 || Tag % 2 == 0)
11686 IsIntegerValue = true;
11687 else if (Tag % 2 == 1)
11688 IsStringValue = true;
11689 else
11690 llvm_unreachable("invalid tag type");
11691
11692 if (IsIntegerValue) {
11693 const MCExpr *ValueExpr;
11694 SMLoc ValueExprLoc = Parser.getTok().getLoc();
11695 if (Parser.parseExpression(ValueExpr))
11696 return true;
11697
11698 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
11699 if (!CE)
11700 return Error(ValueExprLoc, "expected numeric constant");
11701 IntegerValue = CE->getValue();
11702 }
11703
11705 if (Parser.parseComma())
11706 return true;
11707 }
11708
11709 std::string EscapedValue;
11710 if (IsStringValue) {
11711 if (Parser.getTok().isNot(AsmToken::String))
11712 return Error(Parser.getTok().getLoc(), "bad string constant");
11713
11715 if (Parser.parseEscapedString(EscapedValue))
11716 return Error(Parser.getTok().getLoc(), "bad escaped string constant");
11717
11718 StringValue = EscapedValue;
11719 } else {
11720 StringValue = Parser.getTok().getStringContents();
11721 Parser.Lex();
11722 }
11723 }
11724
11725 if (Parser.parseEOL())
11726 return true;
11727
11728 if (IsIntegerValue && IsStringValue) {
11730 getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
11731 } else if (IsIntegerValue)
11732 getTargetStreamer().emitAttribute(Tag, IntegerValue);
11733 else if (IsStringValue)
11734 getTargetStreamer().emitTextAttribute(Tag, StringValue);
11735 return false;
11736}
11737
11738/// parseDirectiveCPU
11739/// ::= .cpu str
11740bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
11741 StringRef CPU = getParser().parseStringToEndOfStatement().trim();
11742 getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
11743
11744 // FIXME: This is using table-gen data, but should be moved to
11745 // ARMTargetParser once that is table-gen'd.
11746 if (!getSTI().isCPUStringValid(CPU))
11747 return Error(L, "Unknown CPU name");
11748
11749 bool WasThumb = isThumb();
11750 MCSubtargetInfo &STI = copySTI();
11751 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
11752 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11753 FixModeAfterArchChange(WasThumb, L);
11754
11755 return false;
11756}
11757
11758/// parseDirectiveFPU
11759/// ::= .fpu str
11760bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
11761 SMLoc FPUNameLoc = getTok().getLoc();
11762 StringRef FPU = getParser().parseStringToEndOfStatement().trim();
11763
11765 std::vector<StringRef> Features;
11766 if (!ARM::getFPUFeatures(ID, Features))
11767 return Error(FPUNameLoc, "Unknown FPU name");
11768
11769 MCSubtargetInfo &STI = copySTI();
11770 for (auto Feature : Features)
11771 STI.ApplyFeatureFlag(Feature);
11772 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
11773
11774 getTargetStreamer().emitFPU(ID);
11775 return false;
11776}
11777
11778/// parseDirectiveFnStart
11779/// ::= .fnstart
11780bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
11781 if (parseEOL())
11782 return true;
11783
11784 if (UC.hasFnStart()) {
11785 Error(L, ".fnstart starts before the end of previous one");
11786 UC.emitFnStartLocNotes();
11787 return true;
11788 }
11789
11790 // Reset the unwind directives parser state
11791 UC.reset();
11792
11793 getTargetStreamer().emitFnStart();
11794
11795 UC.recordFnStart(L);
11796 return false;
11797}
11798
11799/// parseDirectiveFnEnd
11800/// ::= .fnend
11801bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
11802 if (parseEOL())
11803 return true;
11804 // Check the ordering of unwind directives
11805 if (!UC.hasFnStart())
11806 return Error(L, ".fnstart must precede .fnend directive");
11807
11808 // Reset the unwind directives parser state
11809 getTargetStreamer().emitFnEnd();
11810
11811 UC.reset();
11812 return false;
11813}
11814
11815/// parseDirectiveCantUnwind
11816/// ::= .cantunwind
11817bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
11818 if (parseEOL())
11819 return true;
11820
11821 UC.recordCantUnwind(L);
11822 // Check the ordering of unwind directives
11823 if (check(!UC.hasFnStart(), L, ".fnstart must precede .cantunwind directive"))
11824 return true;
11825
11826 if (UC.hasHandlerData()) {
11827 Error(L, ".cantunwind can't be used with .handlerdata directive");
11828 UC.emitHandlerDataLocNotes();
11829 return true;
11830 }
11831 if (UC.hasPersonality()) {
11832 Error(L, ".cantunwind can't be used with .personality directive");
11833 UC.emitPersonalityLocNotes();
11834 return true;
11835 }
11836
11837 getTargetStreamer().emitCantUnwind();
11838 return false;
11839}
11840
/// parseDirectivePersonality
/// ::= .personality name
bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Snapshot whether a personality already existed BEFORE recording this
  // one, so duplicates can still be diagnosed below.
  bool HasExistingPersonality = UC.hasPersonality();

  // Parse the name of the personality routine
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return Error(L, "unexpected input in .personality directive.");
  StringRef Name(Parser.getTok().getIdentifier());
  Parser.Lex();

  if (parseEOL())
    return true;

  UC.recordPersonality(L);

  // Check the ordering of unwind directives
  if (!UC.hasFnStart())
    return Error(L, ".fnstart must precede .personality directive");
  if (UC.cantUnwind()) {
    Error(L, ".personality can't be used with .cantunwind directive");
    UC.emitCantUnwindLocNotes();
    return true;
  }
  if (UC.hasHandlerData()) {
    Error(L, ".personality must precede .handlerdata directive");
    UC.emitHandlerDataLocNotes();
    return true;
  }
  if (HasExistingPersonality) {
    Error(L, "multiple personality directives");
    UC.emitPersonalityLocNotes();
    return true;
  }

  // Emit a reference to the named personality routine symbol.
  MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name);
  getTargetStreamer().emitPersonality(PR);
  return false;
}
11881
11882/// parseDirectiveHandlerData
11883/// ::= .handlerdata
11884bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
11885 if (parseEOL())
11886 return true;
11887
11888 UC.recordHandlerData(L);
11889 // Check the ordering of unwind directives
11890 if (!UC.hasFnStart())
11891 return Error(L, ".fnstart must precede .personality directive");
11892 if (UC.cantUnwind()) {
11893 Error(L, ".handlerdata can't be used with .cantunwind directive");
11894 UC.emitCantUnwindLocNotes();
11895 return true;
11896 }
11897
11898 getTargetStreamer().emitHandlerData();
11899 return false;
11900}
11901
/// parseDirectiveSetFP
/// ::= .setfp fpreg, spreg [, offset]
bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Check the ordering of unwind directives
  if (check(!UC.hasFnStart(), L, ".fnstart must precede .setfp directive") ||
      check(UC.hasHandlerData(), L,
            ".setfp must precede .handlerdata directive"))
    return true;

  // Parse fpreg
  SMLoc FPRegLoc = Parser.getTok().getLoc();
  int FPReg = tryParseRegister();

  if (check(FPReg == -1, FPRegLoc, "frame pointer register expected") ||
      Parser.parseComma())
    return true;

  // Parse spreg. It must be sp itself or the frame-pointer register last
  // recorded by a prior .setfp/.movsp (UC.getFPReg()); the check runs
  // BEFORE the new FP register is saved below.
  SMLoc SPRegLoc = Parser.getTok().getLoc();
  int SPReg = tryParseRegister();
  if (check(SPReg == -1, SPRegLoc, "stack pointer register expected") ||
      check(SPReg != ARM::SP && SPReg != UC.getFPReg(), SPRegLoc,
            "register should be either $sp or the latest fp register"))
    return true;

  // Update the frame pointer register
  UC.saveFPReg(FPReg);

  // Parse offset
  int64_t Offset = 0;
  if (Parser.parseOptionalToken(AsmToken::Comma)) {
    // An immediate offset must be introduced with '#' or '$'.
    if (Parser.getTok().isNot(AsmToken::Hash) &&
        Parser.getTok().isNot(AsmToken::Dollar))
      return Error(Parser.getTok().getLoc(), "'#' expected");
    Parser.Lex(); // skip hash token.

    const MCExpr *OffsetExpr;
    SMLoc ExLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    if (getParser().parseExpression(OffsetExpr, EndLoc))
      return Error(ExLoc, "malformed setfp offset");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
    if (check(!CE, ExLoc, "setfp offset must be an immediate"))
      return true;
    Offset = CE->getValue();
  }

  if (Parser.parseEOL())
    return true;

  getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
                                static_cast<unsigned>(SPReg), Offset);
  return false;
}
11957
11958/// parseDirective
11959/// ::= .pad offset
11960bool ARMAsmParser::parseDirectivePad(SMLoc L) {
11961 MCAsmParser &Parser = getParser();
11962 // Check the ordering of unwind directives
11963 if (!UC.hasFnStart())
11964 return Error(L, ".fnstart must precede .pad directive");
11965 if (UC.hasHandlerData())
11966 return Error(L, ".pad must precede .handlerdata directive");
11967
11968 // Parse the offset
11969 if (Parser.getTok().isNot(AsmToken::Hash) &&
11970 Parser.getTok().isNot(AsmToken::Dollar))
11971 return Error(Parser.getTok().getLoc(), "'#' expected");
11972 Parser.Lex(); // skip hash token.
11973
11974 const MCExpr *OffsetExpr;
11975 SMLoc ExLoc = Parser.getTok().getLoc();
11976 SMLoc EndLoc;
11977 if (getParser().parseExpression(OffsetExpr, EndLoc))
11978 return Error(ExLoc, "malformed pad offset");
11979 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
11980 if (!CE)
11981 return Error(ExLoc, "pad offset must be an immediate");
11982
11983 if (parseEOL())
11984 return true;
11985
11986 getTargetStreamer().emitPad(CE->getValue());
11987 return false;
11988}
11989
11990/// parseDirectiveRegSave
11991/// ::= .save { registers }
11992/// ::= .vsave { registers }
11993bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
11994 // Check the ordering of unwind directives
11995 if (!UC.hasFnStart())
11996 return Error(L, ".fnstart must precede .save or .vsave directives");
11997 if (UC.hasHandlerData())
11998 return Error(L, ".save or .vsave must precede .handlerdata directive");
11999
12000 // RAII object to make sure parsed operands are deleted.
12002
12003 // Parse the register list
12004 if (parseRegisterList(Operands, true, true) || parseEOL())
12005 return true;
12006 ARMOperand &Op = (ARMOperand &)*Operands[0];
12007 if (!IsVector && !Op.isRegList())
12008 return Error(L, ".save expects GPR registers");
12009 if (IsVector && !Op.isDPRRegList())
12010 return Error(L, ".vsave expects DPR registers");
12011
12012 getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
12013 return false;
12014}
12015
/// parseDirectiveInst
/// ::= .inst opcode [, ...]
/// ::= .inst.n opcode [, ...]
/// ::= .inst.w opcode [, ...]
///
/// Emits raw instruction words. In Thumb mode the suffix selects the
/// encoding width ('n' narrow/2 bytes, 'w' wide/4 bytes, none = guess per
/// opcode); in ARM mode any suffix is rejected and the width is 4 bytes.
bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
  int Width = 4;

  if (isThumb()) {
    switch (Suffix) {
    case 'n':
      Width = 2;
      break;
    case 'w':
      break;
    default:
      // No suffix: width 0 means "infer per opcode" below.
      Width = 0;
      break;
    }
  } else {
    if (Suffix)
      return Error(Loc, "width suffixes are invalid in ARM mode");
  }

  // Parse and emit one comma-separated opcode value.
  auto parseOne = [&]() -> bool {
    const MCExpr *Expr;
    if (getParser().parseExpression(Expr))
      return true;
    const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
    if (!Value) {
      return Error(Loc, "expected constant expression");
    }

    char CurSuffix = Suffix;
    switch (Width) {
    case 2:
      // Narrow encodings must fit in 16 bits.
      if (Value->getValue() > 0xffff)
        return Error(Loc, "inst.n operand is too big, use inst.w instead");
      break;
    case 4:
      if (Value->getValue() > 0xffffffff)
        return Error(Loc, StringRef(Suffix ? "inst.w" : "inst") +
                              " operand is too big");
      break;
    case 0:
      // Thumb mode, no width indicated. Guess from the opcode, if possible.
      if (Value->getValue() < 0xe800)
        CurSuffix = 'n';
      else if (Value->getValue() >= 0xe8000000)
        CurSuffix = 'w';
      else
        return Error(Loc, "cannot determine Thumb instruction size, "
                          "use inst.n/inst.w instead");
      break;
    default:
      llvm_unreachable("only supported widths are 2 and 4");
    }

    // Raw bytes still advance any open IT/VPT block position.
    getTargetStreamer().emitInst(Value->getValue(), CurSuffix);
    forwardITPosition();
    forwardVPTPosition();
    return false;
  };

  // At least one operand is required.
  if (parseOptionalToken(AsmToken::EndOfStatement))
    return Error(Loc, "expected expression following directive");
  if (parseMany(parseOne))
    return true;
  return false;
}
12085
12086/// parseDirectiveLtorg
12087/// ::= .ltorg | .pool
12088bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
12089 if (parseEOL())
12090 return true;
12091 getTargetStreamer().emitCurrentConstantPool();
12092 return false;
12093}
12094
12095bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
12096 const MCSection *Section = getStreamer().getCurrentSectionOnly();
12097
12098 if (parseEOL())
12099 return true;
12100
12101 if (!Section) {
12102 getStreamer().initSections(false, getSTI());
12103 Section = getStreamer().getCurrentSectionOnly();
12104 }
12105
12106 assert(Section && "must have section to emit alignment");
12107 if (Section->useCodeAlign())
12108 getStreamer().emitCodeAlignment(Align(2), &getSTI());
12109 else
12110 getStreamer().emitValueToAlignment(Align(2));
12111
12112 return false;
12113}
12114
/// parseDirectivePersonalityIndex
/// ::= .personalityindex index
bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Snapshot whether a personality already existed BEFORE recording this
  // one, so duplicates can still be diagnosed below.
  bool HasExistingPersonality = UC.hasPersonality();

  const MCExpr *IndexExpression;
  SMLoc IndexLoc = Parser.getTok().getLoc();
  if (Parser.parseExpression(IndexExpression) || parseEOL()) {
    return true;
  }

  UC.recordPersonalityIndex(L);

  // Check the ordering of unwind directives.
  if (!UC.hasFnStart()) {
    return Error(L, ".fnstart must precede .personalityindex directive");
  }
  if (UC.cantUnwind()) {
    Error(L, ".personalityindex cannot be used with .cantunwind");
    UC.emitCantUnwindLocNotes();
    return true;
  }
  if (UC.hasHandlerData()) {
    Error(L, ".personalityindex must precede .handlerdata directive");
    UC.emitHandlerDataLocNotes();
    return true;
  }
  if (HasExistingPersonality) {
    Error(L, "multiple personality directives");
    UC.emitPersonalityLocNotes();
    return true;
  }

  // The index must be a constant within [0, NUM_PERSONALITY_INDEX).
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
  if (!CE)
    return Error(IndexLoc, "index must be a constant number");
  if (CE->getValue() < 0 || CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX)
    return Error(IndexLoc,
                 "personality routine index should be in range [0-3]");

  getTargetStreamer().emitPersonalityIndex(CE->getValue());
  return false;
}
12158
12159/// parseDirectiveUnwindRaw
12160/// ::= .unwind_raw offset, opcode [, opcode...]
12161bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
12162 MCAsmParser &Parser = getParser();
12163 int64_t StackOffset;
12164 const MCExpr *OffsetExpr;
12165 SMLoc OffsetLoc = getLexer().getLoc();
12166
12167 if (!UC.hasFnStart())
12168 return Error(L, ".fnstart must precede .unwind_raw directives");
12169 if (getParser().parseExpression(OffsetExpr))
12170 return Error(OffsetLoc, "expected expression");
12171
12172 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
12173 if (!CE)
12174 return Error(OffsetLoc, "offset must be a constant");
12175
12176 StackOffset = CE->getValue();
12177
12178 if (Parser.parseComma())
12179 return true;
12180
12182
12183 auto parseOne = [&]() -> bool {
12184 const MCExpr *OE = nullptr;
12185 SMLoc OpcodeLoc = getLexer().getLoc();
12186 if (check(getLexer().is(AsmToken::EndOfStatement) ||
12187 Parser.parseExpression(OE),
12188 OpcodeLoc, "expected opcode expression"))
12189 return true;
12190 const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
12191 if (!OC)
12192 return Error(OpcodeLoc, "opcode value must be a constant");
12193 const int64_t Opcode = OC->getValue();
12194 if (Opcode & ~0xff)
12195 return Error(OpcodeLoc, "invalid opcode");
12196 Opcodes.push_back(uint8_t(Opcode));
12197 return false;
12198 };
12199
12200 // Must have at least 1 element
12201 SMLoc OpcodeLoc = getLexer().getLoc();
12202 if (parseOptionalToken(AsmToken::EndOfStatement))
12203 return Error(OpcodeLoc, "expected opcode expression");
12204 if (parseMany(parseOne))
12205 return true;
12206
12207 getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
12208 return false;
12209}
12210
12211/// parseDirectiveTLSDescSeq
12212/// ::= .tlsdescseq tls-variable
12213bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
12214 MCAsmParser &Parser = getParser();
12215
12216 if (getLexer().isNot(AsmToken::Identifier))
12217 return TokError("expected variable after '.tlsdescseq' directive");
12218
12219 const MCSymbolRefExpr *SRE =
12222 Lex();
12223
12224 if (parseEOL())
12225 return true;
12226
12227 getTargetStreamer().annotateTLSDescriptorSequence(SRE);
12228 return false;
12229}
12230
12231/// parseDirectiveMovSP
12232/// ::= .movsp reg [, #offset]
12233bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
12234 MCAsmParser &Parser = getParser();
12235 if (!UC.hasFnStart())
12236 return Error(L, ".fnstart must precede .movsp directives");
12237 if (UC.getFPReg() != ARM::SP)
12238 return Error(L, "unexpected .movsp directive");
12239
12240 SMLoc SPRegLoc = Parser.getTok().getLoc();
12241 int SPReg = tryParseRegister();
12242 if (SPReg == -1)
12243 return Error(SPRegLoc, "register expected");
12244 if (SPReg == ARM::SP || SPReg == ARM::PC)
12245 return Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");
12246
12247 int64_t Offset = 0;
12248 if (Parser.parseOptionalToken(AsmToken::Comma)) {
12249 if (Parser.parseToken(AsmToken::Hash, "expected #constant"))
12250 return true;
12251
12252 const MCExpr *OffsetExpr;
12253 SMLoc OffsetLoc = Parser.getTok().getLoc();
12254
12255 if (Parser.parseExpression(OffsetExpr))
12256 return Error(OffsetLoc, "malformed offset expression");
12257
12258 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
12259 if (!CE)
12260 return Error(OffsetLoc, "offset must be an immediate constant");
12261
12262 Offset = CE->getValue();
12263 }
12264
12265 if (parseEOL())
12266 return true;
12267
12268 getTargetStreamer().emitMovSP(SPReg, Offset);
12269 UC.saveFPReg(SPReg);
12270
12271 return false;
12272}
12273
12274/// parseDirectiveObjectArch
12275/// ::= .object_arch name
bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // The directive takes a single identifier naming the architecture.
  if (getLexer().isNot(AsmToken::Identifier))
    return Error(getLexer().getLoc(), "unexpected token");

  StringRef Arch = Parser.getTok().getString();
  SMLoc ArchLoc = Parser.getTok().getLoc();
  Lex();

  // NOTE(review): the declaration of ID (parsing Arch into an
  // ARM::ArchKind) appears elided in this view.

  if (ID == ARM::ArchKind::INVALID)
    return Error(ArchLoc, "unknown architecture '" + Arch + "'");
  if (parseToken(AsmToken::EndOfStatement))
    return true;

  // Record the object-file architecture attribute.
  getTargetStreamer().emitObjectArch(ID);
  return false;
}
12295
12296/// parseDirectiveAlign
12297/// ::= .align
12298bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
12299 // NOTE: if this is not the end of the statement, fall back to the target
12300 // agnostic handling for this directive which will correctly handle this.
12301 if (parseOptionalToken(AsmToken::EndOfStatement)) {
12302 // '.align' is target specifically handled to mean 2**2 byte alignment.
12303 const MCSection *Section = getStreamer().getCurrentSectionOnly();
12304 assert(Section && "must have section to emit alignment");
12305 if (Section->useCodeAlign())
12306 getStreamer().emitCodeAlignment(Align(4), &getSTI(), 0);
12307 else
12308 getStreamer().emitValueToAlignment(Align(4), 0, 1, 0);
12309 return false;
12310 }
12311 return true;
12312}
12313
12314/// parseDirectiveThumbSet
12315/// ::= .thumb_set name, value
bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
  MCAsmParser &Parser = getParser();

  // NOTE(review): the declaration of Name (a StringRef filled by
  // parseIdentifier below) appears elided in this view.
  // Parse "name," — the symbol being defined.
  if (check(Parser.parseIdentifier(Name),
            "expected identifier after '.thumb_set'") ||
      Parser.parseComma())
    return true;

  // Parse the value expression and bind it to the symbol, allowing
  // redefinition as .thumb_set permits.
  MCSymbol *Sym;
  const MCExpr *Value;
  if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
                                               Parser, Sym, Value))
    return true;

  // The target streamer additionally marks the symbol as a Thumb function.
  getTargetStreamer().emitThumbSet(Sym, Value);
  return false;
}
12334
12335/// parseDirectiveSEHAllocStack
12336/// ::= .seh_stackalloc
12337/// ::= .seh_stackalloc_w
12338bool ARMAsmParser::parseDirectiveSEHAllocStack(SMLoc L, bool Wide) {
12339 int64_t Size;
12340 if (parseImmExpr(Size))
12341 return true;
12342 getTargetStreamer().emitARMWinCFIAllocStack(Size, Wide);
12343 return false;
12344}
12345
12346/// parseDirectiveSEHSaveRegs
12347/// ::= .seh_save_regs
12348/// ::= .seh_save_regs_w
bool ARMAsmParser::parseDirectiveSEHSaveRegs(SMLoc L, bool Wide) {
  // NOTE(review): the declaration of Operands (the operand vector filled by
  // parseRegisterList) appears elided in this view.

  if (parseRegisterList(Operands) || parseEOL())
    return true;
  ARMOperand &Op = (ARMOperand &)*Operands[0];
  if (!Op.isRegList())
    return Error(L, ".seh_save_regs{_w} expects GPR registers");
  const SmallVectorImpl<unsigned> &RegList = Op.getRegList();
  // Build a bitmask of saved registers from their encoding values.
  uint32_t Mask = 0;
  for (size_t i = 0; i < RegList.size(); ++i) {
    unsigned Reg = MRI->getEncodingValue(RegList[i]);
    if (Reg == 15) // pc -> lr
      Reg = 14;
    if (Reg == 13)
      return Error(L, ".seh_save_regs{_w} can't include SP");
    assert(Reg < 16U && "Register out of range");
    unsigned Bit = (1u << Reg);
    Mask |= Bit;
  }
  // The narrow form can only encode r0-r7 (+lr); r8-r12 require the wide
  // variant of the directive.
  if (!Wide && (Mask & 0x1f00) != 0)
    return Error(L,
                 ".seh_save_regs cannot save R8-R12, needs .seh_save_regs_w");
  getTargetStreamer().emitARMWinCFISaveRegMask(Mask, Wide);
  return false;
}
12375
12376/// parseDirectiveSEHSaveSP
12377/// ::= .seh_save_sp
12378bool ARMAsmParser::parseDirectiveSEHSaveSP(SMLoc L) {
12379 int Reg = tryParseRegister();
12380 if (Reg == -1 || !MRI->getRegClass(ARM::GPRRegClassID).contains(Reg))
12381 return Error(L, "expected GPR");
12382 unsigned Index = MRI->getEncodingValue(Reg);
12383 if (Index > 14 || Index == 13)
12384 return Error(L, "invalid register for .seh_save_sp");
12385 getTargetStreamer().emitARMWinCFISaveSP(Index);
12386 return false;
12387}
12388
12389/// parseDirectiveSEHSaveFRegs
12390/// ::= .seh_save_fregs
bool ARMAsmParser::parseDirectiveSEHSaveFRegs(SMLoc L) {
  // NOTE(review): the declaration of Operands (the operand vector filled by
  // parseRegisterList) appears elided in this view.

  if (parseRegisterList(Operands) || parseEOL())
    return true;
  ARMOperand &Op = (ARMOperand &)*Operands[0];
  if (!Op.isDPRRegList())
    return Error(L, ".seh_save_fregs expects DPR registers");
  const SmallVectorImpl<unsigned> &RegList = Op.getRegList();
  // Collect the D-register encodings into a bitmask.
  uint32_t Mask = 0;
  for (size_t i = 0; i < RegList.size(); ++i) {
    unsigned Reg = MRI->getEncodingValue(RegList[i]);
    assert(Reg < 32U && "Register out of range");
    unsigned Bit = (1u << Reg);
    Mask |= Bit;
  }

  if (Mask == 0)
    return Error(L, ".seh_save_fregs missing registers");

  // Find the lowest set bit: the first register of the range.
  unsigned First = 0;
  while ((Mask & 1) == 0) {
    First++;
    Mask >>= 1;
  }
  // After shifting, a contiguous range means Mask is of the form 2^k - 1;
  // (Mask + 1) & Mask is zero exactly in that case.
  if (((Mask + 1) & Mask) != 0)
    return Error(L,
                 ".seh_save_fregs must take a contiguous range of registers");
  // Walk the remaining set bits to find the last register of the range.
  unsigned Last = First;
  while ((Mask & 2) != 0) {
    Last++;
    Mask >>= 1;
  }
  // The encoding cannot mix the d0-d15 and d16-d31 banks.
  if (First < 16 && Last >= 16)
    return Error(L, ".seh_save_fregs must be all d0-d15 or d16-d31");
  getTargetStreamer().emitARMWinCFISaveFRegs(First, Last);
  return false;
}
12429
12430/// parseDirectiveSEHSaveLR
12431/// ::= .seh_save_lr
12432bool ARMAsmParser::parseDirectiveSEHSaveLR(SMLoc L) {
12433 int64_t Offset;
12434 if (parseImmExpr(Offset))
12435 return true;
12436 getTargetStreamer().emitARMWinCFISaveLR(Offset);
12437 return false;
12438}
12439
12440/// parseDirectiveSEHPrologEnd
12441/// ::= .seh_endprologue
12442/// ::= .seh_endprologue_fragment
12443bool ARMAsmParser::parseDirectiveSEHPrologEnd(SMLoc L, bool Fragment) {
12444 getTargetStreamer().emitARMWinCFIPrologEnd(Fragment);
12445 return false;
12446}
12447
12448/// parseDirectiveSEHNop
12449/// ::= .seh_nop
12450/// ::= .seh_nop_w
12451bool ARMAsmParser::parseDirectiveSEHNop(SMLoc L, bool Wide) {
12452 getTargetStreamer().emitARMWinCFINop(Wide);
12453 return false;
12454}
12455
12456/// parseDirectiveSEHEpilogStart
12457/// ::= .seh_startepilogue
12458/// ::= .seh_startepilogue_cond
bool ARMAsmParser::parseDirectiveSEHEpilogStart(SMLoc L, bool Condition) {
  // Default to the always-executed condition; the _cond form overrides it.
  unsigned CC = ARMCC::AL;
  if (Condition) {
    MCAsmParser &Parser = getParser();
    SMLoc S = Parser.getTok().getLoc();
    const AsmToken &Tok = Parser.getTok();
    if (!Tok.is(AsmToken::Identifier))
      return Error(S, ".seh_startepilogue_cond missing condition");
    // NOTE(review): the assignment of CC from the condition-code identifier
    // appears elided in this view; ~0U below is its failure sentinel.
    if (CC == ~0U)
      return Error(S, "invalid condition");
    Parser.Lex(); // Eat the token.
  }

  getTargetStreamer().emitARMWinCFIEpilogStart(CC);
  return false;
}
12476
12477/// parseDirectiveSEHEpilogEnd
12478/// ::= .seh_endepilogue
12479bool ARMAsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
12480 getTargetStreamer().emitARMWinCFIEpilogEnd();
12481 return false;
12482}
12483
12484/// parseDirectiveSEHCustom
12485/// ::= .seh_custom
12486bool ARMAsmParser::parseDirectiveSEHCustom(SMLoc L) {
12487 unsigned Opcode = 0;
12488 do {
12489 int64_t Byte;
12490 if (parseImmExpr(Byte))
12491 return true;
12492 if (Byte > 0xff || Byte < 0)
12493 return Error(L, "Invalid byte value in .seh_custom");
12494 if (Opcode > 0x00ffffff)
12495 return Error(L, "Too many bytes in .seh_custom");
12496 // Store the bytes as one big endian number in Opcode. In a multi byte
12497 // opcode sequence, the first byte can't be zero.
12498 Opcode = (Opcode << 8) | Byte;
12499 } while (parseOptionalToken(AsmToken::Comma));
12500 getTargetStreamer().emitARMWinCFICustom(Opcode);
12501 return false;
12502}
12503
12504/// Force static initialization.
12510}
12511
12512#define GET_REGISTER_MATCHER
12513#define GET_SUBTARGET_FEATURE_NAME
12514#define GET_MATCHER_IMPLEMENTATION
12515#define GET_MNEMONIC_SPELL_CHECKER
12516#include "ARMGenAsmMatcher.inc"
12517
12518// Some diagnostics need to vary with subtarget features, so they are handled
12519// here. For example, the DPR class has either 16 or 32 registers, depending
12520// on the FPU available.
12521const char *
12522ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
12523 switch (MatchError) {
12524 // rGPR contains sp starting with ARMv8.
12525 case Match_rGPR:
12526 return hasV8Ops() ? "operand must be a register in range [r0, r14]"
12527 : "operand must be a register in range [r0, r12] or r14";
12528 // DPR contains 16 registers for some FPUs, and 32 for others.
12529 case Match_DPR:
12530 return hasD32() ? "operand must be a register in range [d0, d31]"
12531 : "operand must be a register in range [d0, d15]";
12532 case Match_DPR_RegList:
12533 return hasD32() ? "operand must be a list of registers in range [d0, d31]"
12534 : "operand must be a list of registers in range [d0, d15]";
12535
12536 // For all other diags, use the static string from tablegen.
12537 default:
12538 return getMatchKindDiag(MatchError);
12539 }
12540}
12541
12542// Process the list of near-misses, throwing away ones we don't want to report
12543// to the user, and converting the rest to a source location and string that
12544// should be reported.
void
ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
                               SmallVectorImpl<NearMissMessage> &NearMissesOut,
                               SMLoc IDLoc, OperandVector &Operands) {
  // TODO: If operand didn't match, sub in a dummy one and run target
  // predicate, so that we can avoid reporting near-misses that are invalid?
  // TODO: Many operand types dont have SuperClasses set, so we report
  // redundant ones.
  // TODO: Some operands are superclasses of registers (e.g.
  // MCK_RegShiftedImm), we don't have any way to represent that currently.
  // TODO: This is not all ARM-specific, can some of it be factored out?

  // Record some information about near-misses that we have already seen, so
  // that we can avoid reporting redundant ones. For example, if there are
  // variants of an instruction that take 8- and 16-bit immediates, we want
  // to only report the widest one.
  std::multimap<unsigned, unsigned> OperandMissesSeen;
  SmallSet<FeatureBitset, 4> FeatureMissesSeen;
  bool ReportedTooFewOperands = false;

  // Process the near-misses in reverse order, so that we see more general ones
  // first, and so can avoid emitting more specific ones.
  for (NearMissInfo &I : reverse(NearMissesIn)) {
    // NOTE(review): the case labels of this switch (the NearMissInfo kinds)
    // appear elided in this view; each braced region below handles one kind.
    switch (I.getKind()) {
      // Operand-mismatch near-miss: point at the offending operand and pick
      // the most specific diagnostic string we have for its class.
      SMLoc OperandLoc =
          ((ARMOperand &)*Operands[I.getOperandIndex()]).getStartLoc();
      const char *OperandDiag =
          getCustomOperandDiag((ARMMatchResultTy)I.getOperandError());

      // If we have already emitted a message for a superclass, don't also report
      // the sub-class. We consider all operand classes that we don't have a
      // specialised diagnostic for to be equal for the propose of this check,
      // so that we don't report the generic error multiple times on the same
      // operand.
      unsigned DupCheckMatchClass = OperandDiag ? I.getOperandClass() : ~0U;
      auto PrevReports = OperandMissesSeen.equal_range(I.getOperandIndex());
      if (std::any_of(PrevReports.first, PrevReports.second,
                      [DupCheckMatchClass](
                          const std::pair<unsigned, unsigned> Pair) {
            if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
              return Pair.second == DupCheckMatchClass;
            else
              return isSubclass((MatchClassKind)DupCheckMatchClass,
                                (MatchClassKind)Pair.second);
          }))
        break;
      OperandMissesSeen.insert(
          std::make_pair(I.getOperandIndex(), DupCheckMatchClass));

      NearMissMessage Message;
      Message.Loc = OperandLoc;
      if (OperandDiag) {
        Message.Message = OperandDiag;
      } else if (I.getOperandClass() == InvalidMatchClass) {
        Message.Message = "too many operands for instruction";
      } else {
        // No specialised string for this class: fall back to the generic
        // message and log the gap for developers.
        Message.Message = "invalid operand for instruction";
        LLVM_DEBUG(
            dbgs() << "Missing diagnostic string for operand class "
                   << getMatchClassName((MatchClassKind)I.getOperandClass())
                   << I.getOperandClass() << ", error " << I.getOperandError()
                   << ", opcode " << MII.getName(I.getOpcode()) << "\n");
      }
      NearMissesOut.emplace_back(Message);
      break;
    }
      // Missing-feature near-miss: list the subtarget features that would
      // make this encoding legal, filtering out unhelpful combinations.
      const FeatureBitset &MissingFeatures = I.getFeatures();
      // Don't report the same set of features twice.
      if (FeatureMissesSeen.count(MissingFeatures))
        break;
      FeatureMissesSeen.insert(MissingFeatures);

      // Special case: don't report a feature set which includes arm-mode for
      // targets that don't have ARM mode.
      if (MissingFeatures.test(Feature_IsARMBit) && !hasARM())
        break;
      // Don't report any near-misses that both require switching instruction
      // set, and adding other subtarget features.
      if (isThumb() && MissingFeatures.test(Feature_IsARMBit) &&
          MissingFeatures.count() > 1)
        break;
      if (!isThumb() && MissingFeatures.test(Feature_IsThumbBit) &&
          MissingFeatures.count() > 1)
        break;
      if (!isThumb() && MissingFeatures.test(Feature_IsThumb2Bit) &&
          (MissingFeatures & ~FeatureBitset({Feature_IsThumb2Bit,
                                             Feature_IsThumbBit})).any())
        break;
      if (isMClass() && MissingFeatures.test(Feature_HasNEONBit))
        break;

      NearMissMessage Message;
      Message.Loc = IDLoc;
      raw_svector_ostream OS(Message.Message);

      OS << "instruction requires:";
      for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i)
        if (MissingFeatures.test(i))
          OS << ' ' << getSubtargetFeatureName(i);

      NearMissesOut.emplace_back(Message);

      break;
    }
      // Target-predicate near-miss: translate the predicate error code into
      // a human-readable message.
      NearMissMessage Message;
      Message.Loc = IDLoc;
      switch (I.getPredicateError()) {
      case Match_RequiresNotITBlock:
        Message.Message = "flag setting instruction only valid outside IT block";
        break;
      case Match_RequiresITBlock:
        Message.Message = "instruction only valid inside IT block";
        break;
      case Match_RequiresV6:
        Message.Message = "instruction variant requires ARMv6 or later";
        break;
      case Match_RequiresThumb2:
        Message.Message = "instruction variant requires Thumb2";
        break;
      case Match_RequiresV8:
        Message.Message = "instruction variant requires ARMv8 or later";
        break;
      case Match_RequiresFlagSetting:
        Message.Message = "no flag-preserving variant of this instruction available";
        break;
      case Match_InvalidOperand:
        Message.Message = "invalid operand for instruction";
        break;
      default:
        llvm_unreachable("Unhandled target predicate error");
        break;
      }
      NearMissesOut.emplace_back(Message);
      break;
    }
      // Too-few-operands near-miss: only report this once per instruction.
      if (!ReportedTooFewOperands) {
        SMLoc EndLoc = ((ARMOperand &)*Operands.back()).getEndLoc();
        NearMissesOut.emplace_back(NearMissMessage{
            EndLoc, StringRef("too few operands for instruction")});
        ReportedTooFewOperands = true;
      }
      break;
    }
      // This should never leave the matcher.
      llvm_unreachable("not a near-miss");
      break;
    }
  }
}
12699
void ARMAsmParser::ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses,
                                    SMLoc IDLoc, OperandVector &Operands) {
  // NOTE(review): the declaration of Messages (the filtered message vector)
  // appears elided in this view.
  FilterNearMisses(NearMisses, Messages, IDLoc, Operands);

  if (Messages.size() == 0) {
    // No near-misses were found, so the best we can do is "invalid
    // instruction".
    Error(IDLoc, "invalid instruction");
  } else if (Messages.size() == 1) {
    // One near miss was found, report it as the sole error.
    Error(Messages[0].Loc, Messages[0].Message);
  } else {
    // More than one near miss, so report a generic "invalid instruction"
    // error, followed by notes for each of the near-misses.
    Error(IDLoc, "invalid instruction, any one of the following would fix this:");
    for (auto &M : Messages) {
      Note(M.Loc, M.Message);
    }
  }
}
12721
// Enable or disable ("no"-prefixed) an architectural extension by name.
// Returns true when the extension was recognised and handled (including the
// error paths, where Error() has already reported a diagnostic); returns
// false only when the name is not present in the table below.
bool ARMAsmParser::enableArchExtFeature(StringRef Name, SMLoc &ExtLoc) {
  // FIXME: This structure should be moved inside ARMTargetParser
  // when we start to table-generate them, and we can use the ARM
  // flags below, that were generated by table-gen.
  // NOTE(review): several table rows below are missing their leading
  // {ARM::AEK_...} kind field, and the feature-toggling statements in the
  // if/else near the end appear elided in this view.
  static const struct {
    const uint64_t Kind;
    const FeatureBitset ArchCheck;
    const FeatureBitset Features;
  } Extensions[] = {
      {ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC}},
      {ARM::AEK_AES,
       {Feature_HasV8Bit},
       {ARM::FeatureAES, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
       {Feature_HasV8Bit},
       {ARM::FeatureSHA2, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
       {Feature_HasV8Bit},
       {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
       {Feature_HasV8_1MMainlineBit},
       {ARM::HasMVEFloatOps}},
      {ARM::AEK_FP,
       {Feature_HasV8Bit},
       {ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
       {Feature_HasV7Bit, Feature_IsNotMClassBit},
       {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM}},
      {ARM::AEK_MP,
       {Feature_HasV7Bit, Feature_IsNotMClassBit},
       {ARM::FeatureMP}},
       {Feature_HasV8Bit},
       {ARM::FeatureNEON, ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
      {ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone}},
      // FIXME: Only available in A-class, isel not predicated
      {ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization}},
       {Feature_HasV8_2aBit},
       {ARM::FeatureFPARMv8, ARM::FeatureFullFP16}},
      {ARM::AEK_RAS, {Feature_HasV8Bit}, {ARM::FeatureRAS}},
      {ARM::AEK_LOB, {Feature_HasV8_1MMainlineBit}, {ARM::FeatureLOB}},
      {ARM::AEK_PACBTI, {Feature_HasV8_1MMainlineBit}, {ARM::FeaturePACBTI}},
      // FIXME: Unsupported extensions.
      {ARM::AEK_OS, {}, {}},
      {ARM::AEK_IWMMXT, {}, {}},
      {ARM::AEK_IWMMXT2, {}, {}},
      {ARM::AEK_MAVERICK, {}, {}},
      {ARM::AEK_XSCALE, {}, {}},
  };
  // A leading "no" (case-insensitive) requests disabling the extension.
  bool EnableFeature = !Name.consume_front_insensitive("no");
  // NOTE(review): the declaration of FeatureKind (parsing Name into an
  // extension kind) appears elided in this view.
  if (FeatureKind == ARM::AEK_INVALID)
    return Error(ExtLoc, "unknown architectural extension: " + Name);

  for (const auto &Extension : Extensions) {
    if (Extension.Kind != FeatureKind)
      continue;

    // Rows with an empty feature set mark known-but-unsupported extensions.
    if (Extension.Features.none())
      return Error(ExtLoc, "unsupported architectural extension: " + Name);

    // The extension is only legal when all of its architecture prerequisites
    // are present in the currently available features.
    if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck)
      return Error(ExtLoc, "architectural extension '" + Name +
                               "' is not "
                               "allowed for the current base architecture");

    // Toggle the features on a copy of the subtarget info, then recompute
    // the available-feature bits used by the matcher.
    MCSubtargetInfo &STI = copySTI();
    if (EnableFeature) {
    } else {
    }
    FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
    setAvailableFeatures(Features);
    return true;
  }
  return false;
}
12801
12802/// parseDirectiveArchExtension
12803/// ::= .arch_extension [no]feature
12804bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
12805
12806 MCAsmParser &Parser = getParser();
12807
12808 if (getLexer().isNot(AsmToken::Identifier))
12809 return Error(getLexer().getLoc(), "expected architecture extension name");
12810
12811 StringRef Name = Parser.getTok().getString();
12812 SMLoc ExtLoc = Parser.getTok().getLoc();
12813 Lex();
12814
12815 if (parseEOL())
12816 return true;
12817
12818 if (Name == "nocrypto") {
12819 enableArchExtFeature("nosha2", ExtLoc);
12820 enableArchExtFeature("noaes", ExtLoc);
12821 }
12822
12823 if (enableArchExtFeature(Name, ExtLoc))
12824 return false;
12825
12826 return Error(ExtLoc, "unknown architectural extension: " + Name);
12827}
12828
12829// Define this matcher function after the auto-generated include so we
12830// have the match class enum definitions.
12831unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
12832 unsigned Kind) {
12833 ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
12834 // If the kind is a token for a literal immediate, check if our asm
12835 // operand matches. This is for InstAliases which have a fixed-value
12836 // immediate in the syntax.
12837 switch (Kind) {
12838 default: break;
12839 case MCK__HASH_0:
12840 if (Op.isImm())
12841 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
12842 if (CE->getValue() == 0)
12843 return Match_Success;
12844 break;
12845 case MCK__HASH_8:
12846 if (Op.isImm())
12847 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
12848 if (CE->getValue() == 8)
12849 return Match_Success;
12850 break;
12851 case MCK__HASH_16:
12852 if (Op.isImm())
12853 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
12854 if (CE->getValue() == 16)
12855 return Match_Success;
12856 break;
12857 case MCK_ModImm:
12858 if (Op.isImm()) {
12859 const MCExpr *SOExpr = Op.getImm();
12860 int64_t Value;
12861 if (!SOExpr->evaluateAsAbsolute(Value))
12862 return Match_Success;
12863 assert((Value >= std::numeric_limits<int32_t>::min() &&
12864 Value <= std::numeric_limits<uint32_t>::max()) &&
12865 "expression value must be representable in 32 bits");
12866 }
12867 break;
12868 case MCK_rGPR:
12869 if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP)
12870 return Match_Success;
12871 return Match_rGPR;
12872 case MCK_GPRPair:
12873 if (Op.isReg() &&
12874 MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg()))
12875 return Match_Success;
12876 break;
12877 }
12878 return Match_InvalidOperand;
12879}
12880
12881bool ARMAsmParser::isMnemonicVPTPredicable(StringRef Mnemonic,
12882 StringRef ExtraToken) {
12883 if (!hasMVE())
12884 return false;
12885
12886 if (MS.isVPTPredicableCDEInstr(Mnemonic) ||
12887 (Mnemonic.starts_with("vldrh") && Mnemonic != "vldrhi") ||
12888 (Mnemonic.starts_with("vmov") &&
12889 !(ExtraToken == ".f16" || ExtraToken == ".32" || ExtraToken == ".16" ||
12890 ExtraToken == ".8")) ||
12891 (Mnemonic.starts_with("vrint") && Mnemonic != "vrintr") ||
12892 (Mnemonic.starts_with("vstrh") && Mnemonic != "vstrhi"))
12893 return true;
12894
12895 const char *predicable_prefixes[] = {
12896 "vabav", "vabd", "vabs", "vadc", "vadd",
12897 "vaddlv", "vaddv", "vand", "vbic", "vbrsr",
12898 "vcadd", "vcls", "vclz", "vcmla", "vcmp",
12899 "vcmul", "vctp", "vcvt", "vddup", "vdup",
12900 "vdwdup", "veor", "vfma", "vfmas", "vfms",
12901 "vhadd", "vhcadd", "vhsub", "vidup", "viwdup",
12902 "vldrb", "vldrd", "vldrw", "vmax", "vmaxa",
12903 "vmaxav", "vmaxnm", "vmaxnma", "vmaxnmav", "vmaxnmv",
12904 "vmaxv", "vmin", "vminav", "vminnm", "vminnmav",
12905 "vminnmv", "vminv", "vmla", "vmladav", "vmlaldav",
12906 "vmlalv", "vmlas", "vmlav", "vmlsdav", "vmlsldav",
12907 "vmovlb", "vmovlt", "vmovnb", "vmovnt", "vmul",
12908 "vmvn", "vneg", "vorn", "vorr", "vpnot",
12909 "vpsel", "vqabs", "vqadd", "vqdmladh", "vqdmlah",
12910 "vqdmlash", "vqdmlsdh", "vqdmulh", "vqdmull", "vqmovn",
12911 "vqmovun", "vqneg", "vqrdmladh", "vqrdmlah", "vqrdmlash",
12912 "vqrdmlsdh", "vqrdmulh", "vqrshl", "vqrshrn", "vqrshrun",
12913 "vqshl", "vqshrn", "vqshrun", "vqsub", "vrev16",
12914 "vrev32", "vrev64", "vrhadd", "vrmlaldavh", "vrmlalvh",
12915 "vrmlsldavh", "vrmulh", "vrshl", "vrshr", "vrshrn",
12916 "vsbc", "vshl", "vshlc", "vshll", "vshr",
12917 "vshrn", "vsli", "vsri", "vstrb", "vstrd",
12918 "vstrw", "vsub"};
12919
12920 return std::any_of(
12921 std::begin(predicable_prefixes), std::end(predicable_prefixes),
12922 [&Mnemonic](const char *prefix) { return Mnemonic.starts_with(prefix); });
12923}
unsigned const MachineRegisterInfo * MRI
This file defines the StringMap class.
static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static bool isLoad(int Opcode)
static unsigned getNextRegister(unsigned Reg)
static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS, unsigned VariantID=0)
static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing)
static bool instIsBreakpoint(const MCInst &Inst)
static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo, unsigned Reg, unsigned HiReg, bool &containsReg)
static bool isDataTypeToken(StringRef Tok)
static MCRegister MatchRegisterName(StringRef Name)
static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing)
static const char * getSubtargetFeatureName(uint64_t Val)
static bool isVectorPredicable(const MCInstrDesc &MCID)
static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp)
MatchCoprocessorOperandName - Try to parse an coprocessor related instruction with a symbolic operand...
static void applyMnemonicAliases(StringRef &Mnemonic, const FeatureBitset &Features, unsigned VariantID)
static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT)
static bool insertNoDuplicates(SmallVectorImpl< std::pair< unsigned, unsigned > > &Regs, unsigned Enc, unsigned Reg)
static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID)
static bool isThumbI8Relocation(MCParsedAsmOperand &MCOp)
static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg)
LLVM_EXTERNAL_VISIBILITY void LLVMInitializeARMAsmParser()
Force static initialization.
static bool isARMMCExpr(MCParsedAsmOperand &MCOp)
static bool isThumb(const MCSubtargetInfo &STI)
static uint64_t scale(uint64_t Num, uint32_t N, uint32_t D)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static Register getFPReg(const CSKYSubtarget &STI)
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Definition: CommandLine.h:693
#define LLVM_EXTERNAL_VISIBILITY
Definition: Compiler.h:135
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
std::string Name
uint64_t Size
Symbol * Sym
Definition: ELF_riscv.cpp:479
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
#define check(cond)
#define op(i)
#define RegName(no)
static LVOptions Options
Definition: LVOptions.cpp:25
#define I(x, y, z)
Definition: MD5.cpp:58
mir Rename Register Operands
static MSP430CC::CondCodes getCondCode(unsigned Cond)
unsigned Reg
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool isReg(const MCInst &MI, unsigned OpNo)
uint64_t High
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
#define P(N)
static cl::opt< bool > AddBuildAttributes("riscv-add-build-attributes", cl::init(false))
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI Pre allocate WWM Registers
cl::list< SPIRV::Extension::Extension > Extensions("spirv-extensions", cl::desc("SPIR-V extensions"), cl::ZeroOrMore, cl::Hidden, cl::values(clEnumValN(SPIRV::Extension::SPV_EXT_shader_atomic_float_add, "SPV_EXT_shader_atomic_float_add", "Adds atomic add instruction on floating-point numbers."), clEnumValN(SPIRV::Extension::SPV_EXT_shader_atomic_float16_add, "SPV_EXT_shader_atomic_float16_add", "Extends the SPV_EXT_shader_atomic_float_add extension to support " "atomically adding to 16-bit floating-point numbers in memory."), clEnumValN(SPIRV::Extension::SPV_EXT_shader_atomic_float_min_max, "SPV_EXT_shader_atomic_float_min_max", "Adds atomic min and max instruction on floating-point numbers."), clEnumValN(SPIRV::Extension::SPV_INTEL_arbitrary_precision_integers, "SPV_INTEL_arbitrary_precision_integers", "Allows generating arbitrary width integer types."), clEnumValN(SPIRV::Extension::SPV_INTEL_optnone, "SPV_INTEL_optnone", "Adds OptNoneINTEL value for Function Control mask that " "indicates a request to not optimize the function."), clEnumValN(SPIRV::Extension::SPV_INTEL_usm_storage_classes, "SPV_INTEL_usm_storage_classes", "Introduces two new storage classes that are sub classes of " "the CrossWorkgroup storage class " "that provides additional information that can enable " "optimization."), clEnumValN(SPIRV::Extension::SPV_INTEL_subgroups, "SPV_INTEL_subgroups", "Allows work items in a subgroup to share data without the " "use of local memory and work group barriers, and to " "utilize specialized hardware to load and store blocks of " "data from images or buffers."), clEnumValN(SPIRV::Extension::SPV_KHR_uniform_group_instructions, "SPV_KHR_uniform_group_instructions", "Allows support for additional group operations within " "uniform control flow."), clEnumValN(SPIRV::Extension::SPV_KHR_no_integer_wrap_decoration, "SPV_KHR_no_integer_wrap_decoration", "Adds decorations to indicate that a given instruction does " "not cause integer wrapping."), 
clEnumValN(SPIRV::Extension::SPV_KHR_float_controls, "SPV_KHR_float_controls", "Provides new execution modes to control floating-point " "computations by overriding an implementation’s default behavior " "for rounding modes, denormals, signed zero, and infinities."), clEnumValN(SPIRV::Extension::SPV_KHR_expect_assume, "SPV_KHR_expect_assume", "Provides additional information to a compiler, similar to " "the llvm.assume and llvm.expect intrinsics."), clEnumValN(SPIRV::Extension::SPV_KHR_bit_instructions, "SPV_KHR_bit_instructions", "This enables bit instructions to be used by SPIR-V modules " "without requiring the Shader capability."), clEnumValN(SPIRV::Extension::SPV_KHR_linkonce_odr, "SPV_KHR_linkonce_odr", "Allows to use the LinkOnceODR linkage type that is to let " "a function or global variable to be merged with other functions " "or global variables of the same name when linkage occurs."), clEnumValN(SPIRV::Extension::SPV_INTEL_bfloat16_conversion, "SPV_INTEL_bfloat16_conversion", "Adds instructions to convert between single-precision " "32-bit floating-point values and 16-bit bfloat16 values."), clEnumValN(SPIRV::Extension::SPV_KHR_subgroup_rotate, "SPV_KHR_subgroup_rotate", "Adds a new instruction that enables rotating values across " "invocations within a subgroup."), clEnumValN(SPIRV::Extension::SPV_INTEL_variable_length_array, "SPV_INTEL_variable_length_array", "Allows to allocate local arrays whose number of elements " "is unknown at compile time."), clEnumValN(SPIRV::Extension::SPV_INTEL_function_pointers, "SPV_INTEL_function_pointers", "Allows translation of function pointers.")))
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file defines the SmallSet class.
This file defines the SmallVector class.
StringSet - A set-like wrapper for the StringMap.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition: Value.cpp:469
APInt bitcastToAPInt() const
Definition: APFloat.h:1210
Class for arbitrary precision integers.
Definition: APInt.h:76
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1491
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=ARM::NoRegAltName)
VariantKind getKind() const
getKind - Get the kind of this expression.
Definition: ARMMCExpr.h:76
static const ARMMCExpr * create(VariantKind Kind, const MCExpr *Expr, MCContext &Ctx)
Definition: ARMMCExpr.cpp:17
Target independent representation for an assembler token.
Definition: MCAsmMacro.h:21
SMLoc getLoc() const
Definition: MCAsmLexer.cpp:26
int64_t getIntVal() const
Definition: MCAsmMacro.h:115
bool isNot(TokenKind K) const
Definition: MCAsmMacro.h:83
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on string...
Definition: MCAsmMacro.h:110
StringRef getStringContents() const
Get the contents of a string token (without quotes).
Definition: MCAsmMacro.h:90
bool is(TokenKind K) const
Definition: MCAsmMacro.h:82
SMLoc getEndLoc() const
Definition: MCAsmLexer.cpp:30
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string.
Definition: MCAsmMacro.h:99
This class represents an Operation in the Expression.
Base class for user error types.
Definition: Error.h:352
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
Container class for subtarget features.
constexpr bool test(unsigned I) const
size_t count() const
constexpr size_t size() const
Generic assembler lexer interface, for use by target specific assembly lexers.
Definition: MCAsmLexer.h:37
const AsmToken peekTok(bool ShouldSkipSpace=true)
Look ahead at the next token to be lexed.
Definition: MCAsmLexer.h:111
virtual void Initialize(MCAsmParser &Parser)
Initialize the extension for parsing using the given Parser.
Generic assembler parser interface, for use by target specific assembly parsers.
Definition: MCAsmParser.h:123
bool parseToken(AsmToken::TokenKind T, const Twine &Msg="unexpected token")
Definition: MCAsmParser.cpp:63
virtual bool parseEscapedString(std::string &Data)=0
Parse the current token as a string which may include escaped characters and return the string conten...
virtual MCStreamer & getStreamer()=0
Return the output streamer for the assembler.
virtual void Note(SMLoc L, const Twine &Msg, SMRange Range=std::nullopt)=0
Emit a note at the location L, with the message Msg.
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
const AsmToken & getTok() const
Get the current AsmToken from the stream.
Definition: MCAsmParser.cpp:40
virtual bool parseIdentifier(StringRef &Res)=0
Parse an identifier or string (as a quoted identifier) and set Res to the identifier contents.
bool parseOptionalToken(AsmToken::TokenKind T)
Attempt to parse and consume token, returning true on success.
Definition: MCAsmParser.cpp:80
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
virtual bool Warning(SMLoc L, const Twine &Msg, SMRange Range=std::nullopt)=0
Emit a warning at the location L, with the message Msg.
bool Error(SMLoc L, const Twine &Msg, SMRange Range=std::nullopt)
Return an error at the location L, with the message Msg.
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition: MCExpr.h:536
int64_t getValue() const
Definition: MCExpr.h:173
static const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition: MCExpr.cpp:194
Context object for machine code objects.
Definition: MCContext.h:76
const MCRegisterInfo * getRegisterInfo() const
Definition: MCContext.h:448
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:35
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
void dump_pretty(raw_ostream &OS, const MCInstPrinter *Printer=nullptr, StringRef Separator=" ", const MCRegisterInfo *RegInfo=nullptr) const
Dump the MCInst as prettily as possible using the additional MC structures, if given.
Definition: MCInst.cpp:81
unsigned getNumOperands() const
Definition: MCInst.h:208
void setLoc(SMLoc loc)
Definition: MCInst.h:203
unsigned getOpcode() const
Definition: MCInst.h:198
iterator insert(iterator I, const MCOperand &Op)
Definition: MCInst.h:224
void addOperand(const MCOperand Op)
Definition: MCInst.h:210
iterator begin()
Definition: MCInst.h:219
void setOpcode(unsigned Op)
Definition: MCInst.h:197
const MCOperand & getOperand(unsigned i) const
Definition: MCInst.h:206
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:239
bool isIndirectBranch() const
Return true if this is an indirect branch, such as a branch through a register.
Definition: MCInstrDesc.h:311
int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
Definition: MCInstrDesc.h:609
bool hasDefOfPhysReg(const MCInst &MI, unsigned Reg, const MCRegisterInfo &RI) const
Return true if this instruction defines the specified physical register, either explicitly or implici...
Definition: MCInstrDesc.cpp:40
bool hasOptionalDef() const
Set if this instruction has an optional definition, e.g.
Definition: MCInstrDesc.h:265
unsigned short NumOperands
Definition: MCInstrDesc.h:206
bool isBranch() const
Returns true if this is a conditional, unconditional, or indirect branch.
Definition: MCInstrDesc.h:307
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:219
bool isPredicable() const
Return true if this instruction has a predicate operand that controls execution.
Definition: MCInstrDesc.h:338
bool isCall() const
Return true if the instruction is a call.
Definition: MCInstrDesc.h:288
bool isTerminator() const
Returns true if this instruction part of the terminator for a basic block.
Definition: MCInstrDesc.h:301
bool isReturn() const
Return true if the instruction is a return.
Definition: MCInstrDesc.h:276
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:26
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition: MCInstrDesc.h:85
Instances of this class represent operands of the MCInst class.
Definition: MCInst.h:36
static MCOperand createReg(unsigned Reg)
Definition: MCInst.h:134
static MCOperand createExpr(const MCExpr *Val)
Definition: MCInst.h:162
int64_t getImm() const
Definition: MCInst.h:80
static MCOperand createImm(int64_t Val)
Definition: MCInst.h:141
bool isImm() const
Definition: MCInst.h:62
unsigned getReg() const
Returns the register number.
Definition: MCInst.h:69
bool isReg() const
Definition: MCInst.h:61
const MCExpr * getExpr() const
Definition: MCInst.h:114
bool isExpr() const
Definition: MCInst.h:65
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand.
virtual unsigned getReg() const =0
virtual SMLoc getStartLoc() const =0
getStartLoc - Get the location of the first token of this operand.
virtual bool isReg() const =0
isReg - Is this a register operand?
virtual bool isMem() const =0
isMem - Is this a memory operand?
virtual void print(raw_ostream &OS) const =0
print - Print a debug representation of the operand to the given stream.
virtual bool isToken() const =0
isToken - Is this a token operand?
virtual bool isImm() const =0
isImm - Is this an immediate operand?
virtual SMLoc getEndLoc() const =0
getEndLoc - Get the location of the last token of this operand.
MCRegisterClass - Base class of TargetRegisterClass.
unsigned getID() const
getID() - Return the register class ID number.
unsigned getNumRegs() const
getNumRegs - Return the number of registers in this class.
unsigned getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
Instances of this class represent a uniqued identifier for a section in the current translation unit.
Definition: MCSection.h:39
Streaming machine code generation interface.
Definition: MCStreamer.h:212
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
virtual void emitLabel(MCSymbol *Symbol, SMLoc Loc=SMLoc())
Emit a label for Symbol into the current section.
Definition: MCStreamer.cpp:424
MCTargetStreamer * getTargetStreamer()
Definition: MCStreamer.h:304
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const FeatureBitset & getFeatureBits() const
FeatureBitset ApplyFeatureFlag(StringRef FS)
Apply a feature flag and return the re-computed feature bits, including all feature bits implied by t...
FeatureBitset SetFeatureBitsTransitively(const FeatureBitset &FB)
Set/clear additional feature bits, including all other bits they imply.
void setDefaultFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS)
Set the features to the default for the given CPU and TuneCPU, with an appended feature string.
FeatureBitset ToggleFeature(uint64_t FB)
Toggle a feature and return the re-computed feature bits.
FeatureBitset ClearFeatureBitsTransitively(const FeatureBitset &FB)
Represent a reference to a symbol from inside an expression.
Definition: MCExpr.h:192
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
Definition: MCExpr.h:397
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:40
MCTargetAsmParser - Generic interface to target specific assembly parsers.
virtual void onLabelParsed(MCSymbol *Symbol)
MCSubtargetInfo & copySTI()
Create a copy of STI and return a non-const reference to it.
virtual bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc)=0
virtual bool ParseDirective(AsmToken DirectiveID)
ParseDirective - Parse a target specific assembler directive This method is deprecated,...
virtual ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc)=0
tryParseRegister - parse one register if possible
virtual void flushPendingInstructions(MCStreamer &Out)
Ensure that all previously parsed instructions have been emitted to the output streamer,...
void setAvailableFeatures(const FeatureBitset &Value)
virtual MCSymbolRefExpr::VariantKind getVariantKindForName(StringRef Name) const
const MCSubtargetInfo & getSTI() const
virtual void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc)
virtual unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, unsigned Kind)
Allow a target to add special case operand matching for things that tblgen doesn't/can't handle effec...
virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, OperandVector &Operands)=0
ParseInstruction - Parse one assembly instruction.
virtual unsigned checkTargetMatchPredicate(MCInst &Inst)
checkTargetMatchPredicate - Validate the instruction match against any complex target predicates not ...
virtual bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, OperandVector &Operands, MCStreamer &Out, uint64_t &ErrorInfo, bool MatchingInlineAsm)=0
MatchAndEmitInstruction - Recognize a series of operands of a parsed instruction as an actual MCInst ...
Target specific streamer interface.
Definition: MCStreamer.h:93
Ternary parse status returned by various parse* methods.
constexpr bool isFailure() const
static constexpr StatusTy Failure
constexpr bool isSuccess() const
static constexpr StatusTy Success
static constexpr StatusTy NoMatch
Represents a location in source code.
Definition: SMLoc.h:23
static SMLoc getFromPointer(const char *Ptr)
Definition: SMLoc.h:36
constexpr const char * getPointer() const
Definition: SMLoc.h:34
Represents a range in source code.
Definition: SMLoc.h:48
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
typename SuperClass::const_iterator const_iterator
Definition: SmallVector.h:591
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StackOffset holds a fixed and a scalable offset in bytes.
Definition: TypeSize.h:33
StringMap - This is an unconventional map that is specialized for handling keys that are "strings",...
Definition: StringMap.h:128
iterator end()
Definition: StringMap.h:221
iterator find(StringRef Key)
Definition: StringMap.h:234
size_type count(StringRef Key) const
count - Return 1 if the element is in the map, 0 otherwise.
Definition: StringMap.h:277
void erase(iterator I)
Definition: StringMap.h:415
bool insert(MapEntryTy *KeyValue)
insert - Insert the specified key/value pair into the map.
Definition: StringMap.h:307
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Definition: StringRef.h:567
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:257
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
Definition: StringRef.h:680
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:137
StringRef trim(char Char) const
Return string with consecutive Char characters starting from the left and right removed.
Definition: StringRef.h:811
std::string lower() const
Definition: StringRef.cpp:111
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition: StringRef.h:271
static constexpr size_t npos
Definition: StringRef.h:52
bool equals_insensitive(StringRef RHS) const
Check for string equality, ignoring case.
Definition: StringRef.h:170
StringSet - A wrapper for StringMap that provides set-like functionality.
Definition: StringSet.h:23
std::pair< typename Base::iterator, bool > insert(StringRef key)
Definition: StringSet.h:38
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
LLVM Value Representation.
Definition: Value.h:74
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
A raw_ostream that writes to an SmallVector or SmallString.
Definition: raw_ostream.h:690
This class provides various memory handling functions that manipulate MemoryBlock instances.
Definition: Memory.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const TagNameMap & getARMAttributeTags()
static CondCodes getOppositeCondition(CondCodes CC)
Definition: ARMBaseInfo.h:48
@ ThumbArithFlagSetting
Definition: ARMBaseInfo.h:414
unsigned getSORegOffset(unsigned Op)
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
unsigned encodeNEONi16splat(unsigned Value)
float getFPImmFloat(unsigned Imm)
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
unsigned getAM2Opc(AddrOpc Opc, unsigned Imm12, ShiftOpc SO, unsigned IdxMode=0)
unsigned getAM5Opc(AddrOpc Opc, unsigned char Offset)
getAM5Opc - This function encodes the addrmode5 opc field.
ShiftOpc getSORegShOp(unsigned Op)
bool isNEONi16splat(unsigned Value)
Checks if Value is a correct immediate for instructions like VBIC/VORR.
unsigned getAM5FP16Opc(AddrOpc Opc, unsigned char Offset)
getAM5FP16Opc - This function encodes the addrmode5fp16 opc field.
unsigned getAM3Opc(AddrOpc Opc, unsigned char Offset, unsigned IdxMode=0)
getAM3Opc - This function encodes the addrmode3 opc field.
bool isNEONi32splat(unsigned Value)
Checks if Value is a correct immediate for instructions like VBIC/VORR.
unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
const char * getShiftOpcStr(ShiftOpc Op)
unsigned encodeNEONi32splat(unsigned Value)
Encode NEON 32 bits Splat immediate for instructions like VBIC/VORR.
static const char * IFlagsToString(unsigned val)
Definition: ARMBaseInfo.h:37
bool getFPUFeatures(FPUKind FPUKind, std::vector< StringRef > &Features)
StringRef getArchName(ArchKind AK)
uint64_t parseArchExt(StringRef ArchExt)
ArchKind parseArch(StringRef Arch)
bool isVpred(OperandType op)
FPUKind parseFPU(StringRef FPU)
bool isCDECoproc(size_t Coproc, const MCSubtargetInfo &STI)
@ D16
Only 16 D registers.
constexpr bool any(E Val)
Definition: BitmaskEnum.h:141
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
std::optional< unsigned > attrTypeFromString(StringRef tag, TagNameMap tagNameMap)
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:148
bool parseAssignmentExpression(StringRef Name, bool allow_redef, MCAsmParser &Parser, MCSymbol *&Symbol, const MCExpr *&Value)
Parse a value expression and return whether it can be assigned to a symbol with the given name.
Definition: AsmParser.cpp:6404
@ CE
Windows NT (Windows on ARM)
Reg
All possible values of the reg field in the ModR/M byte.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
Definition: CommandLine.h:718
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
NodeAddr< FuncNode * > Func
Definition: RDFGraph.h:393
Format
The format used for serializing/deserializing remarks.
Definition: RemarkFormat.h:25
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
static const char * ARMVPTPredToString(ARMVCC::VPTCodes CC)
Definition: ARMBaseInfo.h:130
@ Offset
Definition: DWP.cpp:456
@ Length
Definition: DWP.cpp:456
int popcount(T Value) noexcept
Count the number of set bits in a value.
Definition: bit.h:385
Target & getTheThumbBETarget()
static unsigned ARMCondCodeFromString(StringRef CC)
Definition: ARMBaseInfo.h:167
const ARMInstrTable ARMDescs
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: bit.h:215
static bool isARMLowRegister(unsigned Reg)
isARMLowRegister - Returns true if the register is a low register (r0-r7).
Definition: ARMBaseInfo.h:160
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:428
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool is_sorted(R &&Range, Compare C)
Wrapper function around std::is_sorted to check if elements in a range R are sorted with respect to a...
Definition: STLExtras.h:1911
bool IsCPSRDead< MCInst >(const MCInst *Instr)
static bool isValidCoprocessorNumber(unsigned Num, const FeatureBitset &featureBits)
isValidCoprocessorNumber - decide whether an explicit coprocessor number is legal in generic instruct...
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ MCAF_Code16
.code16 (X86) / .code 16 (ARM)
Definition: MCDirectives.h:56
@ MCAF_Code32
.code32 (X86) / .code 32 (ARM)
Definition: MCDirectives.h:57
DWARFExpression::Operation Op
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1758
static unsigned ARMVectorCondCodeFromString(StringRef CC)
Definition: ARMBaseInfo.h:139
static const char * ARMCondCodeToString(ARMCC::CondCodes CC)
Definition: ARMBaseInfo.h:146
Target & getTheARMLETarget()
Target & getTheARMBETarget()
Target & getTheThumbLETarget()
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
const FeatureBitset Features
MCInstrDesc Insts[4445]
MCOperandInfo OperandInfo[3026]
MCPhysReg ImplicitOps[130]
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Holds functions to get, set or test bitfields.
Definition: Bitfields.h:212
RegisterMCAsmParser - Helper template for registering a target specific assembly parser,...