LLVM 19.0.0git
ARMAsmParser.cpp
Go to the documentation of this file.
1//===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "ARMBaseInstrInfo.h"
10#include "ARMFeatures.h"
17#include "Utils/ARMBaseInfo.h"
18#include "llvm/ADT/APFloat.h"
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
26#include "llvm/ADT/StringSet.h"
28#include "llvm/ADT/Twine.h"
29#include "llvm/MC/MCContext.h"
30#include "llvm/MC/MCExpr.h"
31#include "llvm/MC/MCInst.h"
32#include "llvm/MC/MCInstrDesc.h"
33#include "llvm/MC/MCInstrInfo.h"
41#include "llvm/MC/MCSection.h"
42#include "llvm/MC/MCStreamer.h"
44#include "llvm/MC/MCSymbol.h"
51#include "llvm/Support/Debug.h"
54#include "llvm/Support/SMLoc.h"
59#include <algorithm>
60#include <cassert>
61#include <cstddef>
62#include <cstdint>
63#include <iterator>
64#include <limits>
65#include <memory>
66#include <optional>
67#include <string>
68#include <utility>
69#include <vector>
70
71#define DEBUG_TYPE "asm-parser"
72
73using namespace llvm;
74
75namespace llvm {
80};
81extern const ARMInstrTable ARMDescs;
82} // end namespace llvm
83
84namespace {
85class ARMOperand;
86
87enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
88
89static cl::opt<ImplicitItModeTy> ImplicitItMode(
90 "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
91 cl::desc("Allow conditional instructions outdside of an IT block"),
92 cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
93 "Accept in both ISAs, emit implicit ITs in Thumb"),
94 clEnumValN(ImplicitItModeTy::Never, "never",
95 "Warn in ARM, reject in Thumb"),
96 clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
97 "Accept in ARM, reject in Thumb"),
98 clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
99 "Warn in ARM, emit implicit ITs in Thumb")));
100
// When set, target build attributes are emitted at parser construction time
// (see the emitTargetAttributes(STI) call in the ARMAsmParser constructor).
101static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
102 cl::init(false));
103
104enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
105
// Extract the then/else state bit for one slot of an IT block mask.
// Position==0 means we're not in an IT block at all. Position==1 selects the
// first state bit, which is always 0 (Then). Position==2 selects the second
// state bit, which lives at bit 3 of Mask, and so on downwards: shifting by
// (5 - Position) moves the wanted bit down to bit 0 (the always-0 bit for
// the mandatory initial Then conceptually sits at bit 4).
static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
  unsigned Shift = 5 - Position;
  return (Mask >> Shift) & 1u;
}
115
116class UnwindContext {
117 using Locs = SmallVector<SMLoc, 4>;
118
119 MCAsmParser &Parser;
120 Locs FnStartLocs;
121 Locs CantUnwindLocs;
122 Locs PersonalityLocs;
123 Locs PersonalityIndexLocs;
124 Locs HandlerDataLocs;
125 int FPReg;
126
127public:
128 UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
129
130 bool hasFnStart() const { return !FnStartLocs.empty(); }
131 bool cantUnwind() const { return !CantUnwindLocs.empty(); }
132 bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
133
134 bool hasPersonality() const {
135 return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
136 }
137
138 void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
139 void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
140 void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
141 void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
142 void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
143
144 void saveFPReg(int Reg) { FPReg = Reg; }
145 int getFPReg() const { return FPReg; }
146
147 void emitFnStartLocNotes() const {
148 for (const SMLoc &Loc : FnStartLocs)
149 Parser.Note(Loc, ".fnstart was specified here");
150 }
151
152 void emitCantUnwindLocNotes() const {
153 for (const SMLoc &Loc : CantUnwindLocs)
154 Parser.Note(Loc, ".cantunwind was specified here");
155 }
156
157 void emitHandlerDataLocNotes() const {
158 for (const SMLoc &Loc : HandlerDataLocs)
159 Parser.Note(Loc, ".handlerdata was specified here");
160 }
161
162 void emitPersonalityLocNotes() const {
163 for (Locs::const_iterator PI = PersonalityLocs.begin(),
164 PE = PersonalityLocs.end(),
165 PII = PersonalityIndexLocs.begin(),
166 PIE = PersonalityIndexLocs.end();
167 PI != PE || PII != PIE;) {
168 if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
169 Parser.Note(*PI++, ".personality was specified here");
170 else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
171 Parser.Note(*PII++, ".personalityindex was specified here");
172 else
173 llvm_unreachable(".personality and .personalityindex cannot be "
174 "at the same location");
175 }
176 }
177
178 void reset() {
179 FnStartLocs = Locs();
180 CantUnwindLocs = Locs();
181 PersonalityLocs = Locs();
182 HandlerDataLocs = Locs();
183 PersonalityIndexLocs = Locs();
184 FPReg = ARM::SP;
185 }
186};
187
188// Various sets of ARM instruction mnemonics which are used by the asm parser
189class ARMMnemonicSets {
190 StringSet<> CDE;
191 StringSet<> CDEWithVPTSuffix;
192public:
193 ARMMnemonicSets(const MCSubtargetInfo &STI);
194
195 /// Returns true iff a given mnemonic is a CDE instruction
196 bool isCDEInstr(StringRef Mnemonic) {
197 // Quick check before searching the set
198 if (!Mnemonic.starts_with("cx") && !Mnemonic.starts_with("vcx"))
199 return false;
200 return CDE.count(Mnemonic);
201 }
202
203 /// Returns true iff a given mnemonic is a VPT-predicable CDE instruction
204 /// (possibly with a predication suffix "e" or "t")
205 bool isVPTPredicableCDEInstr(StringRef Mnemonic) {
206 if (!Mnemonic.starts_with("vcx"))
207 return false;
208 return CDEWithVPTSuffix.count(Mnemonic);
209 }
210
211 /// Returns true iff a given mnemonic is an IT-predicable CDE instruction
212 /// (possibly with a condition suffix)
213 bool isITPredicableCDEInstr(StringRef Mnemonic) {
214 if (!Mnemonic.starts_with("cx"))
215 return false;
216 return Mnemonic.starts_with("cx1a") || Mnemonic.starts_with("cx1da") ||
217 Mnemonic.starts_with("cx2a") || Mnemonic.starts_with("cx2da") ||
218 Mnemonic.starts_with("cx3a") || Mnemonic.starts_with("cx3da");
219 }
220
221 /// Return true iff a given mnemonic is an integer CDE instruction with
222 /// dual-register destination
223 bool isCDEDualRegInstr(StringRef Mnemonic) {
224 if (!Mnemonic.starts_with("cx"))
225 return false;
226 return Mnemonic == "cx1d" || Mnemonic == "cx1da" ||
227 Mnemonic == "cx2d" || Mnemonic == "cx2da" ||
228 Mnemonic == "cx3d" || Mnemonic == "cx3da";
229 }
230};
231
232ARMMnemonicSets::ARMMnemonicSets(const MCSubtargetInfo &STI) {
233 for (StringRef Mnemonic: { "cx1", "cx1a", "cx1d", "cx1da",
234 "cx2", "cx2a", "cx2d", "cx2da",
235 "cx3", "cx3a", "cx3d", "cx3da", })
236 CDE.insert(Mnemonic);
237 for (StringRef Mnemonic :
238 {"vcx1", "vcx1a", "vcx2", "vcx2a", "vcx3", "vcx3a"}) {
239 CDE.insert(Mnemonic);
240 CDEWithVPTSuffix.insert(Mnemonic);
241 CDEWithVPTSuffix.insert(std::string(Mnemonic) + "t");
242 CDEWithVPTSuffix.insert(std::string(Mnemonic) + "e");
243 }
244}
245
246class ARMAsmParser : public MCTargetAsmParser {
247 const MCRegisterInfo *MRI;
248 UnwindContext UC;
249 ARMMnemonicSets MS;
250
251 ARMTargetStreamer &getTargetStreamer() {
252 assert(getParser().getStreamer().getTargetStreamer() &&
253 "do not have a target streamer");
255 return static_cast<ARMTargetStreamer &>(TS);
256 }
257
258 // Map of register aliases registers via the .req directive.
259 StringMap<unsigned> RegisterReqs;
260
261 bool NextSymbolIsThumb;
262
// Whether implicit IT blocks should be synthesized around conditional
// instructions when assembling Thumb code (per -arm-implicit-it).
263 bool useImplicitITThumb() const {
264 return ImplicitItMode == ImplicitItModeTy::Always ||
265 ImplicitItMode == ImplicitItModeTy::ThumbOnly;
266 }
267
// Whether conditional instructions are accepted without an IT block when
// assembling ARM code (per -arm-implicit-it).
268 bool useImplicitITARM() const {
269 return ImplicitItMode == ImplicitItModeTy::Always ||
270 ImplicitItMode == ImplicitItModeTy::ARMOnly;
271 }
272
273 struct {
274 ARMCC::CondCodes Cond; // Condition for IT block.
275 unsigned Mask:4; // Condition mask for instructions.
276 // Starting at first 1 (from lsb).
277 // '1' condition as indicated in IT.
278 // '0' inverse of condition (else).
279 // Count of instructions in IT block is
280 // 4 - trailingzeroes(mask)
281 // Note that this does not have the same encoding
282 // as in the IT instruction, which also depends
283 // on the low bit of the condition code.
284
285 unsigned CurPosition; // Current position in parsing of IT
286 // block. In range [0,4], with 0 being the IT
287 // instruction itself. Initialized according to
288 // count of instructions in block. ~0U if no
289 // active IT block.
290
291 bool IsExplicit; // true - The IT instruction was present in the
292 // input, we should not modify it.
293 // false - The IT instruction was added
294 // implicitly, we can extend it if that
295 // would be legal.
296 } ITState;
297
298 SmallVector<MCInst, 4> PendingConditionalInsts;
299
300 void flushPendingInstructions(MCStreamer &Out) override {
301 if (!inImplicitITBlock()) {
302 assert(PendingConditionalInsts.size() == 0);
303 return;
304 }
305
306 // Emit the IT instruction
307 MCInst ITInst;
308 ITInst.setOpcode(ARM::t2IT);
309 ITInst.addOperand(MCOperand::createImm(ITState.Cond));
310 ITInst.addOperand(MCOperand::createImm(ITState.Mask));
311 Out.emitInstruction(ITInst, getSTI());
312
313 // Emit the conditional instructions
314 assert(PendingConditionalInsts.size() <= 4);
315 for (const MCInst &Inst : PendingConditionalInsts) {
316 Out.emitInstruction(Inst, getSTI());
317 }
318 PendingConditionalInsts.clear();
319
320 // Clear the IT state
321 ITState.Mask = 0;
322 ITState.CurPosition = ~0U;
323 }
324
// True while parsing instructions inside an (explicit or implicit) IT block;
// CurPosition == ~0U marks "no active block".
325 bool inITBlock() { return ITState.CurPosition != ~0U; }
326 bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
327 bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }
328
// True when the current slot is the final one of the block: the block has
// 4 - trailingzeroes(mask) slots and CurPosition counts from 1.
329 bool lastInITBlock() {
330 return ITState.CurPosition == 4 - (unsigned)llvm::countr_zero(ITState.Mask);
331 }
332
333 void forwardITPosition() {
334 if (!inITBlock()) return;
335 // Move to the next instruction in the IT block, if there is one. If not,
336 // mark the block as done, except for implicit IT blocks, which we leave
337 // open until we find an instruction that can't be added to it.
338 unsigned TZ = llvm::countr_zero(ITState.Mask);
339 if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
340 ITState.CurPosition = ~0U; // Done with the IT block after this.
341 }
342
343 // Rewind the state of the current IT block, removing the last slot from it.
344 void rewindImplicitITPosition() {
345 assert(inImplicitITBlock());
346 assert(ITState.CurPosition > 1);
347 ITState.CurPosition--;
348 unsigned TZ = llvm::countr_zero(ITState.Mask);
349 unsigned NewMask = 0;
350 NewMask |= ITState.Mask & (0xC << TZ);
351 NewMask |= 0x2 << TZ;
352 ITState.Mask = NewMask;
353 }
354
// Discard the current implicit IT block, closing it entirely. Only valid
// while still at the first slot (CurPosition == 1), i.e. before any
// conditional instruction has been committed to the block.
357 void discardImplicitITBlock() {
358 assert(inImplicitITBlock());
359 assert(ITState.CurPosition == 1);
360 ITState.CurPosition = ~0U;
361 }
362
363 // Return the low-subreg of a given Q register.
// (e.g. Q0 -> D0, via the dsub_0 subregister index.)
364 unsigned getDRegFromQReg(unsigned QReg) const {
365 return MRI->getSubReg(QReg, ARM::dsub_0);
366 }
367
368 // Get the condition code corresponding to the current IT block slot.
// A set mask bit marks an 'else' slot, which uses the opposite of the
// block's base condition.
369 ARMCC::CondCodes currentITCond() {
370 unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
371 return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond;
372 }
373
374 // Invert the condition of the current IT block slot without changing any
375 // other slots in the same block.
376 void invertCurrentITCondition() {
377 if (ITState.CurPosition == 1) {
// The first slot is by definition 'then', so flipping it means flipping
// the block's base condition itself.
378 ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
379 } else {
// Later slots are mask bits encoded relative to the base condition.
380 ITState.Mask ^= 1 << (5 - ITState.CurPosition);
381 }
382 }
383
384 // Returns true if the current IT block is full (all 4 slots used).
// When all four slots are allocated, the mask's terminating 1 has been
// pushed all the way down to bit 0.
385 bool isITBlockFull() {
386 return inITBlock() && (ITState.Mask & 1);
387 }
388
389 // Extend the current implicit IT block to have one more slot with the given
390 // condition code.
391 void extendImplicitITBlock(ARMCC::CondCodes Cond) {
392 assert(inImplicitITBlock());
393 assert(!isITBlockFull());
394 assert(Cond == ITState.Cond ||
395 Cond == ARMCC::getOppositeCondition(ITState.Cond));
396 unsigned TZ = llvm::countr_zero(ITState.Mask);
397 unsigned NewMask = 0;
398 // Keep any existing condition bits.
399 NewMask |= ITState.Mask & (0xE << TZ);
400 // Insert the new condition bit.
401 NewMask |= (Cond != ITState.Cond) << TZ;
402 // Move the trailing 1 down one bit.
403 NewMask |= 1 << (TZ - 1);
404 ITState.Mask = NewMask;
405 }
406
407 // Create a new implicit IT block with a dummy condition code.
// Mask 8 (0b1000) encodes a single 'then' slot; AL is a placeholder
// condition for the not-yet-emitted IT instruction.
408 void startImplicitITBlock() {
409 assert(!inITBlock());
410 ITState.Cond = ARMCC::AL;
411 ITState.Mask = 8;
412 ITState.CurPosition = 1;
413 ITState.IsExplicit = false;
414 }
415
416 // Create a new explicit IT block with the given condition and mask.
417 // The mask should be in the format used in ARMOperand and
418 // MCOperand, with a 1 implying 'e', regardless of the low bit of
419 // the condition.
// Position 0 denotes the IT instruction itself; forwardITPosition moves
// to slot 1 when the first predicated instruction is parsed.
420 void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
421 assert(!inITBlock());
422 ITState.Cond = Cond;
423 ITState.Mask = Mask;
424 ITState.CurPosition = 0;
425 ITState.IsExplicit = true;
426 }
427
// State of the current VPT predication block, mirroring ITState: Mask uses
// the same trailing-1 encoding and CurPosition is ~0U when no block is
// active.
428 struct {
429 unsigned Mask : 4;
430 unsigned CurPosition;
431 } VPTState;
432 bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
// Advance to the next slot of the VPT block; unlike implicit IT blocks,
// the block always closes after its last slot (5 - trailingzeroes(Mask)).
433 void forwardVPTPosition() {
434 if (!inVPTBlock()) return;
435 unsigned TZ = llvm::countr_zero(VPTState.Mask);
436 if (++VPTState.CurPosition == 5 - TZ)
437 VPTState.CurPosition = ~0U;
438 }
439
// Diagnostic helpers that forward to the underlying MCAsmParser, so member
// code can write Note/Warning/Error without going through getParser().
440 void Note(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
441 return getParser().Note(L, Msg, Range);
442 }
443
444 bool Warning(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
445 return getParser().Warning(L, Msg, Range);
446 }
447
// The bool result allows the idiom 'return Error(Loc, ...);' in parsers.
448 bool Error(SMLoc L, const Twine &Msg, SMRange Range = std::nullopt) {
449 return getParser().Error(L, Msg, Range);
450 }
451
452 bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
453 unsigned MnemonicOpsEndInd, unsigned ListIndex,
454 bool IsARPop = false);
455 bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
456 unsigned MnemonicOpsEndInd, unsigned ListIndex);
457
458 int tryParseRegister(bool AllowOutofBoundReg = false);
459 bool tryParseRegisterWithWriteBack(OperandVector &);
460 int tryParseShiftRegister(OperandVector &);
461 std::optional<ARM_AM::ShiftOpc> tryParseShiftToken();
462 bool parseRegisterList(OperandVector &, bool EnforceOrder = true,
463 bool AllowRAAC = false,
464 bool AllowOutOfBoundReg = false);
465 bool parseMemory(OperandVector &);
466 bool parseOperand(OperandVector &, StringRef Mnemonic);
467 bool parseImmExpr(int64_t &Out);
468 bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
469 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
470 unsigned &ShiftAmount);
471 bool parseLiteralValues(unsigned Size, SMLoc L);
472 bool parseDirectiveThumb(SMLoc L);
473 bool parseDirectiveARM(SMLoc L);
474 bool parseDirectiveThumbFunc(SMLoc L);
475 bool parseDirectiveCode(SMLoc L);
476 bool parseDirectiveSyntax(SMLoc L);
477 bool parseDirectiveReq(StringRef Name, SMLoc L);
478 bool parseDirectiveUnreq(SMLoc L);
479 bool parseDirectiveArch(SMLoc L);
480 bool parseDirectiveEabiAttr(SMLoc L);
481 bool parseDirectiveCPU(SMLoc L);
482 bool parseDirectiveFPU(SMLoc L);
483 bool parseDirectiveFnStart(SMLoc L);
484 bool parseDirectiveFnEnd(SMLoc L);
485 bool parseDirectiveCantUnwind(SMLoc L);
486 bool parseDirectivePersonality(SMLoc L);
487 bool parseDirectiveHandlerData(SMLoc L);
488 bool parseDirectiveSetFP(SMLoc L);
489 bool parseDirectivePad(SMLoc L);
490 bool parseDirectiveRegSave(SMLoc L, bool IsVector);
491 bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
492 bool parseDirectiveLtorg(SMLoc L);
493 bool parseDirectiveEven(SMLoc L);
494 bool parseDirectivePersonalityIndex(SMLoc L);
495 bool parseDirectiveUnwindRaw(SMLoc L);
496 bool parseDirectiveTLSDescSeq(SMLoc L);
497 bool parseDirectiveMovSP(SMLoc L);
498 bool parseDirectiveObjectArch(SMLoc L);
499 bool parseDirectiveArchExtension(SMLoc L);
500 bool parseDirectiveAlign(SMLoc L);
501 bool parseDirectiveThumbSet(SMLoc L);
502
503 bool parseDirectiveSEHAllocStack(SMLoc L, bool Wide);
504 bool parseDirectiveSEHSaveRegs(SMLoc L, bool Wide);
505 bool parseDirectiveSEHSaveSP(SMLoc L);
506 bool parseDirectiveSEHSaveFRegs(SMLoc L);
507 bool parseDirectiveSEHSaveLR(SMLoc L);
508 bool parseDirectiveSEHPrologEnd(SMLoc L, bool Fragment);
509 bool parseDirectiveSEHNop(SMLoc L, bool Wide);
510 bool parseDirectiveSEHEpilogStart(SMLoc L, bool Condition);
511 bool parseDirectiveSEHEpilogEnd(SMLoc L);
512 bool parseDirectiveSEHCustom(SMLoc L);
513
514 std::unique_ptr<ARMOperand> defaultCondCodeOp();
515 std::unique_ptr<ARMOperand> defaultCCOutOp();
516 std::unique_ptr<ARMOperand> defaultVPTPredOp();
517
518 bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
519 StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
520 ARMCC::CondCodes &PredicationCode,
521 ARMVCC::VPTCodes &VPTPredicationCode,
522 bool &CarrySetting, unsigned &ProcessorIMod,
523 StringRef &ITMask);
524 void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
525 StringRef FullInst, bool &CanAcceptCarrySet,
526 bool &CanAcceptPredicationCode,
527 bool &CanAcceptVPTPredicationCode);
528 bool enableArchExtFeature(StringRef Name, SMLoc &ExtLoc);
529
530 void tryConvertingToTwoOperandForm(StringRef Mnemonic,
531 ARMCC::CondCodes PredicationCode,
532 bool CarrySetting, OperandVector &Operands,
533 unsigned MnemonicOpsEndInd);
534
535 bool CDEConvertDualRegOperand(StringRef Mnemonic, OperandVector &Operands,
536 unsigned MnemonicOpsEndInd);
537
538 bool isThumb() const {
539 // FIXME: Can tablegen auto-generate this?
540 return getSTI().hasFeature(ARM::ModeThumb);
541 }
542
543 bool isThumbOne() const {
544 return isThumb() && !getSTI().hasFeature(ARM::FeatureThumb2);
545 }
546
547 bool isThumbTwo() const {
548 return isThumb() && getSTI().hasFeature(ARM::FeatureThumb2);
549 }
550
551 bool hasThumb() const {
552 return getSTI().hasFeature(ARM::HasV4TOps);
553 }
554
555 bool hasThumb2() const {
556 return getSTI().hasFeature(ARM::FeatureThumb2);
557 }
558
559 bool hasV6Ops() const {
560 return getSTI().hasFeature(ARM::HasV6Ops);
561 }
562
563 bool hasV6T2Ops() const {
564 return getSTI().hasFeature(ARM::HasV6T2Ops);
565 }
566
567 bool hasV6MOps() const {
568 return getSTI().hasFeature(ARM::HasV6MOps);
569 }
570
571 bool hasV7Ops() const {
572 return getSTI().hasFeature(ARM::HasV7Ops);
573 }
574
575 bool hasV8Ops() const {
576 return getSTI().hasFeature(ARM::HasV8Ops);
577 }
578
579 bool hasV8MBaseline() const {
580 return getSTI().hasFeature(ARM::HasV8MBaselineOps);
581 }
582
583 bool hasV8MMainline() const {
584 return getSTI().hasFeature(ARM::HasV8MMainlineOps);
585 }
586 bool hasV8_1MMainline() const {
587 return getSTI().hasFeature(ARM::HasV8_1MMainlineOps);
588 }
589 bool hasMVE() const {
590 return getSTI().hasFeature(ARM::HasMVEIntegerOps);
591 }
592 bool hasMVEFloat() const {
593 return getSTI().hasFeature(ARM::HasMVEFloatOps);
594 }
595 bool hasCDE() const {
596 return getSTI().hasFeature(ARM::HasCDEOps);
597 }
598 bool has8MSecExt() const {
599 return getSTI().hasFeature(ARM::Feature8MSecExt);
600 }
601
602 bool hasARM() const {
603 return !getSTI().hasFeature(ARM::FeatureNoARM);
604 }
605
606 bool hasDSP() const {
607 return getSTI().hasFeature(ARM::FeatureDSP);
608 }
609
610 bool hasD32() const {
611 return getSTI().hasFeature(ARM::FeatureD32);
612 }
613
614 bool hasV8_1aOps() const {
615 return getSTI().hasFeature(ARM::HasV8_1aOps);
616 }
617
618 bool hasRAS() const {
619 return getSTI().hasFeature(ARM::FeatureRAS);
620 }
621
622 void SwitchMode() {
623 MCSubtargetInfo &STI = copySTI();
624 auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
626 }
627
628 void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
629
// True for M-profile (microcontroller-class) targets.
630 bool isMClass() const {
631 return getSTI().hasFeature(ARM::FeatureMClass);
632 }
633
634 /// @name Auto-generated Match Functions
635 /// {
636
637#define GET_ASSEMBLER_HEADER
638#include "ARMGenAsmMatcher.inc"
639
640 /// }
641
642 ParseStatus parseITCondCode(OperandVector &);
643 ParseStatus parseCoprocNumOperand(OperandVector &);
644 ParseStatus parseCoprocRegOperand(OperandVector &);
645 ParseStatus parseCoprocOptionOperand(OperandVector &);
646 ParseStatus parseMemBarrierOptOperand(OperandVector &);
647 ParseStatus parseTraceSyncBarrierOptOperand(OperandVector &);
648 ParseStatus parseInstSyncBarrierOptOperand(OperandVector &);
649 ParseStatus parseProcIFlagsOperand(OperandVector &);
650 ParseStatus parseMSRMaskOperand(OperandVector &);
651 ParseStatus parseBankedRegOperand(OperandVector &);
// PKH instructions take a shifted-immediate operand; the two wrappers below
// fix the shift kind and legal range (LSL #0..31 and ASR #1..32 — presumably
// for PKHBT and PKHTB respectively; confirm against the matcher tables).
652 ParseStatus parsePKHImm(OperandVector &O, ARM_AM::ShiftOpc, int Low,
653 int High);
654 ParseStatus parsePKHLSLImm(OperandVector &O) {
655 return parsePKHImm(O, ARM_AM::lsl, 0, 31);
656 }
657 ParseStatus parsePKHASRImm(OperandVector &O) {
658 return parsePKHImm(O, ARM_AM::asr, 1, 32);
659 }
660 ParseStatus parseSetEndImm(OperandVector &);
661 ParseStatus parseShifterImm(OperandVector &);
662 ParseStatus parseRotImm(OperandVector &);
663 ParseStatus parseModImm(OperandVector &);
664 ParseStatus parseBitfield(OperandVector &);
665 ParseStatus parsePostIdxReg(OperandVector &);
666 ParseStatus parseAM3Offset(OperandVector &);
667 ParseStatus parseFPImm(OperandVector &);
668 ParseStatus parseVectorList(OperandVector &);
669 ParseStatus parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
670 SMLoc &EndLoc);
671
672 // Asm Match Converter Methods
673 void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
674 void cvtThumbBranches(MCInst &Inst, const OperandVector &);
675 void cvtMVEVMOVQtoDReg(MCInst &Inst, const OperandVector &);
676
677 bool validateInstruction(MCInst &Inst, const OperandVector &Ops,
678 unsigned MnemonicOpsEndInd);
679 bool processInstruction(MCInst &Inst, const OperandVector &Ops,
680 unsigned MnemonicOpsEndInd, MCStreamer &Out);
681 bool shouldOmitVectorPredicateOperand(StringRef Mnemonic,
683 unsigned MnemonicOpsEndInd);
684 bool isITBlockTerminator(MCInst &Inst) const;
685
686 void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands,
687 unsigned MnemonicOpsEndInd);
688 bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands, bool Load,
689 bool ARMMode, bool Writeback,
690 unsigned MnemonicOpsEndInd);
691
692public:
693 enum ARMMatchResultTy {
694 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
695 Match_RequiresNotITBlock,
696 Match_RequiresV6,
697 Match_RequiresThumb2,
698 Match_RequiresV8,
699 Match_RequiresFlagSetting,
700#define GET_OPERAND_DIAGNOSTIC_TYPES
701#include "ARMGenAsmMatcher.inc"
702
703 };
704
705 ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
706 const MCInstrInfo &MII, const MCTargetOptions &Options)
707 : MCTargetAsmParser(Options, STI, MII), UC(Parser), MS(STI) {
709
710 // Cache the MCRegisterInfo.
712
713 // Initialize the set of available features.
714 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
715
716 // Add build attributes based on the selected target.
718 getTargetStreamer().emitTargetAttributes(STI);
719
720 // Not in an ITBlock to start with.
721 ITState.CurPosition = ~0U;
722
723 VPTState.CurPosition = ~0U;
724
725 NextSymbolIsThumb = false;
726 }
727
728 // Implementation of the MCTargetAsmParser interface:
729 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
731 SMLoc &EndLoc) override;
733 SMLoc NameLoc, OperandVector &Operands) override;
734 bool ParseDirective(AsmToken DirectiveID) override;
735
737 unsigned Kind) override;
738 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
739 unsigned
741 const OperandVector &Operands) override;
742
743 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
746 bool MatchingInlineAsm) override;
747 unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
749 bool MatchingInlineAsm, bool &EmitInITBlock,
750 MCStreamer &Out);
751
752 struct NearMissMessage {
753 SMLoc Loc;
754 SmallString<128> Message;
755 };
756
757 const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
758
759 void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
762 void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
764
766 getVariantKindForName(StringRef Name) const override;
767
768 void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) override;
769
770 void onLabelParsed(MCSymbol *Symbol) override;
771};
772
773/// ARMOperand - Instances of this class represent a parsed ARM machine
774/// operand.
775class ARMOperand : public MCParsedAsmOperand {
776 enum KindTy {
777 k_CondCode,
778 k_VPTPred,
779 k_CCOut,
780 k_ITCondMask,
781 k_CoprocNum,
782 k_CoprocReg,
783 k_CoprocOption,
784 k_Immediate,
785 k_MemBarrierOpt,
786 k_InstSyncBarrierOpt,
787 k_TraceSyncBarrierOpt,
788 k_Memory,
789 k_PostIndexRegister,
790 k_MSRMask,
791 k_BankedReg,
792 k_ProcIFlags,
793 k_VectorIndex,
794 k_Register,
795 k_RegisterList,
796 k_RegisterListWithAPSR,
797 k_DPRRegisterList,
798 k_SPRRegisterList,
799 k_FPSRegisterListWithVPR,
800 k_FPDRegisterListWithVPR,
801 k_VectorList,
802 k_VectorListAllLanes,
803 k_VectorListIndexed,
804 k_ShiftedRegister,
805 k_ShiftedImmediate,
806 k_ShifterImmediate,
807 k_RotateImmediate,
808 k_ModifiedImmediate,
809 k_ConstantPoolImmediate,
810 k_BitfieldDescriptor,
811 k_Token,
812 } Kind;
813
814 SMLoc StartLoc, EndLoc, AlignmentLoc;
816
817 struct CCOp {
819 };
820
821 struct VCCOp {
823 };
824
825 struct CopOp {
826 unsigned Val;
827 };
828
829 struct CoprocOptionOp {
830 unsigned Val;
831 };
832
833 struct ITMaskOp {
834 unsigned Mask:4;
835 };
836
837 struct MBOptOp {
838 ARM_MB::MemBOpt Val;
839 };
840
841 struct ISBOptOp {
843 };
844
845 struct TSBOptOp {
847 };
848
849 struct IFlagsOp {
851 };
852
853 struct MMaskOp {
854 unsigned Val;
855 };
856
857 struct BankedRegOp {
858 unsigned Val;
859 };
860
861 struct TokOp {
862 const char *Data;
863 unsigned Length;
864 };
865
866 struct RegOp {
867 unsigned RegNum;
868 };
869
870 // A vector register list is a sequential list of 1 to 4 registers.
871 struct VectorListOp {
872 unsigned RegNum;
873 unsigned Count;
874 unsigned LaneIndex;
875 bool isDoubleSpaced;
876 };
877
878 struct VectorIndexOp {
879 unsigned Val;
880 };
881
882 struct ImmOp {
883 const MCExpr *Val;
884 };
885
886 /// Combined record for all forms of ARM address expressions.
887 struct MemoryOp {
888 unsigned BaseRegNum;
889 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
890 // was specified.
891 const MCExpr *OffsetImm; // Offset immediate value
892 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL
893 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
894 unsigned ShiftImm; // shift for OffsetReg.
895 unsigned Alignment; // 0 = no alignment specified
896 // n = alignment in bytes (2, 4, 8, 16, or 32)
897 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit)
898 };
899
900 struct PostIdxRegOp {
901 unsigned RegNum;
902 bool isAdd;
903 ARM_AM::ShiftOpc ShiftTy;
904 unsigned ShiftImm;
905 };
906
907 struct ShifterImmOp {
908 bool isASR;
909 unsigned Imm;
910 };
911
912 struct RegShiftedRegOp {
913 ARM_AM::ShiftOpc ShiftTy;
914 unsigned SrcReg;
915 unsigned ShiftReg;
916 unsigned ShiftImm;
917 };
918
919 struct RegShiftedImmOp {
920 ARM_AM::ShiftOpc ShiftTy;
921 unsigned SrcReg;
922 unsigned ShiftImm;
923 };
924
925 struct RotImmOp {
926 unsigned Imm;
927 };
928
929 struct ModImmOp {
930 unsigned Bits;
931 unsigned Rot;
932 };
933
934 struct BitfieldOp {
935 unsigned LSB;
936 unsigned Width;
937 };
938
939 union {
940 struct CCOp CC;
941 struct VCCOp VCC;
942 struct CopOp Cop;
943 struct CoprocOptionOp CoprocOption;
944 struct MBOptOp MBOpt;
945 struct ISBOptOp ISBOpt;
946 struct TSBOptOp TSBOpt;
947 struct ITMaskOp ITMask;
948 struct IFlagsOp IFlags;
949 struct MMaskOp MMask;
950 struct BankedRegOp BankedReg;
951 struct TokOp Tok;
952 struct RegOp Reg;
953 struct VectorListOp VectorList;
954 struct VectorIndexOp VectorIndex;
955 struct ImmOp Imm;
956 struct MemoryOp Memory;
957 struct PostIdxRegOp PostIdxReg;
958 struct ShifterImmOp ShifterImm;
959 struct RegShiftedRegOp RegShiftedReg;
960 struct RegShiftedImmOp RegShiftedImm;
961 struct RotImmOp RotImm;
962 struct ModImmOp ModImm;
963 struct BitfieldOp Bitfield;
964 };
965
966public:
967 ARMOperand(KindTy K) : Kind(K) {}
968
969 /// getStartLoc - Get the location of the first token of this operand.
970 SMLoc getStartLoc() const override { return StartLoc; }
971
972 /// getEndLoc - Get the location of the last token of this operand.
973 SMLoc getEndLoc() const override { return EndLoc; }
974
975 /// getLocRange - Get the range between the first and last token of this
976 /// operand.
977 SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
978
979 /// getAlignmentLoc - Get the location of the Alignment token of this operand.
980 SMLoc getAlignmentLoc() const {
981 assert(Kind == k_Memory && "Invalid access!");
982 return AlignmentLoc;
983 }
984
986 assert(Kind == k_CondCode && "Invalid access!");
987 return CC.Val;
988 }
989
990 ARMVCC::VPTCodes getVPTPred() const {
991 assert(isVPTPred() && "Invalid access!");
992 return VCC.Val;
993 }
994
995 unsigned getCoproc() const {
996 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
997 return Cop.Val;
998 }
999
1000 StringRef getToken() const {
1001 assert(Kind == k_Token && "Invalid access!");
1002 return StringRef(Tok.Data, Tok.Length);
1003 }
1004
1005 MCRegister getReg() const override {
1006 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
1007 return Reg.RegNum;
1008 }
1009
1010 const SmallVectorImpl<unsigned> &getRegList() const {
1011 assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
1012 Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
1013 Kind == k_FPSRegisterListWithVPR ||
1014 Kind == k_FPDRegisterListWithVPR) &&
1015 "Invalid access!");
1016 return Registers;
1017 }
1018
1019 const MCExpr *getImm() const {
1020 assert(isImm() && "Invalid access!");
1021 return Imm.Val;
1022 }
1023
1024 const MCExpr *getConstantPoolImm() const {
1025 assert(isConstantPoolImm() && "Invalid access!");
1026 return Imm.Val;
1027 }
1028
1029 unsigned getVectorIndex() const {
1030 assert(Kind == k_VectorIndex && "Invalid access!");
1031 return VectorIndex.Val;
1032 }
1033
1034 ARM_MB::MemBOpt getMemBarrierOpt() const {
1035 assert(Kind == k_MemBarrierOpt && "Invalid access!");
1036 return MBOpt.Val;
1037 }
1038
1039 ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
1040 assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
1041 return ISBOpt.Val;
1042 }
1043
1044 ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
1045 assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");
1046 return TSBOpt.Val;
1047 }
1048
1049 ARM_PROC::IFlags getProcIFlags() const {
1050 assert(Kind == k_ProcIFlags && "Invalid access!");
1051 return IFlags.Val;
1052 }
1053
1054 unsigned getMSRMask() const {
1055 assert(Kind == k_MSRMask && "Invalid access!");
1056 return MMask.Val;
1057 }
1058
1059 unsigned getBankedReg() const {
1060 assert(Kind == k_BankedReg && "Invalid access!");
1061 return BankedReg.Val;
1062 }
1063
  // Simple kind predicates queried by the auto-generated asm matcher.
  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isVPTPred() const { return Kind == k_VPTPred; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  // An IT condition code is parsed as an ordinary condition-code operand.
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const override {
    return Kind == k_Immediate;
  }
1075
1076 bool isARMBranchTarget() const {
1077 if (!isImm()) return false;
1078
1079 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1080 return CE->getValue() % 4 == 0;
1081 return true;
1082 }
1083
1084
1085 bool isThumbBranchTarget() const {
1086 if (!isImm()) return false;
1087
1088 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1089 return CE->getValue() % 2 == 0;
1090 return true;
1091 }
1092
  // checks whether this operand is an unsigned offset which fits in a field
  // of specified width and is scaled by a specific number of bits
  template<unsigned width, unsigned scale>
  bool isUnsignedOffset() const {
    if (!isImm()) return false;
    // Symbolic references are resolved later by a fixup.
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      int64_t Max = Align * ((1LL << width) - 1);
      // Non-negative, a multiple of the scale, and encodable in 'width'
      // bits after scaling.
      return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
    }
    return false;
  }

  // checks whether this operand is a signed offset which fits in a field
  // of specified width and is scaled by a specific number of bits
  template<unsigned width, unsigned scale>
  bool isSignedOffset() const {
    if (!isImm()) return false;
    // Symbolic references are resolved later by a fixup.
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      int64_t Max = Align * ((1LL << (width-1)) - 1);
      int64_t Min = -Align * (1LL << (width-1));
      return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
    }
    return false;
  }

  // checks whether this operand is an offset suitable for the LE /
  // LETP instructions in Arm v8.1M
  bool isLEOffset() const {
    if (!isImm()) return false;
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      // LE branches backwards only: even offsets in [-4094, -2].
      return Val < 0 && Val >= -4094 && (Val & 1) == 0;
    }
    return false;
  }

  // checks whether this operand is a memory operand computed as an offset
  // applied to PC. the offset may have 8 bits of magnitude and is represented
  // with two bits of shift. textually it may be either [pc, #imm], #imm or
  // relocable expression...
  bool isThumbMemPC() const {
    int64_t Val = 0;
    if (isImm()) {
      if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
      if (!CE) return false;
      Val = CE->getValue();
    }
    else if (isGPRMem()) {
      // Only a bare PC-relative immediate offset is acceptable here.
      if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
      if(Memory.BaseRegNum != ARM::PC) return false;
      if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
        Val = CE->getValue();
      else
        return false;
    }
    else return false;
    // Word-aligned offsets in [0, 1020].
    return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
  }
1159
1160 bool isFPImm() const {
1161 if (!isImm()) return false;
1162 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1163 if (!CE) return false;
1164 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1165 return Val != -1;
1166 }
1167
  // True iff the operand is a constant immediate in [N, M] (inclusive).
  template<int64_t N, int64_t M>
  bool isImmediate() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= N && Value <= M;
  }

  // As isImmediate, but additionally requires a multiple of 4.
  template<int64_t N, int64_t M>
  bool isImmediateS4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= N && Value <= M;
  }
  // As isImmediate, but additionally requires a multiple of 2.
  template<int64_t N, int64_t M>
  bool isImmediateS2() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 1) == 0) && Value >= N && Value <= M;
  }
  // Instantiations of the isImmediate* templates for the specific ranges
  // required by the .td-defined operand classes.
  bool isFBits16() const {
    return isImmediate<0, 17>();
  }
  bool isFBits32() const {
    return isImmediate<1, 33>();
  }
  bool isImm8s4() const {
    return isImmediateS4<-1020, 1020>();
  }
  bool isImm7s4() const {
    return isImmediateS4<-508, 508>();
  }
  bool isImm7Shift0() const {
    return isImmediate<-127, 127>();
  }
  bool isImm7Shift1() const {
    return isImmediateS2<-255, 255>();
  }
  bool isImm7Shift2() const {
    return isImmediateS4<-511, 511>();
  }
  bool isImm7() const {
    return isImmediate<-127, 127>();
  }
  bool isImm0_1020s4() const {
    return isImmediateS4<0, 1020>();
  }
  bool isImm0_508s4() const {
    return isImmediateS4<0, 508>();
  }
  bool isImm0_508s4Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = -CE->getValue();
    // explicitly exclude zero. we want that to use the normal 0_508 version.
    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
  }

  bool isImm0_4095Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    // isImm0_4095Neg is used with 32-bit immediates only.
    // 32-bit immediates are zero extended to 64-bit when parsed,
    // thus simple -CE->getValue() results in a big negative number,
    // not a small positive number as intended
    if ((CE->getValue() >> 32) > 0) return false;
    uint32_t Value = -static_cast<uint32_t>(CE->getValue());
    return Value > 0 && Value < 4096;
  }

  bool isImm0_7() const {
    return isImmediate<0, 7>();
  }

  bool isImm1_16() const {
    return isImmediate<1, 16>();
  }

  bool isImm1_32() const {
    return isImmediate<1, 32>();
  }

  bool isImm8_255() const {
    return isImmediate<8, 255>();
  }
1260
1261 bool isImm0_255Expr() const {
1262 if (!isImm())
1263 return false;
1264 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1265 // If it's not a constant expression, it'll generate a fixup and be
1266 // handled later.
1267 if (!CE)
1268 return true;
1269 int64_t Value = CE->getValue();
1270 return isUInt<8>(Value);
1271 }
1272
1273 bool isImm256_65535Expr() const {
1274 if (!isImm()) return false;
1275 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1276 // If it's not a constant expression, it'll generate a fixup and be
1277 // handled later.
1278 if (!CE) return true;
1279 int64_t Value = CE->getValue();
1280 return Value >= 256 && Value < 65536;
1281 }
1282
1283 bool isImm0_65535Expr() const {
1284 if (!isImm()) return false;
1285 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1286 // If it's not a constant expression, it'll generate a fixup and be
1287 // handled later.
1288 if (!CE) return true;
1289 int64_t Value = CE->getValue();
1290 return Value >= 0 && Value < 65536;
1291 }
1292
  bool isImm24bit() const {
    return isImmediate<0, 0xffffff + 1>();
  }

  bool isImmThumbSR() const {
    return isImmediate<1, 33>();
  }

  bool isPKHLSLImm() const {
    return isImmediate<0, 32>();
  }

  bool isPKHASRImm() const {
    return isImmediate<0, 33>();
  }

  bool isAdrLabel() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    // If it is a constant, it must fit into a modified immediate encoding.
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Either the value or its negation must be encodable (ADR may lower to
    // an ADD or a SUB from PC).
    return (ARM_AM::getSOImmVal(Value) != -1 ||
            ARM_AM::getSOImmVal(-Value) != -1);
  }

  bool isT2SOImm() const {
    // If we have an immediate that's not a constant, treat it as an expression
    // needing a fixup.
    if (isImm() && !isa<MCConstantExpr>(getImm())) {
      // We want to avoid matching :upper16: and :lower16: as we want these
      // expressions to match in isImm0_65535Expr()
      const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
      return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
                             ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
    }
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }
1340
1341 bool isT2SOImmNot() const {
1342 if (!isImm()) return false;
1343 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1344 if (!CE) return false;
1345 int64_t Value = CE->getValue();
1346 return ARM_AM::getT2SOImmVal(Value) == -1 &&
1348 }
1349
1350 bool isT2SOImmNeg() const {
1351 if (!isImm()) return false;
1352 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1353 if (!CE) return false;
1354 int64_t Value = CE->getValue();
1355 // Only use this when not representable as a plain so_imm.
1356 return ARM_AM::getT2SOImmVal(Value) == -1 &&
1358 }
1359
1360 bool isSetEndImm() const {
1361 if (!isImm()) return false;
1362 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1363 if (!CE) return false;
1364 int64_t Value = CE->getValue();
1365 return Value == 1 || Value == 0;
1366 }
1367
  bool isReg() const override { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  // A plain GPR register list also matches the APSR-extended list class.
  bool isRegListWithAPSR() const {
    return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
  }
  bool isDReg() const {
    return isReg() &&
           ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg.RegNum);
  }
  bool isQReg() const {
    return isReg() &&
           ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg.RegNum);
  }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
  bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
  bool isToken() const override { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
  bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
  bool isMem() const override {
    return isGPRMem() || isMVEMem();
  }
  // Memory operand whose base is a GPR or an MVE Q register, and whose
  // offset register, if present, is an MVE Q register.
  bool isMVEMem() const {
    if (Kind != k_Memory)
      return false;
    if (Memory.BaseRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum) &&
        !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Memory.BaseRegNum))
      return false;
    if (Memory.OffsetRegNum &&
        !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.OffsetRegNum))
      return false;
    return true;
  }
  // Memory operand whose base and offset registers (if present) are GPRs.
  bool isGPRMem() const {
    if (Kind != k_Memory)
      return false;
    if (Memory.BaseRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
      return false;
    if (Memory.OffsetRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
      return false;
    return true;
  }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  // Register shifted by a register (e.g. "r0, lsl r1"); both must be GPRs.
  bool isRegShiftedReg() const {
    return Kind == k_ShiftedRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.SrcReg) &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.ShiftReg);
  }
  // Register shifted by an immediate (e.g. "r0, lsl #2").
  bool isRegShiftedImm() const {
    return Kind == k_ShiftedImmediate &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedImm.SrcReg);
  }
  bool isRotImm() const { return Kind == k_RotateImmediate; }

  // Constant immediate that is a power of two within [Min, Max].
  template<unsigned Min, unsigned Max>
  bool isPowerTwoInRange() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && llvm::popcount((uint64_t)Value) == 1 && Value >= Min &&
           Value <= Max;
  }
  bool isModImm() const { return Kind == k_ModifiedImmediate; }

  // Immediate whose bitwise complement is an ARM modified immediate
  // (enables e.g. the MVN form).
  bool isModImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(~Value) != -1;
  }

  // Immediate whose negation is a modified immediate while the value itself
  // is not (enables e.g. ADD<->SUB substitution).
  bool isModImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(Value) == -1 &&
           ARM_AM::getSOImmVal(-Value) != -1;
  }
1458
1459 bool isThumbModImmNeg1_7() const {
1460 if (!isImm()) return false;
1461 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1462 if (!CE) return false;
1463 int32_t Value = -(int32_t)CE->getValue();
1464 return 0 < Value && Value < 8;
1465 }
1466
1467 bool isThumbModImmNeg8_255() const {
1468 if (!isImm()) return false;
1469 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1470 if (!CE) return false;
1471 int32_t Value = -(int32_t)CE->getValue();
1472 return 7 < Value && Value < 256;
1473 }
1474
  bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  // Post-indexed register, possibly with a shift; must be a GPR.
  bool isPostIdxRegShifted() const {
    return Kind == k_PostIndexRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
  }
  bool isPostIdxReg() const {
    return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
  }
  // Memory operand with neither register nor immediate offset. Alignment
  // must match exactly unless alignOK is set.
  bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  // As isMemNoOffset, but the base must be a GPR other than PC.
  bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  // As isMemNoOffset, but the base must be in the rGPR class (no PC/SP).
  bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  // As isMemNoOffset, but the base must be a low (Thumb) GPR.
  bool isMemNoOffsetT(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isGPRMem())
      return false;

    if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
           (alignOK || Memory.Alignment == Alignment);
  }
  // PC-relative load/store with a 12-bit immediate offset.
  bool isMemPCRelImm12() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base register must be PC.
    if (Memory.BaseRegNum != ARM::PC)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // INT32_MIN encodes the special #-0 offset.
      return (Val > -4096 && Val < 4096) ||
             (Val == std::numeric_limits<int32_t>::min());
    }
    return false;
  }
1542
  // Alignment-qualified no-offset memory operands for NEON loads/stores.
  // Each accepts either the exact alignment (in bytes) or no alignment
  // specifier at all; the "Dup" variants are for the all-lanes VLDn forms.
  bool isAlignedMemory() const {
    return isMemNoOffset(true);
  }

  bool isAlignedMemoryNone() const {
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemoryNone() const {
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory16() const {
    if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory16() const {
    if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory32() const {
    if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory32() const {
    if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory64() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory64() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory64or128() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isDupAlignedMemory64or128() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
      return true;
    return isMemNoOffset(false, 0);
  }

  bool isAlignedMemory64or128or256() const {
    if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
      return true;
    if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
      return true;
    if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
      return true;
    return isMemNoOffset(false, 0);
  }
1616
  // ARM addressing mode 2: [Rn, Rm] or [Rn, #imm] with imm in [-4095, 4095].
  bool isAddrMode2() const {
    if (!isGPRMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val > -4096 && Val < 4096;
    }
    return false;
  }

  // Stand-alone addrmode2 immediate offset (post-indexed forms), with
  // INT32_MIN encoding the special #-0.
  bool isAM2OffsetImm() const {
    if (!isImm()) return false;
    // Immediate offset in range [-4095, 4095].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val == std::numeric_limits<int32_t>::min()) ||
           (Val > -4096 && Val < 4096);
  }

  bool isAddrMode3() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isGPRMem() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and
      // we have to check for this too.
      return (Val > -256 && Val < 256) ||
             Val == std::numeric_limits<int32_t>::min();
    }
    return false;
  }

  // Stand-alone addrmode3 offset: a post-indexed register or an immediate
  // in [-255, 255].
  bool isAM3Offset() const {
    if (isPostIdxReg())
      return true;
    if (!isImm())
      return false;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is std::numeric_limits<int32_t>::min().
    return (Val > -256 && Val < 256) ||
           Val == std::numeric_limits<int32_t>::min();
  }

  // VFP addressing mode 5: [Rn, #imm] with imm a multiple of 4 in
  // [-1020, 1020]; no register offset allowed.
  bool isAddrMode5() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isGPRMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
             Val == std::numeric_limits<int32_t>::min();
    }
    return false;
  }

  // As isAddrMode5 but for half-precision loads/stores: imm is a multiple
  // of 2 in [-510, 510].
  bool isAddrMode5FP16() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isGPRMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-510, 510] and a multiple of 2.
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
             Val == std::numeric_limits<int32_t>::min();
    }
    return false;
  }
1714
1715 bool isMemTBB() const {
1716 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1717 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1718 return false;
1719 return true;
1720 }
1721
1722 bool isMemTBH() const {
1723 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1724 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1725 Memory.Alignment != 0 )
1726 return false;
1727 return true;
1728 }
1729
1730 bool isMemRegOffset() const {
1731 if (!isGPRMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1732 return false;
1733 return true;
1734 }
1735
  // Thumb-2 register-offset operand: [Rn, Rm] or [Rn, Rm, lsl #0-3],
  // positive offset only, base must not be PC.
  bool isT2MemRegOffset() const {
    if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
      return false;
    // Only lsl #{0, 1, 2, 3} allowed.
    if (Memory.ShiftType == ARM_AM::no_shift)
      return true;
    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
      return false;
    return true;
  }
1747
  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset. No shifts, negations or any other complicating factors.
    if (!isGPRMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    // Both registers must be in the low (r0-r7) range.
    return isARMLowRegister(Memory.BaseRegNum) &&
           (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
  }

  bool isMemThumbRIs4() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 124 && (Val % 4) == 0;
    }
    return false;
  }

  bool isMemThumbRIs2() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 62 && (Val % 2) == 0;
    }
    return false;
  }

  bool isMemThumbRIs1() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 31;
    }
    return false;
  }

  // SP-relative Thumb load/store: [sp, #imm].
  bool isMemThumbSPI() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
    }
    return false;
  }
1809
  bool isMemImm8s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // Special case, #-0 is std::numeric_limits<int32_t>::min().
      return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
             Val == std::numeric_limits<int32_t>::min();
    }
    return false;
  }

  bool isMemImm7s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    // Base must be a GPR other than PC.
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
        !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;
    // Immediate offset a multiple of 4 in range [-508, 508].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // Special case, #-0 is INT32_MIN.
      return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
    }
    return false;
  }

  bool isMemImm0_1020s4Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
    }
    return false;
  }

  bool isMemImm8Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // INT32_MIN encodes the special #-0 offset.
      return (Val == std::numeric_limits<int32_t>::min()) ||
             (Val > -256 && Val < 256);
    }
    return false;
  }

  // MVE-style offset: a multiple of (1 << Bits) whose scaled value lies in
  // [-127, 127]; the base must belong to RegClassID.
  template<unsigned Bits, unsigned RegClassID>
  bool isMemImm7ShiftedOffset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
        !ARMMCRegisterClasses[RegClassID].contains(Memory.BaseRegNum))
      return false;

    // Expect an immediate offset equal to an element of the range
    // [-127, 127], shifted left by Bits.

    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();

      // INT32_MIN is a special-case value (indicating the encoding with
      // zero offset and the subtract bit set)
      if (Val == INT32_MIN)
        return true;

      unsigned Divisor = 1U << Bits;

      // Check that the low bits are zero
      if (Val % Divisor != 0)
        return false;

      // Check that the remaining offset is within range.
      Val /= Divisor;
      return (Val >= -127 && Val <= 127);
    }
    return false;
  }
1906
  // MVE scatter/gather operand: GPR base (not PC) plus an MVE Q-register
  // offset, optionally scaled by 'uxtw #shift'.
  template <int shift> bool isMemRegRQOffset() const {
    if (!isMVEMem() || Memory.OffsetImm != nullptr || Memory.Alignment != 0)
      return false;

    if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
            Memory.BaseRegNum))
      return false;
    if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.OffsetRegNum))
      return false;

    if (shift == 0 && Memory.ShiftType != ARM_AM::no_shift)
      return false;

    if (shift > 0 &&
        (Memory.ShiftType != ARM_AM::uxtw || Memory.ShiftImm != shift))
      return false;

    return true;
  }

  // MVE operand with a Q-register base and a shifted immediate offset.
  template <int shift> bool isMemRegQOffset() const {
    if (!isMVEMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;

    if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
            Memory.BaseRegNum))
      return false;

    if (!Memory.OffsetImm)
      return true;
    static_assert(shift < 56,
                  "Such that we dont shift by a value higher than 62");
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();

      // The value must be a multiple of (1 << shift)
      if ((Val & ((1U << shift) - 1)) != 0)
        return false;

      // And be in the right range, depending on the amount that it is shifted
      // by. Shift 0, is equal to 7 unsigned bits, the sign bit is set
      // separately.
      int64_t Range = (1U << (7 + shift)) - 1;
      return (Val == INT32_MIN) || (Val > -Range && Val < Range);
    }
    return false;
  }
1955
1956 bool isMemPosImm8Offset() const {
1957 if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1958 return false;
1959 // Immediate offset in range [0, 255].
1960 if (!Memory.OffsetImm) return true;
1961 if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
1962 int64_t Val = CE->getValue();
1963 return Val >= 0 && Val < 256;
1964 }
1965 return false;
1966 }
1967
  bool isMemNegImm8Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, -1].
    if (!Memory.OffsetImm) return false;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // INT32_MIN encodes the special #-0 offset.
      return (Val == std::numeric_limits<int32_t>::min()) ||
             (Val > -256 && Val < 0);
    }
    return false;
  }

  bool isMemUImm12Offset() const {
    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      return (Val >= 0 && Val < 4096);
    }
    return false;
  }

  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.

    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isGPRMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
      int64_t Val = CE->getValue();
      // INT32_MIN encodes the special #-0 offset.
      return (Val > -4096 && Val < 4096) ||
             (Val == std::numeric_limits<int32_t>::min());
    }
    // If we have an immediate that's not a constant, treat it as a
    // symbolic expression needing a fixup.
    return true;
  }
2016
2017 bool isConstPoolAsmImm() const {
2018 // Delay processing of Constant Pool Immediate, this will turn into
2019 // a constant. Match no other operand
2020 return (isConstantPoolImm());
2021 }
2022
2023 bool isPostIdxImm8() const {
2024 if (!isImm()) return false;
2025 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2026 if (!CE) return false;
2027 int64_t Val = CE->getValue();
2028 return (Val > -256 && Val < 256) ||
2029 (Val == std::numeric_limits<int32_t>::min());
2030 }
2031
2032 bool isPostIdxImm8s4() const {
2033 if (!isImm()) return false;
2034 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2035 if (!CE) return false;
2036 int64_t Val = CE->getValue();
2037 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
2038 (Val == std::numeric_limits<int32_t>::min());
2039 }
2040
  // Simple operand-kind predicates.
  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isBankedReg() const { return Kind == k_BankedReg; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }

  // NEON operands.
  bool isVectorList() const { return Kind == k_VectorList; }

  // Vector list whose registers are consecutive (D0,D1,...).
  bool isSingleSpacedVectorList() const {
    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
  }

  // Vector list whose registers are two apart (D0,D2,...).
  bool isDoubleSpacedVectorList() const {
    return Kind == k_VectorList && VectorList.isDoubleSpaced;
  }
2055
  // Vector-list predicates: each checks spacing (single vs double), element
  // count, and — where an encoding requires it — the register class of the
  // first register in the list.
  bool isVecListOneD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 1;
  }

  // Pair of MVE Q registers.
  bool isVecListTwoMQ() const {
    return isSingleSpacedVectorList() && VectorList.Count == 2 &&
           ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
               VectorList.RegNum);
  }

  // Consecutive D-register pair (DPair register class).
  bool isVecListDPair() const {
    if (!isSingleSpacedVectorList()) return false;
    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListThreeD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  // Double-spaced D-register pair (DPairSpc register class).
  bool isVecListDPairSpaced() const {
    if (Kind != k_VectorList) return false;
    if (isSingleSpacedVectorList()) return false;
    return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListThreeQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  // Four MVE Q registers.
  bool isVecListFourMQ() const {
    return isSingleSpacedVectorList() && VectorList.Count == 4 &&
           ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
               VectorList.RegNum);
  }
2105
  // "All lanes" vector lists (the vld[234] Rn[] broadcast forms): same
  // spacing/count scheme as the plain vector-list predicates above.
  bool isSingleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
  }

  bool isVecListOneDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 1;
  }

  // Consecutive D-register pair, all-lanes form.
  bool isVecListDPairAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListDPairSpacedAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 2;
  }

  bool isVecListThreeDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListThreeQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 4;
  }

  bool isVecListFourQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 4;
  }
2149
  // Lane-indexed vector lists (vld/vst to a single lane). Each predicate
  // checks spacing, element count, and that the lane index fits the element
  // width: <= 7 for byte lanes, <= 3 for halfword lanes, <= 1 for word
  // lanes.
  bool isSingleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
  }

  bool isVecListOneDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
  }

  bool isVecListOneDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
  }

  bool isVecListOneDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
  }

  bool isVecListTwoDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListThreeDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
  }

  bool isVecListThreeDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
  }

  bool isVecListThreeQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
  }

  bool isVecListThreeQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
  }

  bool isVecListThreeDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
  }

  bool isVecListFourDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
  }

  bool isVecListFourDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
  }

  bool isVecListFourQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
  }

  bool isVecListFourQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
  }

  bool isVecListFourDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
  }
2247
  bool isVectorIndex() const { return Kind == k_VectorIndex; }

  // True if the operand is a vector lane index strictly below NumLanes.
  template <unsigned NumLanes>
  bool isVectorIndexInRange() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < NumLanes;
  }

  // Lane counts per element size: 8 i8 lanes, 4 i16, 2 i32, 1 i64.
  bool isVectorIndex8() const { return isVectorIndexInRange<8>(); }
  bool isVectorIndex16() const { return isVectorIndexInRange<4>(); }
  bool isVectorIndex32() const { return isVectorIndexInRange<2>(); }
  bool isVectorIndex64() const { return isVectorIndexInRange<1>(); }

  // Index that must be one of exactly two permitted values.
  template<int PermittedValue, int OtherPermittedValue>
  bool isMVEPairVectorIndex() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val == PermittedValue ||
           VectorIndex.Val == OtherPermittedValue;
  }
2267
2268 bool isNEONi8splat() const {
2269 if (!isImm()) return false;
2270 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2271 // Must be a constant.
2272 if (!CE) return false;
2273 int64_t Value = CE->getValue();
2274 // i8 value splatted across 8 bytes. The immediate is just the 8 byte
2275 // value.
2276 return Value >= 0 && Value < 256;
2277 }
2278
  // i16 splat immediate check (byte-replicated values are deliberately
  // excluded so they match the replicate patterns instead).
  bool isNEONi16splat() const {
    if (isNEONByteReplicate(2))
      return false; // Leave that for bytes replication and forbid by default.
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    unsigned Value = CE->getValue();
    // NOTE(review): this excerpt appears to be missing the final range
    // check on Value (cf. isNEONi16splatNot below) — verify against
    // upstream before editing.
  }
2290
2291 bool isNEONi16splatNot() const {
2292 if (!isImm())
2293 return false;
2294 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2295 // Must be a constant.
2296 if (!CE) return false;
2297 unsigned Value = CE->getValue();
2298 return ARM_AM::isNEONi16splat(~Value & 0xffff);
2299 }
2300
  // i32 splat immediate check (byte-replicated values go to the replicate
  // patterns instead).
  bool isNEONi32splat() const {
    if (isNEONByteReplicate(4))
      return false; // Leave that for bytes replication and forbid by default.
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    unsigned Value = CE->getValue();
    // NOTE(review): excerpt appears to be missing the final splat-validity
    // check on Value — verify against upstream before editing.
  }

  // As above, but on the bitwise NOT of the value.
  bool isNEONi32splatNot() const {
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    unsigned Value = CE->getValue();
    // NOTE(review): excerpt appears to be missing the final splat-validity
    // check on ~Value — verify against upstream before editing.
  }
2322
2323 static bool isValidNEONi32vmovImm(int64_t Value) {
2324 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
2325 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
2326 return ((Value & 0xffffffffffffff00) == 0) ||
2327 ((Value & 0xffffffffffff00ff) == 0) ||
2328 ((Value & 0xffffffffff00ffff) == 0) ||
2329 ((Value & 0xffffffff00ffffff) == 0) ||
2330 ((Value & 0xffffffffffff00ff) == 0xff) ||
2331 ((Value & 0xffffffffff00ffff) == 0xffff);
2332 }
2333
  // True if the constant immediate consists of NumElems identical elements
  // of Width bits (optionally after bitwise inversion), where each element
  // is itself a legal per-width immediate. Zero is rejected outright.
  bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
    assert((Width == 8 || Width == 16 || Width == 32) &&
           "Invalid element width");
    assert(NumElems * Width <= 64 && "Invalid result width");

    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE)
      return false;
    int64_t Value = CE->getValue();
    if (!Value)
      return false; // Don't bother with zero.
    if (Inv)
      Value = ~Value;

    // Extract the lowest element and check it is legal for its width:
    // 16-bit elements may have bits set in only one of their two bytes;
    // 32-bit elements must be a valid i32 VMOV immediate.
    uint64_t Mask = (1ull << Width) - 1;
    uint64_t Elem = Value & Mask;
    if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
      return false;
    if (Width == 32 && !isValidNEONi32vmovImm(Elem))
      return false;

    // All remaining elements must equal the first. Note Value is signed,
    // so >>= sign-extends; any element mismatch (including stray high
    // bits) fails the comparison against Elem.
    for (unsigned i = 1; i < NumElems; ++i) {
      Value >>= Width;
      if ((Value & Mask) != Elem)
        return false;
    }
    return true;
  }
2365
  // NumBytes identical bytes, no inversion.
  bool isNEONByteReplicate(unsigned NumBytes) const {
    return isNEONReplicate(8, NumBytes, false);
  }

  // Compile-/debug-time sanity checks for the replicate templates below.
  static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
    assert((FromW == 8 || FromW == 16 || FromW == 32) &&
           "Invalid source width");
    assert((ToW == 16 || ToW == 32 || ToW == 64) &&
           "Invalid destination width");
    assert(FromW < ToW && "ToW is not less than FromW");
  }

  // FromW-bit element replicated to fill ToW bits. A 64-bit all-0x00/0xff
  // pattern is excluded so it matches the i64 splat form instead.
  template<unsigned FromW, unsigned ToW>
  bool isNEONmovReplicate() const {
    checkNeonReplicateArgs(FromW, ToW);
    if (ToW == 64 && isNEONi64splat())
      return false;
    return isNEONReplicate(FromW, ToW / FromW, false);
  }

  // Same, but matching the bitwise inverse of the value.
  template<unsigned FromW, unsigned ToW>
  bool isNEONinvReplicate() const {
    checkNeonReplicateArgs(FromW, ToW);
    return isNEONReplicate(FromW, ToW / FromW, true);
  }
2391
2392 bool isNEONi32vmov() const {
2393 if (isNEONByteReplicate(4))
2394 return false; // Let it to be classified as byte-replicate case.
2395 if (!isImm())
2396 return false;
2397 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2398 // Must be a constant.
2399 if (!CE)
2400 return false;
2401 return isValidNEONi32vmovImm(CE->getValue());
2402 }
2403
2404 bool isNEONi32vmovNeg() const {
2405 if (!isImm()) return false;
2406 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2407 // Must be a constant.
2408 if (!CE) return false;
2409 return isValidNEONi32vmovImm(~CE->getValue());
2410 }
2411
2412 bool isNEONi64splat() const {
2413 if (!isImm()) return false;
2414 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2415 // Must be a constant.
2416 if (!CE) return false;
2417 uint64_t Value = CE->getValue();
2418 // i64 value with each byte being either 0 or 0xff.
2419 for (unsigned i = 0; i < 8; ++i, Value >>= 8)
2420 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
2421 return true;
2422 }
2423
2424 template<int64_t Angle, int64_t Remainder>
2425 bool isComplexRotation() const {
2426 if (!isImm()) return false;
2427
2428 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2429 if (!CE) return false;
2430 uint64_t Value = CE->getValue();
2431
2432 return (Value % Angle == Remainder && Value <= 270);
2433 }
2434
2435 bool isMVELongShift() const {
2436 if (!isImm()) return false;
2437 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2438 // Must be a constant.
2439 if (!CE) return false;
2440 uint64_t Value = CE->getValue();
2441 return Value >= 1 && Value <= 32;
2442 }
2443
2444 bool isMveSaturateOp() const {
2445 if (!isImm()) return false;
2446 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2447 if (!CE) return false;
2448 uint64_t Value = CE->getValue();
2449 return Value == 48 || Value == 64;
2450 }
2451
  // IT/VPT condition-code subsets.
  // NOTE(review): each of these appears to be missing the local
  //   ARMCC::CondCodes CC = getCondCode();
  // line in this excerpt — verify against upstream before editing.
  bool isITCondCodeNoAL() const {
    if (!isITCondCode()) return false;
    return CC != ARMCC::AL;
  }

  // Equality-only subset.
  bool isITCondCodeRestrictedI() const {
    if (!isITCondCode())
      return false;
    return CC == ARMCC::EQ || CC == ARMCC::NE;
  }

  // Signed-comparison subset.
  bool isITCondCodeRestrictedS() const {
    if (!isITCondCode())
      return false;
    return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
           CC == ARMCC::GE;
  }

  // Unsigned-comparison subset.
  bool isITCondCodeRestrictedU() const {
    if (!isITCondCode())
      return false;
    return CC == ARMCC::HS || CC == ARMCC::HI;
  }

  // Floating-point comparison subset.
  bool isITCondCodeRestrictedFP() const {
    if (!isITCondCode())
      return false;
    return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
           CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
  }
2487
  // Rewrite this operand in place as a two-register single-spaced vector
  // list built from a DPair register.
  void setVecListDPair(unsigned int DPair) {
    Kind = k_VectorList;
    VectorList.RegNum = DPair;
    VectorList.Count = 2;
    VectorList.isDoubleSpaced = false;
  }

  // Rewrite this operand in place as a one-register vector list.
  void setVecListOneD(unsigned int DReg) {
    Kind = k_VectorList;
    VectorList.RegNum = DReg;
    VectorList.Count = 1;
    VectorList.isDoubleSpaced = false;
  }
2501
  // Append Expr to Inst, folding constants to immediate operands.
  // NOTE(review): this excerpt is missing the bodies of the null-Expr and
  // fallback branches (presumably createImm(0) and createExpr(Expr)) —
  // verify against upstream before editing.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (!Expr)
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
  }
2511
  // Branch targets are added as-is; constants fold to immediates, symbolic
  // targets become expression operands resolved by fixups.
  void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }
2521
2522 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2523 assert(N == 2 && "Invalid number of operands!");
2524 Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2525 unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
2526 Inst.addOperand(MCOperand::createReg(RegNum));
2527 }
2528
  // VPT predication (vpred_n): predicate kind plus P0 when predicated.
  // NOTE(review): N == 3 but only two operand adds are visible — the third
  // add appears to be missing from this excerpt; verify against upstream.
  void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred())));
    unsigned RegNum = getVPTPred() == ARMVCC::None ? 0: ARM::P0;
    Inst.addOperand(MCOperand::createReg(RegNum));
  }

  // VPT predication (vpred_r): as vpred_n plus the "inactive lanes" source
  // register. When predicated, that register is the one tied to the
  // instruction's output, found via the TIED_TO operand constraint.
  void addVPTPredROperands(MCInst &Inst, unsigned N) const {
    assert(N == 4 && "Invalid number of operands!");
    addVPTPredNOperands(Inst, N-1);
    unsigned RegNum;
    if (getVPTPred() == ARMVCC::None) {
      RegNum = 0;
    } else {
      unsigned NextOpIndex = Inst.getNumOperands();
      // Opcode table is indexed in reverse (see ARMDescs.Insts layout).
      const MCInstrDesc &MCID =
          ARMDescs.Insts[ARM::INSTRUCTION_LIST_END - 1 - Inst.getOpcode()];
      int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO);
      assert(TiedOp >= 0 &&
             "Inactive register in vpred_r is not tied to an output!");
      RegNum = Inst.getOperand(TiedOp).getReg();
    }
    Inst.addOperand(MCOperand::createReg(RegNum));
  }
2554
  // Coprocessor number/register are encoded as plain immediates.
  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCoproc()));
  }

  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCoproc()));
  }

  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
  }

  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(ITMask.Mask));
  }

  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
  }

  // NOTE(review): the next three bodies appear truncated in this excerpt
  // (their addOperand lines are missing) — verify against upstream.
  void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
  }

  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
  }
2594
  // Register shifted by register: source reg, shift reg, packed shift
  // opcode/amount.
  // NOTE(review): the opening of the third addOperand call is missing from
  // this excerpt (only its continuation line remains) — verify upstream.
  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    assert(isRegShiftedReg() &&
           "addRegShiftedRegOperands() on non-RegShiftedReg!");
    Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
        ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }

  // Register shifted by immediate: source reg, packed shift opcode/amount.
  // NOTE(review): the opening of the second addOperand call is missing from
  // this excerpt — verify upstream.
  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    assert(isRegShiftedImm() &&
           "addRegShiftedImmOperands() on non-RegShiftedImm!");
    Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
    // Shift of #32 is encoded as 0 where permitted
    unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
        ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
  }

  // ASR flag in bit 5, amount in the low bits.
  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }

  // NOTE(review): the loop bodies of the next two methods (the per-register
  // addOperand calls) are missing from this excerpt — verify upstream.
  void addRegListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (unsigned Reg : RegList)
  }

  void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (unsigned Reg : RegList)
  }
2635
  // The typed register-list adders all share the generic implementation.
  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
  }
2657
  // Modified-immediate (so_imm) adders. The plain form packs rotate into
  // bits 7+; the Not/Neg variants re-encode the complement/negation.
  // NOTE(review): the addOperand lines of the Not/Neg/Thumb variants are
  // missing from this excerpt — verify against upstream.
  void addModImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // Support for fixups (MCFixup)
    if (isImm())
      return addImmOperands(Inst, N);

    Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
  }

  void addModImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
  }

  void addModImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
  }

  void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint32_t Val = -CE->getValue();
  }

  void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint32_t Val = -CE->getValue();
  }
2695
  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::createImm(Mask));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  // Fractional-bits operands are stored as (width - value).
  void addFBits16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
  }

  void addFBits32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
  }
2723
  // NOTE(review): the addOperand line for the encoded FP immediate is
  // missing from this excerpt — verify against upstream.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
  }

  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  // The Imm7ShiftN operands are stored unscaled.
  void addImm7Shift0Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7Shift1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7Shift2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }
2770
  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
  }

  // As above, but negated after scaling.
  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
  }

  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
  }

  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
  }
2810
  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
  }

  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // An ASR value of 32 encodes as 0, so that's how we want to add it to
    // the instruction as well.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
  }

  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
  }

  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
  }

  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually an imm0_4095, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
  }
2852
  // Word-scaled (>> 2) unsigned offset; symbolic offsets fall through to
  // the symbol-ref path.
  // NOTE(review): the addOperand/createExpr line for the symbol-ref case is
  // missing from this excerpt — verify against upstream.
  void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
    if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
      Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
      return;
    }
    const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
  }

  // PC-relative Thumb load operand: either an immediate/label, or a memory
  // operand whose constant offset is emitted directly.
  // NOTE(review): the addOperand/createExpr line for the symbol-ref case is
  // missing from this excerpt — verify against upstream.
  void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    if (isImm()) {
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
      if (CE) {
        Inst.addOperand(MCOperand::createImm(CE->getValue()));
        return;
      }
      const MCSymbolRefExpr *SR = cast<MCSymbolRefExpr>(Imm.Val);
      return;
    }

    assert(isGPRMem() && "Unknown value type!");
    assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }
2882
  // Barrier options are encoded as their numeric option value.
  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
  }

  void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
  }

  void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt())));
  }

  // Offset-less memory operands: only the base register is emitted.
  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }

  void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }

  void addMemNoOffsetT2NoSpOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }

  void addMemNoOffsetTOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }

  // PC-relative offset: constant folds to an immediate, otherwise emit the
  // expression for a fixup.
  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
  }
2925
2926 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2927 assert(N == 1 && "Invalid number of operands!");
2928 assert(isImm() && "Not an immediate!");
2929
2930 // If we have an immediate that's not a constant, treat it as a label
2931 // reference needing a fixup.
2932 if (!isa<MCConstantExpr>(getImm())) {
2933 Inst.addOperand(MCOperand::createExpr(getImm()));
2934 return;
2935 }
2936
2937 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2938 int Val = CE->getValue();
2940 }
2941
2942 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
2943 assert(N == 2 && "Invalid number of operands!");
2944 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2945 Inst.addOperand(MCOperand::createImm(Memory.Alignment));
2946 }
2947
  // Delegates to the shared aligned-memory emitter.
  void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2951
  // Delegates to the shared aligned-memory emitter.
  void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2955
  // Delegates to the shared aligned-memory emitter.
  void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2959
  // Delegates to the shared aligned-memory emitter.
  void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2963
  // Delegates to the shared aligned-memory emitter.
  void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2967
  // Delegates to the shared aligned-memory emitter.
  void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2971
  // Delegates to the shared aligned-memory emitter.
  void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2975
  // Delegates to the shared aligned-memory emitter.
  void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2979
  // Delegates to the shared aligned-memory emitter.
  void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2983
  // Delegates to the shared aligned-memory emitter.
  void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2987
  // Delegates to the shared aligned-memory emitter.
  void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2991
2992 void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
2993 assert(N == 3 && "Invalid number of operands!");
2994 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2995 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2996 if (!Memory.OffsetRegNum) {
2997 if (!Memory.OffsetImm)
2999 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3000 int32_t Val = CE->getValue();
3002 // Special case for #-0
3003 if (Val == std::numeric_limits<int32_t>::min())
3004 Val = 0;
3005 if (Val < 0)
3006 Val = -Val;
3007 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
3009 } else
3010 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3011 } else {
3012 // For register offset, we encode the shift type and negation flag
3013 // here.
3014 int32_t Val =
3016 Memory.ShiftImm, Memory.ShiftType);
3018 }
3019 }
3020
3021 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
3022 assert(N == 2 && "Invalid number of operands!");
3023 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3024 assert(CE && "non-constant AM2OffsetImm operand!");
3025 int32_t Val = CE->getValue();
3027 // Special case for #-0
3028 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3029 if (Val < 0) Val = -Val;
3030 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
3033 }
3034
3035 void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
3036 assert(N == 3 && "Invalid number of operands!");
3037 // If we have an immediate that's not a constant, treat it as a label
3038 // reference needing a fixup. If it is a constant, it's something else
3039 // and we reject it.
3040 if (isImm()) {
3041 Inst.addOperand(MCOperand::createExpr(getImm()));
3044 return;
3045 }
3046
3047 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3048 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3049 if (!Memory.OffsetRegNum) {
3050 if (!Memory.OffsetImm)
3052 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3053 int32_t Val = CE->getValue();
3055 // Special case for #-0
3056 if (Val == std::numeric_limits<int32_t>::min())
3057 Val = 0;
3058 if (Val < 0)
3059 Val = -Val;
3060 Val = ARM_AM::getAM3Opc(AddSub, Val);
3062 } else
3063 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3064 } else {
3065 // For register offset, we encode the shift type and negation flag
3066 // here.
3067 int32_t Val =
3070 }
3071 }
3072
3073 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
3074 assert(N == 2 && "Invalid number of operands!");
3075 if (Kind == k_PostIndexRegister) {
3076 int32_t Val =
3077 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
3078 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3080 return;
3081 }
3082
3083 // Constant offset.
3084 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
3085 int32_t Val = CE->getValue();
3087 // Special case for #-0
3088 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3089 if (Val < 0) Val = -Val;
3090 Val = ARM_AM::getAM3Opc(AddSub, Val);
3093 }
3094
3095 void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
3096 assert(N == 2 && "Invalid number of operands!");
3097 // If we have an immediate that's not a constant, treat it as a label
3098 // reference needing a fixup. If it is a constant, it's something else
3099 // and we reject it.
3100 if (isImm()) {
3101 Inst.addOperand(MCOperand::createExpr(getImm()));
3103 return;
3104 }
3105
3106 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3107 if (!Memory.OffsetImm)
3109 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3110 // The lower two bits are always zero and as such are not encoded.
3111 int32_t Val = CE->getValue() / 4;
3113 // Special case for #-0
3114 if (Val == std::numeric_limits<int32_t>::min())
3115 Val = 0;
3116 if (Val < 0)
3117 Val = -Val;
3118 Val = ARM_AM::getAM5Opc(AddSub, Val);
3120 } else
3121 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3122 }
3123
3124 void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
3125 assert(N == 2 && "Invalid number of operands!");
3126 // If we have an immediate that's not a constant, treat it as a label
3127 // reference needing a fixup. If it is a constant, it's something else
3128 // and we reject it.
3129 if (isImm()) {
3130 Inst.addOperand(MCOperand::createExpr(getImm()));
3132 return;
3133 }
3134
3135 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3136 // The lower bit is always zero and as such is not encoded.
3137 if (!Memory.OffsetImm)
3139 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm)) {
3140 int32_t Val = CE->getValue() / 2;
3142 // Special case for #-0
3143 if (Val == std::numeric_limits<int32_t>::min())
3144 Val = 0;
3145 if (Val < 0)
3146 Val = -Val;
3147 Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
3149 } else
3150 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3151 }
3152
3153 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
3154 assert(N == 2 && "Invalid number of operands!");
3155 // If we have an immediate that's not a constant, treat it as a label
3156 // reference needing a fixup. If it is a constant, it's something else
3157 // and we reject it.
3158 if (isImm()) {
3159 Inst.addOperand(MCOperand::createExpr(getImm()));
3161 return;
3162 }
3163
3164 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3165 addExpr(Inst, Memory.OffsetImm);
3166 }
3167
3168 void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const {
3169 assert(N == 2 && "Invalid number of operands!");
3170 // If we have an immediate that's not a constant, treat it as a label
3171 // reference needing a fixup. If it is a constant, it's something else
3172 // and we reject it.
3173 if (isImm()) {
3174 Inst.addOperand(MCOperand::createExpr(getImm()));
3176 return;
3177 }
3178
3179 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3180 addExpr(Inst, Memory.OffsetImm);
3181 }
3182
3183 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
3184 assert(N == 2 && "Invalid number of operands!");
3185 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3186 if (!Memory.OffsetImm)
3188 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3189 // The lower two bits are always zero and as such are not encoded.
3190 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3191 else
3192 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3193 }
3194
3195 void addMemImmOffsetOperands(MCInst &Inst, unsigned N) const {
3196 assert(N == 2 && "Invalid number of operands!");
3197 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3198 addExpr(Inst, Memory.OffsetImm);
3199 }
3200
3201 void addMemRegRQOffsetOperands(MCInst &Inst, unsigned N) const {
3202 assert(N == 2 && "Invalid number of operands!");
3203 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3204 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3205 }
3206
3207 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3208 assert(N == 2 && "Invalid number of operands!");
3209 // If this is an immediate, it's a label reference.
3210 if (isImm()) {
3211 addExpr(Inst, getImm());
3213 return;
3214 }
3215
3216 // Otherwise, it's a normal memory reg+offset.
3217 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3218 addExpr(Inst, Memory.OffsetImm);
3219 }
3220
3221 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
3222 assert(N == 2 && "Invalid number of operands!");
3223 // If this is an immediate, it's a label reference.
3224 if (isImm()) {
3225 addExpr(Inst, getImm());
3227 return;
3228 }
3229
3230 // Otherwise, it's a normal memory reg+offset.
3231 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3232 addExpr(Inst, Memory.OffsetImm);
3233 }
3234
3235 void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
3236 assert(N == 1 && "Invalid number of operands!");
3237 // This is container for the immediate that we will create the constant
3238 // pool from
3239 addExpr(Inst, getConstantPoolImm());
3240 }
3241
3242 void addMemTBBOperands(MCInst &Inst, unsigned N) const {
3243 assert(N == 2 && "Invalid number of operands!");
3244 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3245 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3246 }
3247
3248 void addMemTBHOperands(MCInst &Inst, unsigned N) const {
3249 assert(N == 2 && "Invalid number of operands!");
3250 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3251 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3252 }
3253
3254 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3255 assert(N == 3 && "Invalid number of operands!");
3256 unsigned Val =
3258 Memory.ShiftImm, Memory.ShiftType);
3259 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3260 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3262 }
3263
3264 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
3265 assert(N == 3 && "Invalid number of operands!");
3266 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3267 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3268 Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
3269 }
3270
3271 void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
3272 assert(N == 2 && "Invalid number of operands!");
3273 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3274 Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
3275 }
3276
3277 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
3278 assert(N == 2 && "Invalid number of operands!");
3279 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3280 if (!Memory.OffsetImm)
3282 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3283 // The lower two bits are always zero and as such are not encoded.
3284 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3285 else
3286 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3287 }
3288
3289 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
3290 assert(N == 2 && "Invalid number of operands!");
3291 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3292 if (!Memory.OffsetImm)
3294 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3295 Inst.addOperand(MCOperand::createImm(CE->getValue() / 2));
3296 else
3297 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3298 }
3299
3300 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
3301 assert(N == 2 && "Invalid number of operands!");
3302 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3303 addExpr(Inst, Memory.OffsetImm);
3304 }
3305
3306 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
3307 assert(N == 2 && "Invalid number of operands!");
3308 Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
3309 if (!Memory.OffsetImm)
3311 else if (const auto *CE = dyn_cast<MCConstantExpr>(Memory.OffsetImm))
3312 // The lower two bits are always zero and as such are not encoded.
3313 Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
3314 else
3315 Inst.addOperand(MCOperand::createExpr(Memory.OffsetImm));
3316 }
3317
3318 void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
3319 assert(N == 1 && "Invalid number of operands!");
3320 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3321 assert(CE && "non-constant post-idx-imm8 operand!");
3322 int Imm = CE->getValue();
3323 bool isAdd = Imm >= 0;
3324 if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
3325 Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
3327 }
3328
3329 void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
3330 assert(N == 1 && "Invalid number of operands!");
3331 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3332 assert(CE && "non-constant post-idx-imm8s4 operand!");
3333 int Imm = CE->getValue();
3334 bool isAdd = Imm >= 0;
3335 if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
3336 // Immediate is scaled by 4.
3337 Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
3339 }
3340
3341 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
3342 assert(N == 2 && "Invalid number of operands!");
3343 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3344 Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
3345 }
3346
3347 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
3348 assert(N == 2 && "Invalid number of operands!");
3349 Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
3350 // The sign, shift type, and shift amount are encoded in a single operand
3351 // using the AM2 encoding helpers.
3352 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
3353 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
3354 PostIdxReg.ShiftTy);
3356 }
3357
3358 void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
3359 assert(N == 1 && "Invalid number of operands!");
3360 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3361 Inst.addOperand(MCOperand::createImm(CE->getValue()));
3362 }
3363
3364 void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
3365 assert(N == 1 && "Invalid number of operands!");
3366 Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
3367 }
3368
3369 void addBankedRegOperands(MCInst &Inst, unsigned N) const {
3370 assert(N == 1 && "Invalid number of operands!");
3371 Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
3372 }
3373
3374 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
3375 assert(N == 1 && "Invalid number of operands!");
3376 Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
3377 }
3378
3379 void addVecListOperands(MCInst &Inst, unsigned N) const {
3380 assert(N == 1 && "Invalid number of operands!");
3381 Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
3382 }
3383
3384 void addMVEVecListOperands(MCInst &Inst, unsigned N) const {
3385 assert(N == 1 && "Invalid number of operands!");
3386
3387 // When we come here, the VectorList field will identify a range
3388 // of q-registers by its base register and length, and it will
3389 // have already been error-checked to be the expected length of
3390 // range and contain only q-regs in the range q0-q7. So we can
3391 // count on the base register being in the range q0-q6 (for 2
3392 // regs) or q0-q4 (for 4)
3393 //
3394 // The MVE instructions taking a register range of this kind will
3395 // need an operand in the MQQPR or MQQQQPR class, representing the
3396 // entire range as a unit. So we must translate into that class,
3397 // by finding the index of the base register in the MQPR reg
3398 // class, and returning the super-register at the corresponding
3399 // index in the target class.
3400
3401 const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
3402 const MCRegisterClass *RC_out =
3403 (VectorList.Count == 2) ? &ARMMCRegisterClasses[ARM::MQQPRRegClassID]
3404 : &ARMMCRegisterClasses[ARM::MQQQQPRRegClassID];
3405
3406 unsigned I, E = RC_out->getNumRegs();
3407 for (I = 0; I < E; I++)
3408 if (RC_in->getRegister(I) == VectorList.RegNum)
3409 break;
3410 assert(I < E && "Invalid vector list start register!");
3411
3413 }
3414
3415 void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
3416 assert(N == 2 && "Invalid number of operands!");
3417 Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
3418 Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
3419 }
3420
3421 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
3422 assert(N == 1 && "Invalid number of operands!");
3423 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3424 }
3425
3426 void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
3427 assert(N == 1 && "Invalid number of operands!");
3428 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3429 }
3430
3431 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
3432 assert(N == 1 && "Invalid number of operands!");
3433 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3434 }
3435
3436 void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
3437 assert(N == 1 && "Invalid number of operands!");
3438 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3439 }
3440
3441 void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const {
3442 assert(N == 1 && "Invalid number of operands!");
3443 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3444 }
3445
3446 void addMVEPairVectorIndexOperands(MCInst &Inst, unsigned N) const {
3447 assert(N == 1 && "Invalid number of operands!");
3448 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
3449 }
3450
3451 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
3452 assert(N == 1 && "Invalid number of operands!");
3453 // The immediate encodes the type of constant as well as the value.
3454 // Mask in that this is an i8 splat.
3455 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3456 Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
3457 }
3458
3459 void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
3460 assert(N == 1 && "Invalid number of operands!");
3461 // The immediate encodes the type of constant as well as the value.
3462 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3463 unsigned Value = CE->getValue();
3466 }
3467
3468 void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
3469 assert(N == 1 && "Invalid number of operands!");
3470 // The immediate encodes the type of constant as well as the value.
3471 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3472 unsigned Value = CE->getValue();
3475 }
3476
3477 void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
3478 assert(N == 1 && "Invalid number of operands!");
3479 // The immediate encodes the type of constant as well as the value.
3480 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3481 unsigned Value = CE->getValue();
3484 }
3485
3486 void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
3487 assert(N == 1 && "Invalid number of operands!");
3488 // The immediate encodes the type of constant as well as the value.
3489 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3490 unsigned Value = CE->getValue();
3493 }
3494
3495 void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
3496 // The immediate encodes the type of constant as well as the value.
3497 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3498 assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
3499 Inst.getOpcode() == ARM::VMOVv16i8) &&
3500 "All instructions that wants to replicate non-zero byte "
3501 "always must be replaced with VMOVv8i8 or VMOVv16i8.");
3502 unsigned Value = CE->getValue();
3503 if (Inv)
3504 Value = ~Value;
3505 unsigned B = Value & 0xff;
3506 B |= 0xe00; // cmode = 0b1110
3508 }
3509
3510 void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3511 assert(N == 1 && "Invalid number of operands!");
3512 addNEONi8ReplicateOperands(Inst, true);
3513 }
3514
3515 static unsigned encodeNeonVMOVImmediate(unsigned Value) {
3516 if (Value >= 256 && Value <= 0xffff)
3517 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
3518 else if (Value > 0xffff && Value <= 0xffffff)
3519 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
3520 else if (Value > 0xffffff)
3521 Value = (Value >> 24) | 0x600;
3522 return Value;
3523 }
3524
3525 void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
3526 assert(N == 1 && "Invalid number of operands!");
3527 // The immediate encodes the type of constant as well as the value.
3528 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3529 unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
3531 }
3532
3533 void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3534 assert(N == 1 && "Invalid number of operands!");
3535 addNEONi8ReplicateOperands(Inst, false);
3536 }
3537
3538 void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
3539 assert(N == 1 && "Invalid number of operands!");
3540 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3541 assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
3542 Inst.getOpcode() == ARM::VMOVv8i16 ||
3543 Inst.getOpcode() == ARM::VMVNv4i16 ||
3544 Inst.getOpcode() == ARM::VMVNv8i16) &&
3545 "All instructions that want to replicate non-zero half-word "
3546 "always must be replaced with V{MOV,MVN}v{4,8}i16.");
3547 uint64_t Value = CE->getValue();
3548 unsigned Elem = Value & 0xffff;
3549 if (Elem >= 256)
3550 Elem = (Elem >> 8) | 0x200;
3551 Inst.addOperand(MCOperand::createImm(Elem));
3552 }
3553
3554 void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
3555 assert(N == 1 && "Invalid number of operands!");
3556 // The immediate encodes the type of constant as well as the value.
3557 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3558 unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
3560 }
3561
3562 void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
3563 assert(N == 1 && "Invalid number of operands!");
3564 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3565 assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
3566 Inst.getOpcode() == ARM::VMOVv4i32 ||
3567 Inst.getOpcode() == ARM::VMVNv2i32 ||
3568 Inst.getOpcode() == ARM::VMVNv4i32) &&
3569 "All instructions that want to replicate non-zero word "
3570 "always must be replaced with V{MOV,MVN}v{2,4}i32.");
3571 uint64_t Value = CE->getValue();
3572 unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
3573 Inst.addOperand(MCOperand::createImm(Elem));
3574 }
3575
3576 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
3577 assert(N == 1 && "Invalid number of operands!");
3578 // The immediate encodes the type of constant as well as the value.
3579 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3580 uint64_t Value = CE->getValue();
3581 unsigned Imm = 0;
3582 for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
3583 Imm |= (Value & 1) << i;
3584 }
3585 Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
3586 }
3587
3588 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
3589 assert(N == 1 && "Invalid number of operands!");
3590 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3591 Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
3592 }
3593
3594 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
3595 assert(N == 1 && "Invalid number of operands!");
3596 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3597 Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
3598 }
3599
3600 void addMveSaturateOperands(MCInst &Inst, unsigned N) const {
3601 assert(N == 1 && "Invalid number of operands!");
3602 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
3603 unsigned Imm = CE->getValue();
3604 assert((Imm == 48 || Imm == 64) && "Invalid saturate operand");
3605 Inst.addOperand(MCOperand::createImm(Imm == 48 ? 1 : 0));
3606 }
3607
3608 void print(raw_ostream &OS) const override;
3609
3610 static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
3611 auto Op = std::make_unique<ARMOperand>(k_ITCondMask);
3612 Op->ITMask.Mask = Mask;
3613 Op->StartLoc = S;
3614 Op->EndLoc = S;
3615 return Op;
3616 }
3617
3618 static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
3619 SMLoc S) {
3620 auto Op = std::make_unique<ARMOperand>(k_CondCode);
3621 Op->CC.Val = CC;
3622 Op->StartLoc = S;
3623 Op->EndLoc = S;
3624 return Op;
3625 }
3626
3627 static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC,
3628 SMLoc S) {
3629 auto Op = std::make_unique<ARMOperand>(k_VPTPred);
3630 Op->VCC.Val = CC;
3631 Op->StartLoc = S;
3632 Op->EndLoc = S;
3633 return Op;
3634 }
3635
3636 static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
3637 auto Op = std::make_unique<ARMOperand>(k_CoprocNum);
3638 Op->Cop.Val = CopVal;
3639 Op->StartLoc = S;
3640 Op->EndLoc = S;
3641 return Op;
3642 }
3643
3644 static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
3645 auto Op = std::make_unique<ARMOperand>(k_CoprocReg);
3646 Op->Cop.Val = CopVal;
3647 Op->StartLoc = S;
3648 Op->EndLoc = S;
3649 return Op;
3650 }
3651
3652 static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
3653 SMLoc E) {
3654 auto Op = std::make_unique<ARMOperand>(k_CoprocOption);
3655 Op->Cop.Val = Val;
3656 Op->StartLoc = S;
3657 Op->EndLoc = E;
3658 return Op;
3659 }
3660
3661 static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
3662 auto Op = std::make_unique<ARMOperand>(k_CCOut);
3663 Op->Reg.RegNum = RegNum;
3664 Op->StartLoc = S;
3665 Op->EndLoc = S;
3666 return Op;
3667 }
3668
3669 static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
3670 auto Op = std::make_unique<ARMOperand>(k_Token);
3671 Op->Tok.Data = Str.data();
3672 Op->Tok.Length = Str.size();
3673 Op->StartLoc = S;
3674 Op->EndLoc = S;
3675 return Op;
3676 }
3677
3678 static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
3679 SMLoc E) {
3680 auto Op = std::make_unique<ARMOperand>(k_Register);
3681 Op->Reg.RegNum = RegNum;
3682 Op->StartLoc = S;
3683 Op->EndLoc = E;
3684 return Op;
3685 }
3686
3687 static std::unique_ptr<ARMOperand>
3688 CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3689 unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
3690 SMLoc E) {
3691 auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister);
3692 Op->RegShiftedReg.ShiftTy = ShTy;
3693 Op->RegShiftedReg.SrcReg = SrcReg;
3694 Op->RegShiftedReg.ShiftReg = ShiftReg;
3695 Op->RegShiftedReg.ShiftImm = ShiftImm;
3696 Op->StartLoc = S;
3697 Op->EndLoc = E;
3698 return Op;
3699 }
3700
3701 static std::unique_ptr<ARMOperand>
3702 CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3703 unsigned ShiftImm, SMLoc S, SMLoc E) {
3704 auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate);
3705 Op->RegShiftedImm.ShiftTy = ShTy;
3706 Op->RegShiftedImm.SrcReg = SrcReg;
3707 Op->RegShiftedImm.ShiftImm = ShiftImm;
3708 Op->StartLoc = S;
3709 Op->EndLoc = E;
3710 return Op;
3711 }
3712
3713 static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
3714 SMLoc S, SMLoc E) {
3715 auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate);
3716 Op->ShifterImm.isASR = isASR;
3717 Op->ShifterImm.Imm = Imm;
3718 Op->StartLoc = S;
3719 Op->EndLoc = E;
3720 return Op;
3721 }
3722
3723 static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
3724 SMLoc E) {
3725 auto Op = std::make_unique<ARMOperand>(k_RotateImmediate);
3726 Op->RotImm.Imm = Imm;
3727 Op->StartLoc = S;
3728 Op->EndLoc = E;
3729 return Op;
3730 }
3731
3732 static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
3733 SMLoc S, SMLoc E) {
3734 auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate);
3735 Op->ModImm.Bits = Bits;
3736 Op->ModImm.Rot = Rot;
3737 Op->StartLoc = S;
3738 Op->EndLoc = E;
3739 return Op;
3740 }
3741
3742 static std::unique_ptr<ARMOperand>
3743 CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
3744 auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate);
3745 Op->Imm.Val = Val;
3746 Op->StartLoc = S;
3747 Op->EndLoc = E;
3748 return Op;
3749 }
3750
3751 static std::unique_ptr<ARMOperand>
3752 CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
3753 auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor);
3754 Op->Bitfield.LSB = LSB;
3755 Op->Bitfield.Width = Width;
3756 Op->StartLoc = S;
3757 Op->EndLoc = E;
3758 return Op;
3759 }
3760
  // Build a register-list operand from (encoding, register) pairs, choosing
  // the operand kind from the registers' classes: DPR lists (optionally with
  // a trailing VPR), SPR lists (optionally with a trailing VPR), GPR lists
  // with a trailing APSR, or a plain GPR register list.
  static std::unique_ptr<ARMOperand>
  CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    assert(Regs.size() > 0 && "RegList contains no registers?");
    KindTy Kind = k_RegisterList;

    // The first register's class decides the float-list kinds; a trailing
    // VPR selects the "...WithVPR" variant.
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
            Regs.front().second)) {
      if (Regs.back().second == ARM::VPR)
        Kind = k_FPDRegisterListWithVPR;
      else
        Kind = k_DPRRegisterList;
    } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
                   Regs.front().second)) {
      if (Regs.back().second == ARM::VPR)
        Kind = k_FPSRegisterListWithVPR;
      else
        Kind = k_SPRRegisterList;
    }

    // A GPR list ending in APSR becomes its own kind.
    if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
      Kind = k_RegisterListWithAPSR;

    assert(llvm::is_sorted(Regs) && "Register list must be sorted by encoding");

    // Only the register numbers are retained; the encodings were used for
    // the sortedness check above.
    auto Op = std::make_unique<ARMOperand>(Kind);
    for (const auto &P : Regs)
      Op->Registers.push_back(P.second);

    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }
3794
3795 static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
3796 unsigned Count,
3797 bool isDoubleSpaced,
3798 SMLoc S, SMLoc E) {
3799 auto Op = std::make_unique<ARMOperand>(k_VectorList);
3800 Op->VectorList.RegNum = RegNum;
3801 Op->VectorList.Count = Count;
3802 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3803 Op->StartLoc = S;
3804 Op->EndLoc = E;
3805 return Op;
3806 }
3807
3808 static std::unique_ptr<ARMOperand>
3809 CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
3810 SMLoc S, SMLoc E) {
3811 auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes);
3812 Op->VectorList.RegNum = RegNum;
3813 Op->VectorList.Count = Count;
3814 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3815 Op->StartLoc = S;
3816 Op->EndLoc = E;
3817 return Op;
3818 }
3819
3820 static std::unique_ptr<ARMOperand>
3821 CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
3822 bool isDoubleSpaced, SMLoc S, SMLoc E) {
3823 auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed);
3824 Op->VectorList.RegNum = RegNum;
3825 Op->VectorList.Count = Count;
3826 Op->VectorList.LaneIndex = Index;
3827 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3828 Op->StartLoc = S;
3829 Op->EndLoc = E;
3830 return Op;
3831 }
3832
3833 static std::unique_ptr<ARMOperand>
3834 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
3835 auto Op = std::make_unique<ARMOperand>(k_VectorIndex);
3836 Op->VectorIndex.Val = Idx;
3837 Op->StartLoc = S;
3838 Op->EndLoc = E;
3839 return Op;
3840 }
3841
3842 static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
3843 SMLoc E) {
3844 auto Op = std::make_unique<ARMOperand>(k_Immediate);
3845 Op->Imm.Val = Val;
3846 Op->StartLoc = S;
3847 Op->EndLoc = E;
3848 return Op;
3849 }
3850
3851 static std::unique_ptr<ARMOperand>
3852 CreateMem(unsigned BaseRegNum, const MCExpr *OffsetImm, unsigned OffsetRegNum,
3853 ARM_AM::ShiftOpc ShiftType, unsigned ShiftImm, unsigned Alignment,
3854 bool isNegative, SMLoc S, SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
3855 auto Op = std::make_unique<ARMOperand>(k_Memory);
3856 Op->Memory.BaseRegNum = BaseRegNum;
3857 Op->Memory.OffsetImm = OffsetImm;
3858 Op->Memory.OffsetRegNum = OffsetRegNum;
3859 Op->Memory.ShiftType = ShiftType;
3860 Op->Memory.ShiftImm = ShiftImm;
3861 Op->Memory.Alignment = Alignment;
3862 Op->Memory.isNegative = isNegative;
3863 Op->StartLoc = S;
3864 Op->EndLoc = E;
3865 Op->AlignmentLoc = AlignmentLoc;
3866 return Op;
3867 }
3868
3869 static std::unique_ptr<ARMOperand>
3870 CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
3871 unsigned ShiftImm, SMLoc S, SMLoc E) {
3872 auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister);
3873 Op->PostIdxReg.RegNum = RegNum;
3874 Op->PostIdxReg.isAdd = isAdd;
3875 Op->PostIdxReg.ShiftTy = ShiftTy;
3876 Op->PostIdxReg.ShiftImm = ShiftImm;
3877 Op->StartLoc = S;
3878 Op->EndLoc = E;
3879 return Op;
3880 }
3881
3882 static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
3883 SMLoc S) {
3884 auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt);
3885 Op->MBOpt.Val = Opt;
3886 Op->StartLoc = S;
3887 Op->EndLoc = S;
3888 return Op;
3889 }
3890
3891 static std::unique_ptr<ARMOperand>
3892 CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
3893 auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt);
3894 Op->ISBOpt.Val = Opt;
3895 Op->StartLoc = S;
3896 Op->EndLoc = S;
3897 return Op;
3898 }
3899
3900 static std::unique_ptr<ARMOperand>
3901 CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S) {
3902 auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
3903 Op->TSBOpt.Val = Opt;
3904 Op->StartLoc = S;
3905 Op->EndLoc = S;
3906 return Op;
3907 }
3908
3909 static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
3910 SMLoc S) {
3911 auto Op = std::make_unique<ARMOperand>(k_ProcIFlags);
3912 Op->IFlags.Val = IFlags;
3913 Op->StartLoc = S;
3914 Op->EndLoc = S;
3915 return Op;
3916 }
3917
3918 static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
3919 auto Op = std::make_unique<ARMOperand>(k_MSRMask);
3920 Op->MMask.Val = MMask;
3921 Op->StartLoc = S;
3922 Op->EndLoc = S;
3923 return Op;
3924 }
3925
3926 static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
3927 auto Op = std::make_unique<ARMOperand>(k_BankedReg);
3928 Op->BankedReg.Val = Reg;
3929 Op->StartLoc = S;
3930 Op->EndLoc = S;
3931 return Op;
3932 }
3933};
3934
3935} // end anonymous namespace.
3936
// Dump a human-readable description of this operand to OS, one case per
// operand kind. Used for debugging the parser; the format is not consumed
// by anything downstream.
// NOTE(review): this listing was extracted with some lines dropped — the
// lambda's register-name return (orig. line 3940) and the iterator
// declaration of the register-list loop (orig. line 4067) are missing here.
3937void ARMOperand::print(raw_ostream &OS) const {
// Helper mapping a register to a printable name; 0 prints as "noreg".
3938 auto RegName = [](MCRegister Reg) {
3939 if (Reg)
3941 else
3942 return "noreg";
3943 };
3944
3945 switch (Kind) {
3946 case k_CondCode:
3947 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
3948 break;
3949 case k_VPTPred:
3950 OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">";
3951 break;
3952 case k_CCOut:
3953 OS << "<ccout " << RegName(getReg()) << ">";
3954 break;
3955 case k_ITCondMask: {
// Table indexed by the 4-bit IT mask; entry 0 is unreachable per the assert.
3956 static const char *const MaskStr[] = {
3957 "(invalid)", "(tttt)", "(ttt)", "(ttte)",
3958 "(tt)", "(ttet)", "(tte)", "(ttee)",
3959 "(t)", "(tett)", "(tet)", "(tete)",
3960 "(te)", "(teet)", "(tee)", "(teee)",
3961 };
3962 assert((ITMask.Mask & 0xf) == ITMask.Mask);
3963 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
3964 break;
3965 }
3966 case k_CoprocNum:
3967 OS << "<coprocessor number: " << getCoproc() << ">";
3968 break;
3969 case k_CoprocReg:
3970 OS << "<coprocessor register: " << getCoproc() << ">";
3971 break;
3972 case k_CoprocOption:
3973 OS << "<coprocessor option: " << CoprocOption.Val << ">";
3974 break;
3975 case k_MSRMask:
3976 OS << "<mask: " << getMSRMask() << ">";
3977 break;
3978 case k_BankedReg:
3979 OS << "<banked reg: " << getBankedReg() << ">";
3980 break;
3981 case k_Immediate:
3982 OS << *getImm();
3983 break;
3984 case k_MemBarrierOpt:
3985 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
3986 break;
3987 case k_InstSyncBarrierOpt:
3988 OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
3989 break;
3990 case k_TraceSyncBarrierOpt:
3991 OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
3992 break;
// Memory operands print only the components that are actually present.
3993 case k_Memory:
3994 OS << "<memory";
3995 if (Memory.BaseRegNum)
3996 OS << " base:" << RegName(Memory.BaseRegNum);
3997 if (Memory.OffsetImm)
3998 OS << " offset-imm:" << *Memory.OffsetImm;
3999 if (Memory.OffsetRegNum)
4000 OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
4001 << RegName(Memory.OffsetRegNum);
4002 if (Memory.ShiftType != ARM_AM::no_shift) {
4003 OS << " shift-type:" << ARM_AM::getShiftOpcStr(Memory.ShiftType);
4004 OS << " shift-imm:" << Memory.ShiftImm;
4005 }
4006 if (Memory.Alignment)
4007 OS << " alignment:" << Memory.Alignment;
4008 OS << ">";
4009 break;
// NOTE(review): this case emits a trailing ">" but no leading "<" —
// mismatched delimiters in the debug output.
4010 case k_PostIndexRegister:
4011 OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
4012 << RegName(PostIdxReg.RegNum);
4013 if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
4014 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
4015 << PostIdxReg.ShiftImm;
4016 OS << ">";
4017 break;
4018 case k_ProcIFlags: {
4019 OS << "<ARM_PROC::";
4020 unsigned IFlags = getProcIFlags();
// Print each of the three flag bits (bit 2 down to bit 0) that is set.
4021 for (int i=2; i >= 0; --i)
4022 if (IFlags & (1 << i))
4023 OS << ARM_PROC::IFlagsToString(1 << i);
4024 OS << ">";
4025 break;
4026 }
4027 case k_Register:
4028 OS << "<register " << RegName(getReg()) << ">";
4029 break;
4030 case k_ShifterImmediate:
4031 OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
4032 << " #" << ShifterImm.Imm << ">";
4033 break;
4034 case k_ShiftedRegister:
4035 OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
4036 << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) << " "
4037 << RegName(RegShiftedReg.ShiftReg) << ">";
4038 break;
4039 case k_ShiftedImmediate:
4040 OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
4041 << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) << " #"
4042 << RegShiftedImm.ShiftImm << ">";
4043 break;
4044 case k_RotateImmediate:
4045 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
4046 break;
// NOTE(review): closes with ")>" although no "(" was opened.
4047 case k_ModifiedImmediate:
4048 OS << "<mod_imm #" << ModImm.Bits << ", #"
4049 << ModImm.Rot << ")>";
4050 break;
// NOTE(review): no closing ">" is emitted for this case.
4051 case k_ConstantPoolImmediate:
4052 OS << "<constant_pool_imm #" << *getConstantPoolImm();
4053 break;
4054 case k_BitfieldDescriptor:
4055 OS << "<bitfield " << "lsb: " << Bitfield.LSB
4056 << ", width: " << Bitfield.Width << ">";
4057 break;
// All register-list flavours share one comma-separated printing loop.
4058 case k_RegisterList:
4059 case k_RegisterListWithAPSR:
4060 case k_DPRRegisterList:
4061 case k_SPRRegisterList:
4062 case k_FPSRegisterListWithVPR:
4063 case k_FPDRegisterListWithVPR: {
4064 OS << "<register_list ";
4065
4066 const SmallVectorImpl<unsigned> &RegList = getRegList();
4068 I = RegList.begin(), E = RegList.end(); I != E; ) {
4069 OS << RegName(*I);
4070 if (++I < E) OS << ", ";
4071 }
4072
4073 OS << ">";
4074 break;
4075 }
4076 case k_VectorList:
4077 OS << "<vector_list " << VectorList.Count << " * "
4078 << RegName(VectorList.RegNum) << ">";
4079 break;
4080 case k_VectorListAllLanes:
4081 OS << "<vector_list(all lanes) " << VectorList.Count << " * "
4082 << RegName(VectorList.RegNum) << ">";
4083 break;
4084 case k_VectorListIndexed:
4085 OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
4086 << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
4087 break;
4088 case k_Token:
4089 OS << "'" << getToken() << "'";
4090 break;
4091 case k_VectorIndex:
4092 OS << "<vectorindex " << getVectorIndex() << ">";
4093 break;
4094 }
4095}
4096
4097/// @name Auto-generated Match Functions
4098/// {
4099
4101
4102/// }
4103
4104static bool isDataTypeToken(StringRef Tok) {
4105 static const DenseSet<StringRef> DataTypes{
4106 ".8", ".16", ".32", ".64", ".i8", ".i16", ".i32", ".i64",
4107 ".u8", ".u16", ".u32", ".u64", ".s8", ".s16", ".s32", ".s64",
4108 ".p8", ".p16", ".f32", ".f64", ".f", ".d"};
4109 return DataTypes.contains(Tok);
4110}
4111
// Compute the index of the first non-mnemonic operand: everything before the
// returned index (cond codes, cc_out, VPT predicates, data-type suffix
// tokens, the CPS ie/id immediate) belongs to the mnemonic itself.
// NOTE(review): the function signature line (orig. 4112) and one condition
// line (orig. 4120, presumably "MCExpr::Constant &&") are missing from this
// extraction.
4113 unsigned MnemonicOpsEndInd = 1;
4114 // Special case for CPS which has a Mnemonic side token for possibly storing
4115 // ie/id variant
4116 if (Operands[0]->isToken() &&
4117 static_cast<ARMOperand &>(*Operands[0]).getToken() == "cps") {
// Only a constant IE/ID immediate counts as part of the mnemonic.
4118 if (Operands.size() > 1 && Operands[1]->isImm() &&
4119 static_cast<ARMOperand &>(*Operands[1]).getImm()->getKind() ==
4121 (dyn_cast<MCConstantExpr>(
4122 static_cast<ARMOperand &>(*Operands[1]).getImm())
4123 ->getValue() == ARM_PROC::IE ||
4124 dyn_cast<MCConstantExpr>(
4125 static_cast<ARMOperand &>(*Operands[1]).getImm())
4126 ->getValue() == ARM_PROC::ID))
4127 ++MnemonicOpsEndInd;
4128 }
4129
4130 // In some circumstances the condition code moves to the right
4131 bool RHSCondCode = false;
4132 while (MnemonicOpsEndInd < Operands.size()) {
// NOTE(review): this copies the operand by value; appears intentional here
// since only predicates are queried — confirm against upstream.
4133 auto Op = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
4134 // Special case for it instructions which have a condition code on the RHS
4135 if (Op.isITMask()) {
4136 RHSCondCode = true;
4137 MnemonicOpsEndInd++;
4138 } else if (Op.isToken() &&
4139 (
4140 // There are several special cases not covered by
4141 // isDataTypeToken
4142 Op.getToken() == ".w" || Op.getToken() == ".bf16" ||
4143 Op.getToken() == ".p64" || Op.getToken() == ".f16" ||
4144 isDataTypeToken(Op.getToken()))) {
4145 // In the mnemonic operators the cond code must always precede the data
4146 // type. So we can now safely assume any subsequent cond code is on the
4147 // RHS. As is the case for VCMP and VPT.
4148 RHSCondCode = true;
4149 MnemonicOpsEndInd++;
4150 }
4151 // Skip all mnemonic operator types
4152 else if (Op.isCCOut() || (Op.isCondCode() && !RHSCondCode) ||
4153 Op.isVPTPred() || (Op.isToken() && Op.getToken() == ".w"))
4154 MnemonicOpsEndInd++;
4155 else
4156 break;
4157 }
4158 return MnemonicOpsEndInd;
4159}
4160
4161bool ARMAsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
4162 SMLoc &EndLoc) {
4163 const AsmToken &Tok = getParser().getTok();
4164 StartLoc = Tok.getLoc();
4165 EndLoc = Tok.getEndLoc();
4166 Reg = tryParseRegister();
4167
4168 return Reg == (unsigned)-1;
4169}
4170
4171ParseStatus ARMAsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
4172 SMLoc &EndLoc) {
4173 if (parseRegister(Reg, StartLoc, EndLoc))
4174 return ParseStatus::NoMatch;
4175 return ParseStatus::Success;
4176}
4177
4178/// Try to parse a register name. The token must be an Identifier when called,
4179/// and if it is a register name the token is eaten and the register number is
4180/// returned. Otherwise return -1.
4181int ARMAsmParser::tryParseRegister(bool AllowOutOfBoundReg) {
4182 MCAsmParser &Parser = getParser();
4183 const AsmToken &Tok = Parser.getTok();
4184 if (Tok.isNot(AsmToken::Identifier)) return -1;
4185
4186 std::string lowerCase = Tok.getString().lower();
4187 unsigned RegNum = MatchRegisterName(lowerCase);
4188 if (!RegNum) {
4189 RegNum = StringSwitch<unsigned>(lowerCase)
4190 .Case("r13", ARM::SP)
4191 .Case("r14", ARM::LR)
4192 .Case("r15", ARM::PC)
4193 .Case("ip", ARM::R12)
4194 // Additional register name aliases for 'gas' compatibility.
4195 .Case("a1", ARM::R0)
4196 .Case("a2", ARM::R1)
4197 .Case("a3", ARM::R2)
4198 .Case("a4", ARM::R3)
4199 .Case("v1", ARM::R4)
4200 .Case("v2", ARM::R5)
4201 .Case("v3", ARM::R6)
4202 .Case("v4", ARM::R7)
4203 .Case("v5", ARM::R8)
4204 .Case("v6", ARM::R9)
4205 .Case("v7", ARM::R10)
4206 .Case("v8", ARM::R11)
4207 .Case("sb", ARM::R9)
4208 .Case("sl", ARM::R10)
4209 .Case("fp", ARM::R11)
4210 .Default(0);
4211 }
4212 if (!RegNum) {
4213 // Check for aliases registered via .req. Canonicalize to lower case.
4214 // That's more consistent since register names are case insensitive, and
4215 // it's how the original entry was passed in from MC/MCParser/AsmParser.
4216 StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
4217 // If no match, return failure.
4218 if (Entry == RegisterReqs.end())
4219 return -1;
4220 Parser.Lex(); // Eat identifier token.
4221 return Entry->getValue();
4222 }
4223
4224 // Some FPUs only have 16 D registers, so D16-D31 are invalid
4225 if (!AllowOutOfBoundReg && !hasD32() && RegNum >= ARM::D16 &&
4226 RegNum <= ARM::D31)
4227 return -1;
4228
4229 Parser.Lex(); // Eat identifier token.
4230
4231 return RegNum;
4232}
4233
// Map the current identifier token onto a shift opcode ("asl"/"lsl"/"lsr"/
// "asr"/"ror"/"rrx"), or std::nullopt if it is not one. Does not consume the
// token.
// NOTE(review): the line opening the StringSwitch (orig. 4241) is missing
// from this extraction.
4234std::optional<ARM_AM::ShiftOpc> ARMAsmParser::tryParseShiftToken() {
4235 MCAsmParser &Parser = getParser();
4236 const AsmToken &Tok = Parser.getTok();
4237 if (Tok.isNot(AsmToken::Identifier))
4238 return std::nullopt;
4239
// Shift mnemonics are matched case-insensitively.
4240 std::string lowerCase = Tok.getString().lower();
4242 .Case("asl", ARM_AM::lsl)
4243 .Case("lsl", ARM_AM::lsl)
4244 .Case("lsr", ARM_AM::lsr)
4245 .Case("asr", ARM_AM::asr)
4246 .Case("ror", ARM_AM::ror)
4247 .Case("rrx", ARM_AM::rrx)
4248 .Default(std::nullopt);
4249}
4250
4251// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
4252// If a recoverable error occurs, return 1. If an irrecoverable error
4253// occurs, return -1. An irrecoverable error is one where tokens have been
4254// consumed in the process of trying to parse the shifter (i.e., when it is
4255// indeed a shifter operand, but malformed).
4256int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
4257 MCAsmParser &Parser = getParser();
4258 SMLoc S = Parser.getTok().getLoc();
4259
// No shift mnemonic at the cursor: recoverable, nothing consumed yet.
4260 auto ShiftTyOpt = tryParseShiftToken();
4261 if (ShiftTyOpt == std::nullopt)
4262 return 1;
4263 auto ShiftTy = ShiftTyOpt.value();
4264
4265 Parser.Lex(); // Eat the operator.
4266
4267 // The source register for the shift has already been added to the
4268 // operand list, so we need to pop it off and combine it into the shifted
4269 // register operand instead.
4270 std::unique_ptr<ARMOperand> PrevOp(
4271 (ARMOperand *)Operands.pop_back_val().release())
4272 if (!PrevOp->isReg())
4273 return Error(PrevOp->getStartLoc(), "shift must be of a register");
4274 int SrcReg = PrevOp->getReg();
4275
4276 SMLoc EndLoc;
4277 int64_t Imm = 0;
4278 int ShiftReg = 0;
4279 if (ShiftTy == ARM_AM::rrx) {
4280 // RRX Doesn't have an explicit shift amount. The encoder expects
4281 // the shift register to be the same as the source register. Seems odd,
4282 // but OK.
4283 ShiftReg = SrcReg;
4284 } else {
4285 // Figure out if this is shifted by a constant or a register (for non-RRX).
4286 if (Parser.getTok().is(AsmToken::Hash) ||
4287 Parser.getTok().is(AsmToken::Dollar)) {
4288 Parser.Lex(); // Eat hash.
4289 SMLoc ImmLoc = Parser.getTok().getLoc();
4290 const MCExpr *ShiftExpr = nullptr;
4291 if (getParser().parseExpression(ShiftExpr, EndLoc)) {
4292 Error(ImmLoc, "invalid immediate shift value");
4293 return -1;
4294 }
4295 // The expression must be evaluatable as an immediate.
4296 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
4297 if (!CE) {
4298 Error(ImmLoc, "invalid immediate shift value");
4299 return -1;
4300 }
4301 // Range check the immediate.
4302 // lsl, ror: 0 <= imm <= 31
4303 // lsr, asr: 0 <= imm <= 32
4304 Imm = CE->getValue();
4305 if (Imm < 0 ||
4306 ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
4307 ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
4308 Error(ImmLoc, "immediate shift value out of range");
4309 return -1;
4310 }
4311 // shift by zero is a nop. Always send it through as lsl.
4312 // ('as' compatibility)
4313 if (Imm == 0)
4314 ShiftTy = ARM_AM::lsl;
4315 } else if (Parser.getTok().is(AsmToken::Identifier)) {
// Register-specified shift amount, e.g. "lsl r3".
4316 SMLoc L = Parser.getTok().getLoc();
4317 EndLoc = Parser.getTok().getEndLoc();
4318 ShiftReg = tryParseRegister();
4319 if (ShiftReg == -1) {
4320 Error(L, "expected immediate or register in shift operand");
4321 return -1;
4322 }
4323 } else {
4324 Error(Parser.getTok().getLoc(),
4325 "expected immediate or register in shift operand");
4326 return -1;
4327 }
4328 }
4329
// Re-push the combined operand: register-shifted-register when a shift
// register was parsed (excluding RRX), register-shifted-immediate otherwise.
4330 if (ShiftReg && ShiftTy != ARM_AM::rrx)
4331 Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
4332 ShiftReg, Imm,
4333 S, EndLoc));
4334 else
4335 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
4336 S, EndLoc));
4337
4338 return 0;
4339}
4340
4341/// Try to parse a register name. The token must be an Identifier when called.
4342/// If it's a register, an AsmOperand is created. Another AsmOperand is created
4343/// if there is a "writeback". 'true' if it's not a register.
4344///
4345/// TODO this is likely to change to allow different register types and or to
4346/// parse for a specific register type.
4347bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
4348 MCAsmParser &Parser = getParser();
4349 SMLoc RegStartLoc = Parser.getTok().getLoc();
4350 SMLoc RegEndLoc = Parser.getTok().getEndLoc();
4351 int RegNo = tryParseRegister();
4352 if (RegNo == -1)
4353 return true;
4354
4355 Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));
4356
// A trailing '!' is the writeback marker; it becomes a separate token
// operand so the matcher can see it.
4357 const AsmToken &ExclaimTok = Parser.getTok();
4358 if (ExclaimTok.is(AsmToken::Exclaim)) {
4359 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
4360 ExclaimTok.getLoc()))
4361 Parser.Lex(); // Eat exclaim token
4362 return false;
4363 }
4364
4365 // Also check for an index operand. This is only legal for vector registers,
4366 // but that'll get caught OK in operand matching, so we don't need to
4367 // explicitly filter everything else out here.
4368 if (Parser.getTok().is(AsmToken::LBrac)) {
4369 SMLoc SIdx = Parser.getTok().getLoc();
4370 Parser.Lex(); // Eat left bracket token.
4371
// The index must fold to a constant; symbolic indices are rejected.
4372 const MCExpr *ImmVal;
4373 if (getParser().parseExpression(ImmVal))
4374 return true;
4375 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4376 if (!MCE)
4377 return TokError("immediate value expected for vector index");
4378
4379 if (Parser.getTok().isNot(AsmToken::RBrac))
4380 return Error(Parser.getTok().getLoc(), "']' expected");
4381
4382 SMLoc E = Parser.getTok().getEndLoc();
4383 Parser.Lex(); // Eat right bracket token.
4384
4385 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
4386 SIdx, E,
4387 getContext()));
4388 }
4389
4390 return false;
4391}
4392
4393/// MatchCoprocessorOperandName - Try to parse an coprocessor related
4394/// instruction with a symbolic operand name.
4395/// We accept "crN" syntax for GAS compatibility.
4396/// <operand-name> ::= <prefix><number>
4397/// If CoprocOp is 'c', then:
4398/// <prefix> ::= c | cr
4399/// If CoprocOp is 'p', then :
4400/// <prefix> ::= p
4401/// <number> ::= integer in range [0, 15]
4402static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
4403 // Use the same layout as the tablegen'erated register name matcher. Ugly,
4404 // but efficient.
4405 if (Name.size() < 2 || Name[0] != CoprocOp)
4406 return -1;
4407 Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
4408
4409 switch (Name.size()) {
4410 default: return -1;
4411 case 1:
4412 switch (Name[0]) {
4413 default: return -1;
4414 case '0': return 0;
4415 case '1': return 1;
4416 case '2': return 2;
4417 case '3': return 3;
4418 case '4': return 4;
4419 case '5': return 5;
4420 case '6': return 6;
4421 case '7': return 7;
4422 case '8': return 8;
4423 case '9': return 9;
4424 }
4425 case 2:
4426 if (Name[0] != '1')
4427 return -1;
4428 switch (Name[1]) {
4429 default: return -1;
4430 // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
4431 // However, old cores (v5/v6) did use them in that way.
4432 case '0': return 10;
4433 case '1': return 11;
4434 case '2': return 12;
4435 case '3': return 13;
4436 case '4': return 14;
4437 case '5': return 15;
4438 }
4439 }
4440}
4441
4442/// parseITCondCode - Try to parse a condition code for an IT instruction.
4443ParseStatus ARMAsmParser::parseITCondCode(OperandVector &Operands) {
4444 MCAsmParser &Parser = getParser();
4445 SMLoc S = Parser.getTok().getLoc();
4446 const AsmToken &Tok = Parser.getTok();
4447 if (!Tok.is(AsmToken::Identifier))
4448 return ParseStatus::NoMatch;
// ARMCondCodeFromString returns ~0U for an unrecognized mnemonic.
4449 unsigned CC = ARMCondCodeFromString(Tok.getString());
4450 if (CC == ~0U)
4451 return ParseStatus::NoMatch;
4452 Parser.Lex(); // Eat the token.
4453
4454 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
4455
4456 return ParseStatus::Success;
4457}
4458
4459/// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
4460/// token must be an Identifier when called, and if it is a coprocessor
4461/// number, the token is eaten and the operand is added to the operand list.
4462ParseStatus ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
4463 MCAsmParser &Parser = getParser();
4464 SMLoc S = Parser.getTok().getLoc();
4465 const AsmToken &Tok = Parser.getTok();
4466 if (Tok.isNot(AsmToken::Identifier))
4467 return ParseStatus::NoMatch;
4468
// Match "pN" spellings (case-insensitive); -1 means not a coprocessor name.
4469 int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p');
4470 if (Num == -1)
4471 return ParseStatus::NoMatch;
// Reject coprocessor numbers not available on the current target features.
4472 if (!isValidCoprocessorNumber(Num, getSTI().getFeatureBits()))
4473 return ParseStatus::NoMatch;
4474
4475 Parser.Lex(); // Eat identifier token.
4476 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
4477 return ParseStatus::Success;
4478}
4479
4480/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
4481/// token must be an Identifier when called, and if it is a coprocessor
4482/// number, the token is eaten and the operand is added to the operand list.
4483ParseStatus ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
4484 MCAsmParser &Parser = getParser();
4485 SMLoc S = Parser.getTok().getLoc();
4486 const AsmToken &Tok = Parser.getTok();
4487 if (Tok.isNot(AsmToken::Identifier))
4488 return ParseStatus::NoMatch;
4489
// Match "cN"/"crN" spellings (case-insensitive); -1 means no match.
4490 int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c');
4491 if (Reg == -1)
4492 return ParseStatus::NoMatch;
4493
4494 Parser.Lex(); // Eat identifier token.
4495 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
4496 return ParseStatus::Success;
4497}
4498
4499/// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
4500/// coproc_option : '{' imm0_255 '}'
4501ParseStatus ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
4502 MCAsmParser &Parser = getParser();
4503 SMLoc S = Parser.getTok().getLoc();
4504
4505 // If this isn't a '{', this isn't a coprocessor immediate operand.
4506 if (Parser.getTok().isNot(AsmToken::LCurly))
4507 return ParseStatus::NoMatch;
4508 Parser.Lex(); // Eat the '{'
4509
// The option must fold to a constant in [0, 255].
4510 const MCExpr *Expr;
4511 SMLoc Loc = Parser.getTok().getLoc();
4512 if (getParser().parseExpression(Expr))
4513 return Error(Loc, "illegal expression");
4514 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4515 if (!CE || CE->getValue() < 0 || CE->getValue() > 255)
4516 return Error(Loc,
4517 "coprocessor option must be an immediate in range [0, 255]");
4518 int Val = CE->getValue();
4519
4520 // Check for and consume the closing '}'
4521 if (Parser.getTok().isNot(AsmToken::RCurly))
4522 return ParseStatus::Failure;
4523 SMLoc E = Parser.getTok().getEndLoc();
4524 Parser.Lex(); // Eat the '}'
4525
4526 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
4527 return ParseStatus::Success;
4528}
4529
4530// For register list parsing, we need to map from raw GPR register numbering
4531// to the enumeration values. The enumeration values aren't sorted by
4532// register number due to our using "sp", "lr" and "pc" as canonical names.
4533static unsigned getNextRegister(unsigned Reg) {
4534 // If this is a GPR, we need to do it manually, otherwise we can rely
4535 // on the sort ordering of the enumeration since the other reg-classes
4536 // are sane.
4537 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4538 return Reg + 1;
4539 switch(Reg) {
4540 default: llvm_unreachable("Invalid GPR number!");
4541 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2;
4542 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4;
4543 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6;
4544 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8;
4545 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10;
4546 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
4547 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR;
4548 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0;
4549 }
4550}
4551
4552// Insert an <Encoding, Register> pair in an ordered vector. Return true on
4553// success, or false, if duplicate encoding found.
4554static bool
4555insertNoDuplicates(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
4556 unsigned Enc, unsigned Reg) {
// Append, then bubble the new entry toward the front until it is in sorted
// position; I tracks the new entry, J the element just before it.
4557 Regs.emplace_back(Enc, Reg);
4558 for (auto I = Regs.rbegin(), J = I + 1, E = Regs.rend(); J != E; ++I, ++J) {
4559 if (J->first == Enc) {
// Duplicate encoding: remove the just-inserted entry (J.base() is the
// forward iterator to I's element) and report failure.
4560 Regs.erase(J.base());
4561 return false;
4562 }
// Found an element with a smaller encoding: the new entry is in place.
4563 if (J->first < Enc)
4564 break;
4565 std::swap(*I, *J);
4566 }
4567 return true;
4568}
4569
4570/// Parse a register list.
4571bool ARMAsmParser::parseRegisterList(OperandVector &Operands, bool EnforceOrder,
4572 bool AllowRAAC, bool AllowOutOfBoundReg) {
4573 MCAsmParser &Parser = getParser();
4574 if (Parser.getTok().isNot(AsmToken::LCurly))
4575 return TokError("Token is not a Left Curly Brace");
4576 SMLoc S = Parser.getTok().getLoc();
4577 Parser.Lex(); // Eat '{' token.
4578 SMLoc RegLoc = Parser.getTok().getLoc();
4579
4580 // Check the first register in the list to see what register class
4581 // this is a list of.
4582 int Reg = tryParseRegister();
4583 if (Reg == -1)
4584 return Error(RegLoc, "register expected");
4585 if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4586 return Error(RegLoc, "pseudo-register not allowed");
4587 // The reglist instructions have at most 16 registers, so reserve
4588 // space for that many.
4589 int EReg = 0;
4591
4592 // Allow Q regs and just interpret them as the two D sub-registers.
4593 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4594 Reg = getDRegFromQReg(Reg);
4595 EReg = MRI->getEncodingValue(Reg);
4596 Registers.emplace_back(EReg, Reg);
4597 ++Reg;
4598 }
4599 const MCRegisterClass *RC;
4600 if (Reg == ARM::RA_AUTH_CODE ||
4601 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4602 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
4603 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
4604 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
4605 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
4606 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
4607 else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4608 RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4609 else
4610 return Error(RegLoc, "invalid register in register list");
4611
4612 // Store the register.
4613 EReg = MRI->getEncodingValue(Reg);
4614 Registers.emplace_back(EReg, Reg);
4615
4616 // This starts immediately after the first register token in the list,
4617 // so we can see either a comma or a minus (range separator) as a legal
4618 // next token.
4619 while (Parser.getTok().is(AsmToken::Comma) ||
4620 Parser.getTok().is(AsmToken::Minus)) {
4621 if (Parser.getTok().is(AsmToken::Minus)) {
4622 if (Reg == ARM::RA_AUTH_CODE)
4623 return Error(RegLoc, "pseudo-register not allowed");
4624 Parser.Lex(); // Eat the minus.
4625 SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4626 int EndReg = tryParseRegister(AllowOutOfBoundReg);
4627 if (EndReg == -1)
4628 return Error(AfterMinusLoc, "register expected");
4629 if (EndReg == ARM::RA_AUTH_CODE)
4630 return Error(AfterMinusLoc, "pseudo-register not allowed");
4631 // Allow Q regs and just interpret them as the two D sub-registers.
4632 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4633 EndReg = getDRegFromQReg(EndReg) + 1;
4634 // If the register is the same as the start reg, there's nothing
4635 // more to do.
4636 if (Reg == EndReg)
4637 continue;
4638 // The register must be in the same register class as the first.
4639 if (!RC->contains(Reg))
4640 return Error(AfterMinusLoc, "invalid register in register list");
4641 // Ranges must go from low to high.
4642 if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
4643 return Error(AfterMinusLoc, "bad range in register list");
4644
4645 // Add all the registers in the range to the register list.
4646 while (Reg != EndReg) {
4648 EReg = MRI->getEncodingValue(Reg);
4649 if (!insertNoDuplicates(Registers, EReg, Reg)) {
4650 Warning(AfterMinusLoc, StringRef("duplicated register (") +
4652 ") in register list");
4653 }
4654 }
4655 continue;
4656 }
4657 Parser.Lex(); // Eat the comma.
4658 RegLoc = Parser.getTok().getLoc();
4659 int OldReg = Reg;
4660 const AsmToken RegTok = Parser.getTok();
4661 Reg = tryParseRegister(AllowOutOfBoundReg);
4662 if (Reg == -1)
4663 return Error(RegLoc, "register expected");
4664 if (!AllowRAAC && Reg == ARM::RA_AUTH_CODE)
4665 return Error(RegLoc, "pseudo-register not allowed");
4666 // Allow Q regs and just interpret them as the two D sub-registers.
4667 bool isQReg = false;
4668 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4669 Reg = getDRegFromQReg(Reg);
4670 isQReg = true;
4671 }
4672 if (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg) &&
4673 RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
4674 ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
4675 // switch the register classes, as GPRwithAPSRnospRegClassID is a partial
4676 // subset of GPRRegClassId except it contains APSR as well.
4677 RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4678 }
4679 if (Reg == ARM::VPR &&
4680 (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
4681 RC == &ARMMCRegisterClasses[ARM::DPRRegClassID] ||
4682 RC == &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID])) {
4683 RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4684 EReg = MRI->getEncodingValue(Reg);
4685 if (!insertNoDuplicates(Registers, EReg, Reg)) {
4686 Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4687 ") in register list");
4688 }
4689 continue;
4690 }
4691 // The register must be in the same register class as the first.
4692 if ((Reg == ARM::RA_AUTH_CODE &&
4693 RC != &ARMMCRegisterClasses[ARM::GPRRegClassID]) ||
4694 (Reg != ARM::RA_AUTH_CODE && !RC->contains(Reg)))
4695 return Error(RegLoc, "invalid register in register list");
4696 // In most cases, the list must be monotonically increasing. An
4697 // exception is CLRM, which is order-independent anyway, so
4698 // there's no potential for confusion if you write clrm {r2,r1}
4699 // instead of clrm {r1,r2}.
4700 if (EnforceOrder &&
4701 MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
4702 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4703 Warning(RegLoc, "register list not in ascending order");
4704 else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4705 return Error(RegLoc, "register list not in ascending order");
4706 }
4707 // VFP register lists must also be contiguous.
4708 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
4709 RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
4710 Reg != OldReg + 1)
4711 return Error(RegLoc, "non-contiguous register range");
4712 EReg = MRI->getEncodingValue(Reg);
4713 if (!insertNoDuplicates(Registers, EReg, Reg)) {
4714 Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4715 ") in register list");
4716 }
4717 if (isQReg) {
4718 EReg = MRI->getEncodingValue(++Reg);
4719 Registers.emplace_back(EReg, Reg);
4720 }
4721 }
4722
4723 if (Parser.getTok().isNot(AsmToken::RCurly))
4724 return Error(Parser.getTok().getLoc(), "'}' expected");
4725 SMLoc E = Parser.getTok().getEndLoc();
4726 Parser.Lex(); // Eat '}' token.
4727
4728 // Push the register list operand.
4729 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
4730
4731 // The ARM system instruction variants for LDM/STM have a '^' token here.
4732 if (Parser.getTok().is(AsmToken::Caret)) {
4733 Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
4734 Parser.Lex(); // Eat '^' token.
4735 }
4736
4737 return false;
4738}
4739
4740// Helper function to parse the lane index for vector lists.
4741ParseStatus ARMAsmParser::parseVectorLane(VectorLaneTy &LaneKind,
4742 unsigned &Index, SMLoc &EndLoc) {
4743 MCAsmParser &Parser = getParser();
4744 Index = 0; // Always return a defined index value.
4745 if (Parser.getTok().is(AsmToken::LBrac)) {
4746 Parser.Lex(); // Eat the '['.
4747 if (Parser.getTok().is(AsmToken::RBrac)) {
4748 // "Dn[]" is the 'all lanes' syntax.
4749 LaneKind = AllLanes;
4750 EndLoc = Parser.getTok().getEndLoc();
4751 Parser.Lex(); // Eat the ']'.
4752 return ParseStatus::Success;
4753 }
4754
4755 // There's an optional '#' token here. Normally there wouldn't be, but
4756 // inline assemble puts one in, and it's friendly to accept that.
4757 if (Parser.getTok().is(AsmToken::Hash))
4758 Parser.Lex(); // Eat '#' or '$'.
4759
4760 const MCExpr *LaneIndex;
4761 SMLoc Loc = Parser.getTok().getLoc();
4762 if (getParser().parseExpression(LaneIndex))
4763 return Error(Loc, "illegal expression");
4764 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
4765 if (!CE)
4766 return Error(Loc, "lane index must be empty or an integer");
4767 if (Parser.getTok().isNot(AsmToken::RBrac))
4768 return Error(Parser.getTok().getLoc(), "']' expected");
4769 EndLoc = Parser.getTok().getEndLoc();
4770 Parser.Lex(); // Eat the ']'.
4771 int64_t Val = CE->getValue();
4772
4773 // FIXME: Make this range check context sensitive for .8, .16, .32.
4774 if (Val < 0 || Val > 7)
4775 return Error(Parser.getTok().getLoc(), "lane index out of range");
4776 Index = Val;
4777 LaneKind = IndexedLane;
4778 return ParseStatus::Success;
4779 }
4780 LaneKind = NoLanes;
4781 return ParseStatus::Success;
4782}
4783
4784// parse a vector register list
4785ParseStatus ARMAsmParser::parseVectorList(OperandVector &Operands) {
4786 MCAsmParser &Parser = getParser();
4787 VectorLaneTy LaneKind;
4788 unsigned LaneIndex;
4789 SMLoc S = Parser.getTok().getLoc();
4790 // As an extension (to match gas), support a plain D register or Q register
4791 // (without encosing curly braces) as a single or double entry list,
4792 // respectively.
4793 // If there is no lane supplied, just parse as a register and
4794 // use the custom matcher to convert to list if necessary
4795 if (!hasMVE() && Parser.getTok().is(AsmToken::Identifier)) {
4796 SMLoc E = Parser.getTok().getEndLoc();
4797 int Reg = tryParseRegister();
4798 if (Reg == -1)
4799 return ParseStatus::NoMatch;
4800 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
4801 ParseStatus Res = parseVectorLane(LaneKind, LaneIndex, E);
4802 if (!Res.isSuccess())
4803 return Res;
4804 switch (LaneKind) {
4805 case NoLanes:
4806 Operands.push_back(ARMOperand::CreateReg(Reg, S, E));
4807 break;
4808 case AllLanes:
4809 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
4810 S, E));
4811 break;
4812 case IndexedLane:
4813 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
4814 LaneIndex,
4815 false, S, E));
4816 break;
4817 }
4818 return ParseStatus::Success;
4819 }
4820 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4821 Reg = getDRegFromQReg(Reg);
4822 ParseStatus Res = parseVectorLane(LaneKind, LaneIndex, E);
4823 if (!Res.isSuccess())
4824 return Res;
4825 switch (LaneKind) {
4826 case NoLanes:
4827 Operands.push_back(ARMOperand::CreateReg(Reg, S, E));
4828 break;
4829 case AllLanes:
4830 Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4831 &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4832 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
4833 S, E));
4834 break;
4835 case IndexedLane:
4836 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
4837 LaneIndex,
4838 false, S, E));
4839 break;
4840 }
4841 return ParseStatus::Success;
4842 }
4843 Operands.push_back(ARMOperand::CreateReg(Reg, S, E));
4844 return ParseStatus::Success;
4845 }
4846
4847 if (Parser.getTok().isNot(AsmToken::LCurly))
4848 return ParseStatus::NoMatch;
4849
4850 Parser.Lex(); // Eat '{' token.
4851 SMLoc RegLoc = Parser.getTok().getLoc();
4852
4853 int Reg = tryParseRegister();
4854 if (Reg == -1)
4855 return Error(RegLoc, "register expected");
4856 unsigned Count = 1;
4857 int Spacing = 0;
4858 unsigned FirstReg = Reg;
4859
4860 if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg))
4861 return Error(Parser.getTok().getLoc(),
4862 "vector register in range Q0-Q7 expected");
4863 // The list is of D registers, but we also allow Q regs and just interpret
4864 // them as the two D sub-registers.
4865 else if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4866 FirstReg = Reg = getDRegFromQReg(Reg);
4867 Spacing = 1; // double-spacing requires explicit D registers, otherwise
4868 // it's ambiguous with four-register single spaced.
4869 ++Reg;
4870 ++Count;
4871 }
4872
4873 SMLoc E;
4874 if (!parseVectorLane(LaneKind, LaneIndex, E).isSuccess())
4875 return ParseStatus::Failure;
4876
4877 while (Parser.getTok().is(AsmToken::Comma) ||
4878 Parser.getTok().is(AsmToken::Minus)) {
4879 if (Parser.getTok().is(AsmToken::Minus)) {
4880 if (!Spacing)
4881 Spacing = 1; // Register range implies a single spaced list.
4882 else if (Spacing == 2)
4883 return Error(Parser.getTok().getLoc(),
4884 "sequential registers in double spaced list");
4885 Parser.Lex(); // Eat the minus.
4886 SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4887 int EndReg = tryParseRegister();
4888 if (EndReg == -1)
4889 return Error(AfterMinusLoc, "register expected");
4890 // Allow Q regs and just interpret them as the two D sub-registers.
4891 if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4892 EndReg = getDRegFromQReg(EndReg) + 1;
4893 // If the register is the same as the start reg, there's nothing
4894 // more to do.
4895 if (Reg == EndReg)
4896 continue;
4897 // The register must be in the same register class as the first.
4898 if ((hasMVE() &&
4899 !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(EndReg)) ||
4900 (!hasMVE() &&
4901 !ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)))
4902 return Error(AfterMinusLoc, "invalid register in register list");
4903 // Ranges must go from low to high.
4904 if (Reg > EndReg)
4905 return Error(AfterMinusLoc, "bad range in register list");
4906 // Parse the lane specifier if present.
4907 VectorLaneTy NextLaneKind;
4908 unsigned NextLaneIndex;
4909 if (!parseVectorLane(NextLaneKind, NextLaneIndex, E).isSuccess())
4910 return ParseStatus::Failure;
4911 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4912 return Error(AfterMinusLoc, "mismatched lane index in register list");
4913
4914 // Add all the registers in the range to the register list.
4915 Count += EndReg - Reg;
4916 Reg = EndReg;
4917 continue;
4918 }
4919 Parser.Lex(); // Eat the comma.
4920 RegLoc = Parser.getTok().getLoc();
4921 int OldReg = Reg;
4922 Reg = tryParseRegister();
4923 if (Reg == -1)
4924 return Error(RegLoc, "register expected");
4925
4926 if (hasMVE()) {
4927 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(Reg))
4928 return Error(RegLoc, "vector register in range Q0-Q7 expected");
4929 Spacing = 1;
4930 }
4931 // vector register lists must be contiguous.
4932 // It's OK to use the enumeration values directly here rather, as the
4933 // VFP register classes have the enum sorted properly.
4934 //
4935 // The list is of D registers, but we also allow Q regs and just interpret
4936 // them as the two D sub-registers.
4937 else if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4938 if (!Spacing)
4939 Spacing = 1; // Register range implies a single spaced list.
4940 else if (Spacing == 2)
4941 return Error(
4942 RegLoc,
4943 "invalid register in double-spaced list (must be 'D' register')");
4944 Reg = getDRegFromQReg(Reg);
4945 if (Reg != OldReg + 1)
4946 return Error(RegLoc, "non-contiguous register range");
4947 ++Reg;
4948 Count += 2;
4949 // Parse the lane specifier if present.
4950 VectorLaneTy NextLaneKind;
4951 unsigned NextLaneIndex;
4952 SMLoc LaneLoc = Parser.getTok().getLoc();
4953 if (!parseVectorLane(NextLaneKind, NextLaneIndex, E).isSuccess())
4954 return ParseStatus::Failure;
4955 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4956 return Error(LaneLoc, "mismatched lane index in register list");
4957 continue;
4958 }
4959 // Normal D register.
4960 // Figure out the register spacing (single or double) of the list if
4961 // we don't know it already.
4962 if (!Spacing)
4963 Spacing = 1 + (Reg == OldReg + 2);
4964
4965 // Just check that it's contiguous and keep going.
4966 if (Reg != OldReg + Spacing)
4967 return Error(RegLoc, "non-contiguous register range");
4968 ++Count;
4969 // Parse the lane specifier if present.
4970 VectorLaneTy NextLaneKind;
4971 unsigned NextLaneIndex;
4972 SMLoc EndLoc = Parser.getTok().getLoc();
4973 if (!parseVectorLane(NextLaneKind, NextLaneIndex, E).isSuccess())
4974 return ParseStatus::Failure;
4975 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4976 return Error(EndLoc, "mismatched lane index in register list");
4977 }
4978
4979 if (Parser.getTok().isNot(AsmToken::RCurly))
4980 return Error(Parser.getTok().getLoc(), "'}' expected");
4981 E = Parser.getTok().getEndLoc();
4982 Parser.Lex(); // Eat '}' token.
4983
4984 switch (LaneKind) {
4985 case NoLanes:
4986 case AllLanes: {
4987 // Two-register operands have been converted to the
4988 // composite register classes.
4989 if (Count == 2 && !hasMVE()) {
4990 const MCRegisterClass *RC = (Spacing == 1) ?
4991 &ARMMCRegisterClasses[ARM::DPairRegClassID] :
4992 &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
4993 FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
4994 }
4995 auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
4996 ARMOperand::CreateVectorListAllLanes);
4997 Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S, E));
4998 break;
4999 }
5000 case IndexedLane:
5001 Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
5002 LaneIndex,
5003 (Spacing == 2),
5004 S, E));
5005 break;
5006 }
5007 return ParseStatus::Success;
5008}
5009
5010/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
5011ParseStatus ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
5012 MCAsmParser &Parser = getParser();
5013 SMLoc S = Parser.getTok().getLoc();
5014 const AsmToken &Tok = Parser.getTok();
5015 unsigned Opt;
5016
5017 if (Tok.is(AsmToken::Identifier)) {
5018 StringRef OptStr = Tok.getString();
5019
5020 Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
5021 .Case("sy", ARM_MB::SY)
5022 .Case("st", ARM_MB::ST)
5023 .Case("ld", ARM_MB::LD)
5024 .Case("sh", ARM_MB::ISH)
5025 .Case("ish", ARM_MB::ISH)
5026 .Case("shst", ARM_MB::ISHST)
5027 .Case("ishst", ARM_MB::ISHST)
5028 .Case("ishld", ARM_MB::ISHLD)
5029 .Case("nsh", ARM_MB::NSH)
5030 .Case("un", ARM_MB::NSH)
5031 .Case("nshst", ARM_MB::NSHST)
5032 .Case("nshld", ARM_MB::NSHLD)
5033 .Case("unst", ARM_MB::NSHST)
5034 .Case("osh", ARM_MB::OSH)
5035 .Case("oshst", ARM_MB::OSHST)
5036 .Case("oshld", ARM_MB::OSHLD)
5037 .Default(~0U);
5038
5039 // ishld, oshld, nshld and ld are only available from ARMv8.
5040 if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
5041 Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
5042 Opt = ~0U;
5043
5044 if (Opt == ~0U)
5045 return ParseStatus::NoMatch;
5046
5047 Parser.Lex(); // Eat identifier token.
5048 } else if (Tok.is(AsmToken::Hash) ||
5049 Tok.is(AsmToken::Dollar) ||
5050 Tok.is(AsmToken::Integer)) {
5051 if (Parser.getTok().isNot(AsmToken::Integer))
5052 Parser.Lex(); // Eat '#' or '$'.
5053 SMLoc Loc = Parser.getTok().getLoc();
5054
5055 const MCExpr *MemBarrierID;
5056 if (getParser().parseExpression(MemBarrierID))
5057 return Error(Loc, "illegal expression");
5058
5059 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
5060 if (!CE)
5061 return Error(Loc, "constant expression expected");
5062
5063 int Val = CE->getValue();
5064 if (Val & ~0xf)
5065 return Error(Loc, "immediate value out of range");
5066
5067 Opt = ARM_MB::RESERVED_0 + Val;
5068 } else
5069 return ParseStatus::Failure;
5070
5071 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
5072 return ParseStatus::Success;
5073}
5074
5076ARMAsmParser::parseTraceSyncBarrierOptOperand(OperandVector &Operands) {
5077 MCAsmParser &Parser = getParser();
5078 SMLoc S = Parser.getTok().getLoc();
5079 const AsmToken &Tok = Parser.getTok();
5080
5081 if (Tok.isNot(AsmToken::Identifier))
5082 return ParseStatus::NoMatch;
5083
5084 if (!Tok.getString().equals_insensitive("csync"))
5085 return ParseStatus::NoMatch;
5086
5087 Parser.Lex(); // Eat identifier token.
5088
5089 Operands.push_back(ARMOperand::CreateTraceSyncBarrierOpt(ARM_TSB::CSYNC, S));
5090 return ParseStatus::Success;
5091}
5092
5093/// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
5095ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
5096 MCAsmParser &Parser = getParser();
5097 SMLoc S = Parser.getTok().getLoc();
5098 const AsmToken &Tok = Parser.getTok();
5099 unsigned Opt;
5100
5101 if (Tok.is(AsmToken::Identifier)) {
5102 StringRef OptStr = Tok.getString();
5103
5104 if (OptStr.equals_insensitive("sy"))
5105 Opt = ARM_ISB::SY;
5106 else
5107 return ParseStatus::NoMatch;
5108
5109 Parser.Lex(); // Eat identifier token.
5110 } else if (Tok.is(AsmToken::Hash) ||
5111 Tok.is(AsmToken::Dollar) ||
5112 Tok.is(AsmToken::Integer)) {
5113 if (Parser.getTok().isNot(AsmToken::Integer))
5114 Parser.Lex(); // Eat '#' or '$'.
5115 SMLoc Loc = Parser.getTok().getLoc();
5116
5117 const MCExpr *ISBarrierID;
5118 if (getParser().parseExpression(ISBarrierID))
5119 return Error(Loc, "illegal expression");
5120
5121 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
5122 if (!CE)
5123 return Error(Loc, "constant expression expected");
5124
5125 int Val = CE->getValue();
5126 if (Val & ~0xf)
5127 return Error(Loc, "immediate value out of range");
5128
5129 Opt = ARM_ISB::RESERVED_0 + Val;
5130 } else
5131 return ParseStatus::Failure;
5132
5133 Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
5134 (ARM_ISB::InstSyncBOpt)Opt, S));
5135 return ParseStatus::Success;
5136}
5137
5138/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
5139ParseStatus ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
5140 MCAsmParser &Parser = getParser();
5141 SMLoc S = Parser.getTok().getLoc();
5142 const AsmToken &Tok = Parser.getTok();
5143 if (!Tok.is(AsmToken::Identifier))
5144 return ParseStatus::NoMatch;
5145 StringRef IFlagsStr = Tok.getString();
5146
5147 // An iflags string of "none" is interpreted to mean that none of the AIF
5148 // bits are set. Not a terribly useful instruction, but a valid encoding.
5149 unsigned IFlags = 0;
5150 if (IFlagsStr != "none") {
5151 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
5152 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1).lower())
5153 .Case("a", ARM_PROC::A)
5154 .Case("i", ARM_PROC::I)
5155 .Case("f", ARM_PROC::F)
5156 .Default(~0U);
5157
5158 // If some specific iflag is already set, it means that some letter is
5159 // present more than once, this is not acceptable.
5160 if (Flag == ~0U || (IFlags & Flag))
5161 return ParseStatus::NoMatch;
5162
5163 IFlags |= Flag;
5164 }
5165 }
5166
5167 Parser.Lex(); // Eat identifier token.
5168 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
5169 return ParseStatus::Success;
5170}
5171
5172/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
5173ParseStatus ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
5174 // Don't parse two MSR registers in a row
5175 if (static_cast<ARMOperand &>(*Operands.back()).isMSRMask() ||
5176 static_cast<ARMOperand &>(*Operands.back()).isBankedReg())
5177 return ParseStatus::NoMatch;
5178 MCAsmParser &Parser = getParser();
5179 SMLoc S = Parser.getTok().getLoc();
5180 const AsmToken &Tok = Parser.getTok();
5181
5182 if (Tok.is(AsmToken::Integer)) {
5183 int64_t Val = Tok.getIntVal();
5184 if (Val > 255 || Val < 0) {
5185 return ParseStatus::NoMatch;
5186 }
5187 unsigned SYSmvalue = Val & 0xFF;
5188 Parser.Lex();
5189 Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
5190 return ParseStatus::Success;
5191 }
5192
5193 if (!Tok.is(AsmToken::Identifier))
5194 return ParseStatus::NoMatch;
5195 StringRef Mask = Tok.getString();
5196
5197 if (isMClass()) {
5198 auto TheReg = ARMSysReg::lookupMClassSysRegByName(Mask.lower());
5199 if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
5200 return ParseStatus::NoMatch;
5201
5202 unsigned SYSmvalue = TheReg->Encoding & 0xFFF;
5203
5204 Parser.Lex(); // Eat identifier token.
5205 Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
5206 return ParseStatus::Success;
5207 }
5208
5209 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
5210 size_t Start = 0, Next = Mask.find('_');
5211 StringRef Flags = "";
5212 std::string SpecReg = Mask.slice(Start, Next).lower();
5213 if (Next != StringRef::npos)
5214 Flags = Mask.slice(Next+1, Mask.size());
5215
5216 // FlagsVal contains the complete mask:
5217 // 3-0: Mask
5218 // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
5219 unsigned FlagsVal = 0;
5220
5221 if (SpecReg == "apsr") {
5222 FlagsVal = StringSwitch<unsigned>(Flags)
5223 .Case("nzcvq", 0x8) // same as CPSR_f
5224 .Case("g", 0x4) // same as CPSR_s
5225 .Case("nzcvqg", 0xc) // same as CPSR_fs
5226 .Default(~0U);
5227
5228 if (FlagsVal == ~0U) {
5229 if (!Flags.empty())
5230 return ParseStatus::NoMatch;
5231 else
5232 FlagsVal = 8; // No flag
5233 }
5234 } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
5235 // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
5236 if (Flags == "all" || Flags == "")
5237 Flags = "fc";
5238 for (int i = 0, e = Flags.size(); i != e; ++i) {
5239 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
5240 .Case("c", 1)
5241 .Case("x", 2)
5242 .Case("s", 4)
5243 .Case("f", 8)
5244 .Default(~0U);
5245
5246 // If some specific flag is already set, it means that some letter is
5247 // present more than once, this is not acceptable.
5248 if (Flag == ~0U || (FlagsVal & Flag))
5249 return ParseStatus::NoMatch;
5250 FlagsVal |= Flag;
5251 }
5252 } else // No match for special register.
5253 return ParseStatus::NoMatch;
5254
5255 // Special register without flags is NOT equivalent to "fc" flags.
5256 // NOTE: This is a divergence from gas' behavior. Uncommenting the following
5257 // two lines would enable gas compatibility at the expense of breaking
5258 // round-tripping.
5259 //
5260 // if (!FlagsVal)
5261 // FlagsVal = 0x9;
5262
5263 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
5264 if (SpecReg == "spsr")
5265 FlagsVal |= 16;
5266
5267 Parser.Lex(); // Eat identifier token.
5268 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
5269 return ParseStatus::Success;
5270}
5271
5272/// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
5273/// use in the MRS/MSR instructions added to support virtualization.
5274ParseStatus ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
5275 // Don't parse two Banked registers in a row
5276 if (static_cast<ARMOperand &>(*Operands.back()).isBankedReg() ||
5277 static_cast<ARMOperand &>(*Operands.back()).isMSRMask())
5278 return ParseStatus::NoMatch;
5279 MCAsmParser &Parser = getParser();
5280 SMLoc S = Parser.getTok().getLoc();
5281 const AsmToken &Tok = Parser.getTok();
5282 if (!Tok.is(AsmToken::Identifier))
5283 return ParseStatus::NoMatch;
5284 StringRef RegName = Tok.getString();
5285
5286 auto TheReg = ARMBankedReg::lookupBankedRegByName(RegName.lower());
5287 if (!TheReg)
5288 return ParseStatus::NoMatch;
5289 unsigned Encoding = TheReg->Encoding;
5290
5291 Parser.Lex(); // Eat identifier token.
5292 Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
5293 return ParseStatus::Success;
5294}
5295
5296// FIXME: Unify the different methods for handling shift operators
5297// and use TableGen matching mechanisms to do the validation rather than
5298// separate parsing paths.
5299ParseStatus ARMAsmParser::parsePKHImm(OperandVector &Operands,
5300 ARM_AM::ShiftOpc Op, int Low, int High) {
5301 MCAsmParser &Parser = getParser();
5302 auto ShiftCodeOpt = tryParseShiftToken();
5303
5304 if (!ShiftCodeOpt.has_value())
5305 return ParseStatus::NoMatch;
5306 auto ShiftCode = ShiftCodeOpt.value();
5307
5308 // The wrong shift code has been provided. Can error here as has matched the
5309 // correct operand in this case.
5310 if (ShiftCode != Op)
5311 return Error(Parser.getTok().getLoc(),
5312 ARM_AM::getShiftOpcStr(Op) + " operand expected.");
5313
5314 Parser.Lex(); // Eat shift type token.
5315
5316 // There must be a '#' and a shift amount.
5317 if (Parser.getTok().isNot(AsmToken::Hash) &&
5318 Parser.getTok().isNot(AsmToken::Dollar))
5319 return ParseStatus::NoMatch;
5320 Parser.Lex(); // Eat hash token.
5321
5322 const MCExpr *ShiftAmount;
5323 SMLoc Loc = Parser.getTok().getLoc();
5324 SMLoc EndLoc;
5325 if (getParser().parseExpression(ShiftAmount, EndLoc))
5326 return Error(Loc, "illegal expression");
5327 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5328 if (!CE)
5329 return Error(Loc, "constant expression expected");
5330 int Val = CE->getValue();
5331 if (Val < Low || Val > High)
5332 return Error(Loc, "immediate value out of range");
5333
5334 Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
5335
5336 return ParseStatus::Success;
5337}
5338
5339ParseStatus ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
5340 MCAsmParser &Parser = getParser();
5341 const AsmToken &Tok = Parser.getTok();
5342 SMLoc S = Tok.getLoc();
5343 if (Tok.isNot(AsmToken::Identifier))
5344 return Error(S, "'be' or 'le' operand expected");
5345 int Val = StringSwitch<int>(Tok.getString().lower())
5346 .Case("be", 1)
5347 .Case("le", 0)
5348 .Default(-1);
5349 Parser.Lex(); // Eat the token.
5350
5351 if (Val == -1)
5352 return Error(S, "'be' or 'le' operand expected");
5353 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val,
5354 getContext()),
5355 S, Tok.getEndLoc()));
5356 return ParseStatus::Success;
5357}
5358
5359/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
5360/// instructions. Legal values are:
5361/// lsl #n 'n' in [0,31]
5362/// asr #n 'n' in [1,32]
5363/// n == 32 encoded as n == 0.
5364ParseStatus ARMAsmParser::parseShifterImm(OperandVector &Operands) {
5365 MCAsmParser &Parser = getParser();
5366 const AsmToken &Tok = Parser.getTok();
5367 SMLoc S = Tok.getLoc();
5368 if (Tok.isNot(AsmToken::Identifier))
5369 return ParseStatus::NoMatch;
5370 StringRef ShiftName = Tok.getString();
5371 bool isASR;
5372 if (ShiftName == "lsl" || ShiftName == "LSL")
5373 isASR = false;
5374 else if (ShiftName == "asr" || ShiftName == "ASR")
5375 isASR = true;
5376 else
5377 return ParseStatus::NoMatch;
5378 Parser.Lex(); // Eat the operator.
5379
5380 // A '#' and a shift amount.
5381 if (Parser.getTok().isNot(AsmToken::Hash) &&
5382 Parser.getTok().isNot(AsmToken::Dollar))
5383 return Error(Parser.getTok().getLoc(), "'#' expected");
5384 Parser.Lex(); // Eat hash token.
5385 SMLoc ExLoc = Parser.getTok().getLoc();
5386
5387 const MCExpr *ShiftAmount;
5388 SMLoc EndLoc;
5389 if (getParser().parseExpression(ShiftAmount, EndLoc))
5390 return Error(ExLoc, "malformed shift expression");
5391 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5392 if (!CE)
5393 return Error(ExLoc, "shift amount must be an immediate");
5394
5395 int64_t Val = CE->getValue();
5396 if (isASR) {
5397 // Shift amount must be in [1,32]
5398 if (Val < 1 || Val > 32)
5399 return Error(ExLoc, "'asr' shift amount must be in range [1,32]");
5400 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
5401 if (isThumb() && Val == 32)
5402 return Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
5403 if (Val == 32) Val = 0;
5404 } else {
5405 // Shift amount must be in [1,32]
5406 if (Val < 0 || Val > 31)
5407 return Error(ExLoc, "'lsr' shift amount must be in range [0,31]");
5408 }
5409
5410 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
5411
5412 return ParseStatus::Success;
5413}
5414
5415/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
5416/// of instructions. Legal values are:
5417/// ror #n 'n' in {0, 8, 16, 24}
5418ParseStatus ARMAsmParser::parseRotImm(OperandVector &Operands) {
5419 MCAsmParser &Parser = getParser();
5420 const AsmToken &Tok = Parser.getTok();
5421 SMLoc S = Tok.getLoc();
5422 if (Tok.isNot(AsmToken::Identifier))
5423 return ParseStatus::NoMatch;
5424 StringRef ShiftName = Tok.getString();
5425 if (ShiftName != "ror" && ShiftName != "ROR")
5426 return ParseStatus::NoMatch;
5427 Parser.Lex(); // Eat the operator.
5428
5429 // A '#' and a rotate amount.
5430 if (Parser.getTok().isNot(AsmToken::Hash) &&
5431 Parser.getTok().isNot(AsmToken::Dollar))
5432 return Error(Parser.getTok().getLoc(), "'#' expected");
5433 Parser.Lex(); // Eat hash token.
5434 SMLoc ExLoc = Parser.getTok().getLoc();
5435
5436 const MCExpr *ShiftAmount;
5437 SMLoc EndLoc;
5438 if (getParser().parseExpression(ShiftAmount, EndLoc))
5439 return Error(ExLoc, "malformed rotate expression");
5440 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
5441 if (!CE)
5442 return Error(ExLoc, "rotate amount must be an immediate");
5443
5444 int64_t Val = CE->getValue();
5445 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
5446 // normally, zero is represented in asm by omitting the rotate operand
5447 // entirely.
5448 if (Val != 8 && Val != 16 && Val != 24 && Val != 0)
5449 return Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
5450
5451 Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
5452
5453 return ParseStatus::Success;
5454}
5455
5456ParseStatus ARMAsmParser::parseModImm(OperandVector &Operands) {
5457 MCAsmParser &Parser = getParser();
5458 MCAsmLexer &Lexer = getLexer();
5459 int64_t Imm1, Imm2;
5460
5461 SMLoc S = Parser.getTok().getLoc();
5462
5463 // 1) A mod_imm operand can appear in the place of a register name:
5464 // add r0, #mod_imm
5465 // add r0, r0, #mod_imm
5466 // to correctly handle the latter, we bail out as soon as we see an
5467 // identifier.
5468 //
5469 // 2) Similarly, we do not want to parse into complex operands:
5470 // mov r0, #mod_imm
5471 // mov r0, :lower16:(_foo)
5472 if (Parser.getTok().is(AsmToken::Identifier) ||
5473 Parser.getTok().is(AsmToken::Colon))
5474 return ParseStatus::NoMatch;
5475
5476 // Hash (dollar) is optional as per the ARMARM
5477 if (Parser.getTok().is(AsmToken::Hash) ||
5478 Parser.getTok().is(AsmToken::Dollar)) {
5479 // Avoid parsing into complex operands (#:)
5480 if (Lexer.peekTok().is(AsmToken::Colon))
5481 return ParseStatus::NoMatch;
5482
5483 // Eat the hash (dollar)
5484 Parser.Lex();
5485 }
5486
5487 SMLoc Sx1, Ex1;
5488 Sx1 = Parser.getTok().getLoc();
5489 const MCExpr *Imm1Exp;
5490 if (getParser().parseExpression(Imm1Exp, Ex1))
5491 return Error(Sx1, "malformed expression");
5492
5493 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
5494
5495 if (CE) {
5496 // Immediate must fit within 32-bits
5497 Imm1 = CE->getValue();
5498 int Enc = ARM_AM::getSOImmVal(Imm1);
5499 if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
5500 // We have a match!
5501 Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
5502 (Enc & 0xF00) >> 7,
5503 Sx1, Ex1));
5504 return ParseStatus::Success;
5505 }
5506
5507 // We have parsed an immediate which is not for us, fallback to a plain
5508 // immediate. This can happen for instruction aliases. For an example,
5509 // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
5510 // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
5511 // instruction with a mod_imm operand. The alias is defined such that the
5512 // parser method is shared, that's why we have to do this here.
5513 if (Parser.getTok().is(AsmToken::EndOfStatement)) {
5514 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5515 return ParseStatus::Success;
5516 }
5517 } else {
5518 // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
5519 // MCFixup). Fallback to a plain immediate.
5520 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5521 return ParseStatus::Success;
5522 }
5523
5524 // From this point onward, we expect the input to be a (#bits, #rot) pair
5525 if (Parser.getTok().isNot(AsmToken::Comma))
5526 return Error(Sx1,
5527 "expected modified immediate operand: #[0, 255], #even[0-30]");
5528
5529 if (Imm1 & ~0xFF)
5530 return Error(Sx1, "immediate operand must a number in the range [0, 255]");
5531
5532 // Eat the comma
5533 Parser.Lex();
5534
5535 // Repeat for #rot
5536 SMLoc Sx2, Ex2;
5537 Sx2 = Parser.getTok().getLoc();
5538
5539 // Eat the optional hash (dollar)
5540 if (Parser.getTok().is(AsmToken::Hash) ||
5541 Parser.getTok().is(AsmToken::Dollar))
5542 Parser.Lex();
5543
5544 const MCExpr *Imm2Exp;
5545 if (getParser().parseExpression(Imm2Exp, Ex2))
5546 return Error(Sx2, "malformed expression");
5547
5548 CE = dyn_cast<MCConstantExpr>(Imm2Exp);
5549
5550 if (CE) {
5551 Imm2 = CE->getValue();
5552 if (!(Imm2 & ~0x1E)) {
5553 // We have a match!
5554 Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
5555 return ParseStatus::Success;
5556 }
5557 return Error(Sx2,
5558 "immediate operand must an even number in the range [0, 30]");
5559 } else {
5560 return Error(Sx2, "constant expression expected");
5561 }
5562}
5563
/// Parse a bitfield descriptor "#lsb, #width" (as used by BFI/BFC/SBFX/UBFX)
/// into a single Bitfield operand. Returns NoMatch (consuming nothing) if the
/// next token does not start with '#' or '$'.
ParseStatus ARMAsmParser::parseBitfield(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  // The bitfield descriptor is really two operands, the LSB and the width.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return ParseStatus::NoMatch;
  Parser.Lex(); // Eat hash token.

  const MCExpr *LSBExpr;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().parseExpression(LSBExpr))
    return Error(E, "malformed immediate expression");
  // Only compile-time constants are acceptable here; relocatable
  // expressions are rejected.
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
  if (!CE)
    return Error(E, "'lsb' operand must be an immediate");

  int64_t LSB = CE->getValue();
  // The LSB must be in the range [0,31]
  if (LSB < 0 || LSB > 31)
    return Error(E, "'lsb' operand must be in the range [0,31]");
  E = Parser.getTok().getLoc();

  // Expect another immediate operand.
  if (Parser.getTok().isNot(AsmToken::Comma))
    return Error(Parser.getTok().getLoc(), "too few operands");
  Parser.Lex(); // Eat comma token.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return Error(Parser.getTok().getLoc(), "'#' expected");
  Parser.Lex(); // Eat hash token.

  const MCExpr *WidthExpr;
  SMLoc EndLoc;
  if (getParser().parseExpression(WidthExpr, EndLoc))
    return Error(E, "malformed immediate expression");
  CE = dyn_cast<MCConstantExpr>(WidthExpr);
  if (!CE)
    return Error(E, "'width' operand must be an immediate");

  int64_t Width = CE->getValue();
  // The width must be in the range [1,32-lsb]
  if (Width < 1 || Width > 32 - LSB)
    return Error(E, "'width' operand must be in the range [1,32-lsb]");

  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));

  return ParseStatus::Success;
}
5613
ParseStatus ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
  // Check for a post-index addressing register operand. Specifically:
  //   postidx_reg := '+' register {, shift}
  //                | '-' register {, shift}
  //                | register {, shift}

  // This method must return ParseStatus::NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives take other
  // parse methods.
  MCAsmParser &Parser = getParser();
  AsmToken Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();
  bool haveEaten = false;
  bool isAdd = true;
  // An explicit leading '+'/'-' selects the direction; the default is add.
  if (Tok.is(AsmToken::Plus)) {
    Parser.Lex(); // Eat the '+' token.
    haveEaten = true;
  } else if (Tok.is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the '-' token.
    isAdd = false;
    haveEaten = true;
  }

  SMLoc E = Parser.getTok().getEndLoc();
  int Reg = tryParseRegister();
  if (Reg == -1) {
    // If a sign was already consumed we are committed to this form, so a
    // missing register is a hard error rather than NoMatch.
    if (!haveEaten)
      return ParseStatus::NoMatch;
    return Error(Parser.getTok().getLoc(), "register expected");
  }

  unsigned ShiftImm = 0;
  // An optional register shift may follow, e.g. "r2, lsl #2".
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
      return ParseStatus::Failure;

    // FIXME: Only approximates end...may include intervening whitespace.
    E = Parser.getTok().getLoc();
  }

  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
                                                  ShiftImm, S, E));

  return ParseStatus::Success;
}
5661
ParseStatus ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
  // Check for a post-index addressing register operand. Specifically:
  //   am3offset := '+' register
  //              | '-' register
  //              | register
  //              | # imm
  //              | # + imm
  //              | # - imm

  // This method must return ParseStatus::NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives take other
  // parse methods.
  MCAsmParser &Parser = getParser();
  AsmToken Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  // Do immediates first, as we always parse those if we have a '#'.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar)) {
    Parser.Lex(); // Eat '#' or '$'.
    // Explicitly look for a '-', as we need to encode negative zero
    // differently.
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    SMLoc E;
    if (getParser().parseExpression(Offset, E))
      return ParseStatus::Failure;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE)
      return Error(S, "constant expression expected");
    // Negative zero is encoded as the flag value
    // std::numeric_limits<int32_t>::min().
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      Val = std::numeric_limits<int32_t>::min();

    Operands.push_back(
        ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));

    return ParseStatus::Success;
  }

  // Otherwise it must be the register form, optionally signed.
  bool haveEaten = false;
  bool isAdd = true;
  if (Tok.is(AsmToken::Plus)) {
    Parser.Lex(); // Eat the '+' token.
    haveEaten = true;
  } else if (Tok.is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the '-' token.
    isAdd = false;
    haveEaten = true;
  }

  Tok = Parser.getTok();
  int Reg = tryParseRegister();
  if (Reg == -1) {
    // With no sign consumed we can still report NoMatch; after a sign we
    // are committed and a missing register is an error.
    if (!haveEaten)
      return ParseStatus::NoMatch;
    return Error(Tok.getLoc(), "register expected");
  }

  // am3offset never carries a shift, hence no_shift with amount 0.
  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
                                                  0, S, Tok.getEndLoc()));

  return ParseStatus::Success;
}
5728
5729// Finds the index of the first CondCode operator, if there is none returns 0
5731 unsigned MnemonicOpsEndInd) {
5732 for (unsigned I = 1; I < MnemonicOpsEndInd; ++I) {
5733 auto Op = static_cast<ARMOperand &>(*Operands[I]);
5734 if (Op.isCondCode())
5735 return I;
5736 }
5737 return 0;
5738}
5739
5741 unsigned MnemonicOpsEndInd) {
5742 for (unsigned I = 1; I < MnemonicOpsEndInd; ++I) {
5743 auto Op = static_cast<ARMOperand &>(*Operands[I]);
5744 if (Op.isCCOut())
5745 return I;
5746 }
5747 return 0;
5748}
5749
5750/// Convert parsed operands to MCInst. Needed here because this instruction
5751/// only has two register operands, but multiplication is commutative so
5752/// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
5753void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
5754 const OperandVector &Operands) {
5755 unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
5756 unsigned CondI = findCondCodeInd(Operands, MnemonicOpsEndInd);
5757 unsigned CondOutI = findCCOutInd(Operands, MnemonicOpsEndInd);
5758
5759 // 2 operand form
5760 unsigned RegRd = MnemonicOpsEndInd;
5761 unsigned RegRn = MnemonicOpsEndInd + 1;
5762 unsigned RegRm = MnemonicOpsEndInd;
5763
5764 if (Operands.size() == MnemonicOpsEndInd + 3) {
5765 // If we have a three-operand form, make sure to set Rn to be the operand
5766 // that isn't the same as Rd.
5767 if (((ARMOperand &)*Operands[RegRd]).getReg() ==
5768 ((ARMOperand &)*Operands[MnemonicOpsEndInd + 1]).getReg()) {
5769 RegRn = MnemonicOpsEndInd + 2;
5770 RegRm = MnemonicOpsEndInd + 1;
5771 } else {
5772 RegRn = MnemonicOpsEndInd + 1;
5773 RegRm = MnemonicOpsEndInd + 2;
5774 }
5775 }
5776
5777 // Rd
5778 ((ARMOperand &)*Operands[RegRd]).addRegOperands(Inst, 1);
5779 // CCOut
5780 if (CondOutI != 0) {
5781 ((ARMOperand &)*Operands[CondOutI]).addCCOutOperands(Inst, 1);
5782 } else {
5783 ARMOperand Op = *ARMOperand::CreateCCOut(0, Operands[0]->getEndLoc());
5784 Op.addCCOutOperands(Inst, 1);
5785 }
5786 // Rn
5787 ((ARMOperand &)*Operands[RegRn]).addRegOperands(Inst, 1);
5788 // Rm
5789 ((ARMOperand &)*Operands[RegRm]).addRegOperands(Inst, 1);
5790
5791 // Cond code
5792 if (CondI != 0) {
5793 ((ARMOperand &)*Operands[CondI]).addCondCodeOperands(Inst, 2);
5794 } else {
5795 ARMOperand Op =
5796 *ARMOperand::CreateCondCode(llvm::ARMCC::AL, Operands[0]->getEndLoc());
5797 Op.addCondCodeOperands(Inst, 2);
5798 }
5799}
5800
/// Convert parsed Thumb branch operands to an MCInst, selecting between the
/// conditional/unconditional and 16-bit/32-bit encodings based on IT-block
/// context, condition code and target offset range.
void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
                                    const OperandVector &Operands) {
  unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
  unsigned CondI = findCondCodeInd(Operands, MnemonicOpsEndInd);
  unsigned Cond =
      (CondI == 0 ? ARMCC::AL
                  : static_cast<ARMOperand &>(*Operands[CondI]).getCondCode());

  // first decide whether or not the branch should be conditional
  // by looking at it's location relative to an IT block
  if(inITBlock()) {
    // inside an IT block we cannot have any conditional branches. any
    // such instructions needs to be converted to unconditional form
    switch(Inst.getOpcode()) {
      case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
      case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
    }
  } else {
    // Outside an IT block, pick the conditional or unconditional opcode
    // according to the parsed condition code.
    switch(Inst.getOpcode()) {
      case ARM::tB:
      case ARM::tBcc:
        Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
        break;
      case ARM::t2B:
      case ARM::t2Bcc:
        Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
        break;
    }
  }

  // now decide on encoding size based on branch target range
  switch(Inst.getOpcode()) {
  // classify tB as either t2B or t1B based on range of immediate operand
  case ARM::tB: {
    ARMOperand &op = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
    // Widen to the 32-bit encoding when the offset doesn't fit in 11 bits.
    if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
      Inst.setOpcode(ARM::t2B);
    break;
  }
  // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
  case ARM::tBcc: {
    ARMOperand &op = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
    // Widen to the 32-bit encoding when the offset doesn't fit in 8 bits.
    if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
      Inst.setOpcode(ARM::t2Bcc);
    break;
  }
  }
  // Branch target immediate.
  ((ARMOperand &)*Operands[MnemonicOpsEndInd]).addImmOperands(Inst, 1);
  if (CondI != 0) {
    ((ARMOperand &)*Operands[CondI]).addCondCodeOperands(Inst, 2);
  } else {
    // No explicit condition code was parsed; synthesize an AL one.
    ARMOperand Op =
        *ARMOperand::CreateCondCode(llvm::ARMCC::AL, Operands[0]->getEndLoc());
    Op.addCondCodeOperands(Inst, 2);
  }
}
5857
/// Convert the parsed MVE "vmov rt, rt2, qd[idx], qd[idx2]" form (moving two
/// Q-register lanes into a GPR pair) to the MCInst operand order.
void ARMAsmParser::cvtMVEVMOVQtoDReg(
  MCInst &Inst, const OperandVector &Operands) {

  unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
  unsigned CondI = findCondCodeInd(Operands, MnemonicOpsEndInd);

  // mnemonic, condition code, Rt, Rt2, Qd, idx, Qd again, idx2
  assert(Operands.size() == MnemonicOpsEndInd + 6);

  ((ARMOperand &)*Operands[MnemonicOpsEndInd]).addRegOperands(Inst, 1); // Rt
  ((ARMOperand &)*Operands[MnemonicOpsEndInd + 1])
      .addRegOperands(Inst, 1); // Rt2
  ((ARMOperand &)*Operands[MnemonicOpsEndInd + 2])
      .addRegOperands(Inst, 1); // Qd
  ((ARMOperand &)*Operands[MnemonicOpsEndInd + 3])
      .addMVEPairVectorIndexOperands(Inst, 1); // idx
  // skip second copy of Qd in Operands[MnemonicOpsEndInd + 4]
  ((ARMOperand &)*Operands[MnemonicOpsEndInd + 5])
      .addMVEPairVectorIndexOperands(Inst, 1); // idx2
  if (CondI != 0) {
    ((ARMOperand &)*Operands[CondI])
        .addCondCodeOperands(Inst, 2); // condition code
  } else {
    // No condition code parsed; default to AL.
    ARMOperand Op =
        *ARMOperand::CreateCondCode(ARMCC::AL, Operands[0]->getEndLoc());
    Op.addCondCodeOperands(Inst, 2);
  }
}
5886
/// Parse an ARM memory expression, return false if successful else return true
/// or an error. The first token must be a '[' when called.
bool ARMAsmParser::parseMemory(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S, E;
  if (Parser.getTok().isNot(AsmToken::LBrac))
    return TokError("Token is not a Left Bracket");
  S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat left bracket token.

  // Every memory form starts with a base register.
  const AsmToken &BaseRegTok = Parser.getTok();
  int BaseRegNum = tryParseRegister();
  if (BaseRegNum == -1)
    return Error(BaseRegTok.getLoc(), "register expected");

  // The next token must either be a comma, a colon or a closing bracket.
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
      !Tok.is(AsmToken::RBrac))
    return Error(Tok.getLoc(), "malformed memory operand");

  // Bare "[Rn]" form: no offset at all.
  if (Tok.is(AsmToken::RBrac)) {
    E = Tok.getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
                                             ARM_AM::no_shift, 0, 0, false,
                                             S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand. It's rather odd, but syntactically valid.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
         "Lost colon or comma in memory operand?!");
  if (Tok.is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the comma.
  }

  // If we have a ':', it's an alignment specifier.
  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat the ':'.
    E = Parser.getTok().getLoc();
    SMLoc AlignmentLoc = Tok.getLoc();

    const MCExpr *Expr;
    if (getParser().parseExpression(Expr))
      return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error (E, "constant expression expected");

    unsigned Align = 0;
    // The specifier is given in bits; convert to the byte count used by the
    // NEON/VFP addressing modes.
    switch (CE->getValue()) {
    default:
      return Error(E,
                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
    case 16: Align = 2; break;
    case 32: Align = 4; break;
    case 64: Align = 8; break;
    case 128: Align = 16; break;
    case 256: Align = 32; break;
    }

    // Now we should have the closing ']'
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");
    E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
                                             false, S, E, AlignmentLoc));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // If we have a '#' or '$', it's an immediate offset, else assume it's a
  // register offset. Be friendly and also accept a plain integer or expression
  // (without a leading hash) for gas compatibility.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar) ||
      Parser.getTok().is(AsmToken::LParen) ||
      Parser.getTok().is(AsmToken::Integer)) {
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar))
      Parser.Lex(); // Eat '#' or '$'
    E = Parser.getTok().getLoc();

    bool isNegative = getParser().getTok().is(AsmToken::Minus);
    const MCExpr *Offset, *AdjustedOffset;
    if (getParser().parseExpression(Offset))
      return true;

    if (const auto *CE = dyn_cast<MCConstantExpr>(Offset)) {
      // If the constant was #-0, represent it as
      // std::numeric_limits<int32_t>::min().
      int32_t Val = CE->getValue();
      if (isNegative && Val == 0)
        CE = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
                                    getContext());
      // Don't worry about range checking the value here. That's handled by
      // the is*() predicates.
      AdjustedOffset = CE;
    } else
      AdjustedOffset = Offset;
    Operands.push_back(ARMOperand::CreateMem(
        BaseRegNum, AdjustedOffset, 0, ARM_AM::no_shift, 0, 0, false, S, E));

    // Now we should have the closing ']'
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");
    E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // The register offset is optionally preceded by a '+' or '-'
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex(); // Eat the '-'.
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    // Nothing to do.
    Parser.Lex(); // Eat the '+'.
  }

  E = Parser.getTok().getLoc();
  int OffsetRegNum = tryParseRegister();
  if (OffsetRegNum == -1)
    return Error(E, "register expected");

  // If there's a shift operator, handle it.
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
      return true;
  }

  // Now we should have the closing ']'
  if (Parser.getTok().isNot(AsmToken::RBrac))
    return Error(Parser.getTok().getLoc(), "']' expected");
  E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat right bracket token.

  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
                                           ShiftType, ShiftImm, 0, isNegative,
                                           S, E));

  // If there's a pre-indexing writeback marker, '!', just add it as a token
  // operand.
  if (Parser.getTok().is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat the '!'.
  }

  return false;
}
6074
6075/// parseMemRegOffsetShift - one of these two:
6076/// ( lsl | lsr | asr | ror ) , # shift_amount
6077/// rrx
6078/// return true if it parses a shift otherwise it returns false.
6079bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
6080 unsigned &Amount) {
6081 MCAsmParser &Parser = getParser();
6082 SMLoc Loc = Parser.getTok().getLoc();
6083 const AsmToken &Tok = Parser.getTok();
6084 if (Tok.isNot(AsmToken::Identifier))
6085 return Error(Loc, "illegal shift operator");
6086 StringRef ShiftName = Tok.getString();
6087 if (ShiftName == "lsl" || ShiftName == "LSL" ||
6088 ShiftName == "asl" || ShiftName == "ASL")
6089 St = ARM_AM::lsl;
6090 else if (ShiftName == "lsr" || ShiftName == "LSR")
6091 St = ARM_AM::lsr;
6092 else if (ShiftName == "asr" || ShiftName == "ASR")
6093 St = ARM_AM::asr;
6094 else if (ShiftName == "ror" || ShiftName == "ROR")
6095 St = ARM_AM::ror;
6096 else if (ShiftName == "rrx" || ShiftName == "RRX")
6097 St = ARM_AM::rrx;
6098 else if (ShiftName == "uxtw" || ShiftName == "UXTW")
6099 St = ARM_AM::uxtw;
6100 else
6101 return Error(Loc, "illegal shift operator");
6102 Parser.Lex(); // Eat shift type token.
6103
6104 // rrx stands alone.
6105 Amount = 0;
6106 if (St != ARM_AM::rrx) {
6107 Loc = Parser.getTok().getLoc();
6108 // A '#' and a shift amount.
6109 const AsmToken &HashTok = Parser.getTok();
6110 if (HashTok.isNot(AsmToken::Hash) &&
6111 HashTok.isNot(AsmToken::Dollar))
6112 return Error(HashTok.getLoc(), "'#' expected");
6113 Parser.Lex(); // Eat hash token.
6114
6115 const MCExpr *Expr;
6116 if (getParser().parseExpression(Expr))
6117 return true;
6118 // Range check the immediate.
6119 // lsl, ror: 0 <= imm <= 31
6120 // lsr, asr: 0 <= imm <= 32
6121 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
6122 if (!CE)
6123 return Error(Loc, "shift amount must be an immediate");
6124 int64_t Imm = CE->getValue();
6125 if (Imm < 0 ||
6126 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
6127 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
6128 return Error(Loc, "immediate shift value out of range");
6129 // If <ShiftTy> #0, turn it into a no_shift.
6130 if (Imm == 0)
6131 St = ARM_AM::lsl;
6132 // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
6133 if (Imm == 32)
6134 Imm = 0;
6135 Amount = Imm;
6136 }
6137
6138 return false;
6139}
6140
/// parseFPImm - A floating point immediate expression operand.
ParseStatus ARMAsmParser::parseFPImm(OperandVector &Operands) {
  LLVM_DEBUG(dbgs() << "PARSE FPImm, Ops: " << Operands.size());

  MCAsmParser &Parser = getParser();
  // Anything that can accept a floating point constant as an operand
  // needs to go through here, as the regular parseExpression is
  // integer only.
  //
  // This routine still creates a generic Immediate operand, containing
  // a bitcast of the 64-bit floating point value. The various operands
  // that accept floats can check whether the value is valid for them
  // via the standard is*() predicates.

  SMLoc S = Parser.getTok().getLoc();

  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return ParseStatus::NoMatch;

  // Disambiguate the VMOV forms that can accept an FP immediate.
  // vmov.f32 <sreg>, #imm
  // vmov.f64 <dreg>, #imm
  // vmov.f32 <dreg>, #imm @ vector f32x2
  // vmov.f32 <qreg>, #imm @ vector f32x4
  //
  // There are also the NEON VMOV instructions which expect an
  // integer constant. Make sure we don't try to parse an FPImm
  // for these:
  // vmov.i{8|16|32|64} <dreg|qreg>, #imm

  // Scan the already-parsed operands for a '.f16'/'.f32'/'.f64' size suffix
  // to tell the FP VMOV forms apart from the integer NEON forms.
  bool isVmovf = false;
  unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
  for (unsigned I = 1; I < MnemonicOpsEndInd; ++I) {
    ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[I]);
    if (TyOp.isToken() &&
        (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
         TyOp.getToken() == ".f16")) {
      isVmovf = true;
      break;
    }
  }

  // The legacy fconsts/fconstd mnemonics also take an FP immediate.
  ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
  bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
                                         Mnemonic.getToken() == "fconsts");
  if (!(isVmovf || isFconst))
    return ParseStatus::NoMatch;

  Parser.Lex(); // Eat '#' or '$'.

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  SMLoc Loc = Tok.getLoc();
  if (Tok.is(AsmToken::Real) && isVmovf) {
    APFloat RealVal(APFloat::IEEEsingle(), Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 31;
    Parser.Lex(); // Eat the token.
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::create(IntVal, getContext()),
          S, Parser.getTok().getLoc()));
    return ParseStatus::Success;
  }
  // Also handle plain integers. Instructions which allow floating point
  // immediates also allow a raw encoded 8-bit value.
  if (Tok.is(AsmToken::Integer) && isFconst) {
    int64_t Val = Tok.getIntVal();
    Parser.Lex(); // Eat the token.
    if (Val > 255 || Val < 0)
      return Error(Loc, "encoded floating point value out of range");
    // Decode the 8-bit encoding to its float value and re-bitcast, so the
    // operand carries the raw IEEE bits like the Real case above.
    float RealVal = ARM_AM::getFPImmFloat(Val);
    Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();

    Operands.push_back(ARMOperand::CreateImm(
        MCConstantExpr::create(Val, getContext()), S,
        Parser.getTok().getLoc()));
    return ParseStatus::Success;
  }

  return Error(Loc, "invalid floating point immediate");
}
6229
/// Parse an ARM instruction operand. For now this parses the operand
/// regardless of the mnemonic.
bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  MCAsmParser &Parser = getParser();
  SMLoc S, E;

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  ParseStatus ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy.isSuccess())
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy.isFailure())
    return true;

  // Dispatch on the kind of the first token of the operand.
  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return true;
  case AsmToken::Identifier: {
    // If we've seen a branch mnemonic, the next operand must be a label. This
    // is true even if the label is a register name. So "br r1" means branch to
    // label "r1".
    bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
    if (!ExpectLabel) {
      if (!tryParseRegisterWithWriteBack(Operands))
        return false;
      int Res = tryParseShiftRegister(Operands);
      if (Res == 0) // success
        return false;
      else if (Res == -1) // irrecoverable error
        return true;
      // If this is VMRS, check for the apsr_nzcv operand.
      if (Mnemonic == "vmrs" &&
          Parser.getTok().getString().equals_insensitive("apsr_nzcv")) {
        S = Parser.getTok().getLoc();
        Parser.Lex();
        Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
        return false;
      }
    }

    // Fall though for the Identifier case that is not a register or a
    // special name.
    [[fallthrough]];
  }
  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
  case AsmToken::Integer: // things like 1f and 2b as a branch targets
  case AsmToken::String:  // quoted label names.
  case AsmToken::Dot: {   // . as a branch target
    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = Parser.getTok().getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
    return false;
  }
  case AsmToken::LBrac:
    return parseMemory(Operands);
  case AsmToken::LCurly: {
    // Register lists; vlldm/vlstm may name registers beyond the usual range.
    bool AllowOutOfBoundReg = Mnemonic == "vlldm" || Mnemonic == "vlstm";
    return parseRegisterList(Operands, !Mnemonic.starts_with("clr"), false,
                             AllowOutOfBoundReg);
  }
  case AsmToken::Dollar:
  case AsmToken::Hash: {
    // #42 -> immediate
    // $ 42 -> immediate
    // $foo -> symbol name
    // $42 -> symbol name
    S = Parser.getTok().getLoc();

    // Favor the interpretation of $-prefixed operands as symbol names.
    // Cases where immediates are explicitly expected are handled by their
    // specific ParseMethod implementations.
    auto AdjacentToken = getLexer().peekTok(/*ShouldSkipSpace=*/false);
    bool ExpectIdentifier = Parser.getTok().is(AsmToken::Dollar) &&
                            (AdjacentToken.is(AsmToken::Identifier) ||
                             AdjacentToken.is(AsmToken::Integer));
    if (!ExpectIdentifier) {
      // Token is not part of identifier. Drop leading $ or # before parsing
      // expression.
      Parser.Lex();
    }

    if (Parser.getTok().isNot(AsmToken::Colon)) {
      bool IsNegative = Parser.getTok().is(AsmToken::Minus);
      const MCExpr *ImmVal;
      if (getParser().parseExpression(ImmVal))
        return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
      if (CE) {
        int32_t Val = CE->getValue();
        // #-0 is kept distinct from #0 by encoding it as INT32_MIN.
        if (IsNegative && Val == 0)
          ImmVal = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
                                          getContext());
      }
      E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
      Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));

      // There can be a trailing '!' on operands that we want as a separate
      // '!' Token operand. Handle that here. For example, the compatibility
      // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
      if (Parser.getTok().is(AsmToken::Exclaim)) {
        Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
                                                   Parser.getTok().getLoc()));
        Parser.Lex(); // Eat exclaim token
      }
      return false;
    }
    // w/ a ':' after the '#', it's just like a plain ':'.
    [[fallthrough]];
  }
  case AsmToken::Colon: {
    S = Parser.getTok().getLoc();
    // ":lower16:", ":upper16:", ":lower0_7:", ":lower8_15:", ":upper0_7:" and
    // ":upper8_15:", expression prefixes
    // FIXME: Check it's an expression prefix,
    // e.g. (FOO - :lower16:BAR) isn't legal.
    ARMMCExpr::VariantKind RefKind;
    if (parsePrefix(RefKind))
      return true;

    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
                                              getContext());
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
    return false;
  }
  case AsmToken::Equal: {
    S = Parser.getTok().getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return Error(S, "unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);

    // execute-only: we assume that assembly programmers know what they are
    // doing and allow literal pool creation here
    Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S, E));
    return false;
  }
  }
}
6385
6386bool ARMAsmParser::parseImmExpr(int64_t &Out) {
6387 const MCExpr *Expr = nullptr;
6388 SMLoc L = getParser().getTok().getLoc();
6389 if (check(getParser().parseExpression(Expr), L, "expected expression"))
6390 return true;
6391 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
6392 if (check(!Value, L, "expected constant expression"))
6393 return true;
6394 Out = Value->getValue();
6395 return false;
6396}
6397
6398// parsePrefix - Parse ARM 16-bit relocations expression prefixes, i.e.
6399// :lower16: and :upper16: and Thumb 8-bit relocation expression prefixes, i.e.
6400// :upper8_15:, :upper0_7:, :lower8_15: and :lower0_7:
6401bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
6402 MCAsmParser &Parser = getParser();
6403 RefKind = ARMMCExpr::VK_ARM_None;
6404
6405 // consume an optional '#' (GNU compatibility)
6406 if (getLexer().is(AsmToken::Hash))
6407 Parser.Lex();
6408
6409 assert(getLexer().is(AsmToken::Colon) && "expected a :");
6410 Parser.Lex(); // Eat ':'
6411
6412 if (getLexer().isNot(AsmToken::Identifier)) {
6413 Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
6414 return true;
6415 }
6416
6417 enum {
6418 COFF = (1 << MCContext::IsCOFF),
6419 ELF = (1 << MCContext::IsELF),
6420 MACHO = (1 << MCContext::IsMachO),
6421 WASM = (1 << MCContext::IsWasm),
6422 };
6423 static const struct PrefixEntry {
6424 const char *Spelling;
6425 ARMMCExpr::VariantKind VariantKind;
6426 uint8_t SupportedFormats;
6427 } PrefixEntries[] = {
6428 {"upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO},
6429 {"lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO},
6430 {"upper8_15", ARMMCExpr::VK_ARM_HI_8_15, ELF},
6431 {"upper0_7", ARMMCExpr::VK_ARM_HI_0_7, ELF},
6432 {"lower8_15", ARMMCExpr::VK_ARM_LO_8_15, ELF},
6433 {"lower0_7", ARMMCExpr::VK_ARM_LO_0_7, ELF},
6434 };
6435
6436 StringRef IDVal = Parser.getTok().getIdentifier();
6437
6438 const auto &Prefix =
6439 llvm::find_if(PrefixEntries, [&IDVal](const PrefixEntry &PE) {
6440 return PE.Spelling == IDVal;
6441 });
6442 if (Prefix == std::end(PrefixEntries)) {
6443 Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
6444 return true;
6445 }
6446
6447 uint8_t CurrentFormat;
6448 switch (getContext().getObjectFileType()) {
6449 case MCContext::IsMachO:
6450 CurrentFormat = MACHO;
6451 break;
6452 case MCContext::IsELF:
6453 CurrentFormat = ELF;
6454 break;
6455 case MCContext::IsCOFF:
6456 CurrentFormat = COFF;
6457 break;
6458 case MCContext::IsWasm:
6459 CurrentFormat = WASM;
6460 break;
6461 case MCContext::IsGOFF:
6462 case MCContext::IsSPIRV:
6463 case MCContext::IsXCOFF:
6465 llvm_unreachable("unexpected object format");
6466 break;
6467 }
6468
6469 if (~Prefix->SupportedFormats & CurrentFormat) {
6470 Error(Parser.getTok().getLoc(),
6471 "cannot represent relocation in the current file format");
6472 return true;
6473 }
6474
6475 RefKind = Prefix->VariantKind;
6476 Parser.Lex();
6477
6478 if (getLexer().isNot(AsmToken::Colon)) {
6479 Error(Parser.getTok().getLoc(), "unexpected token after prefix");
6480 return true;
6481 }
6482 Parser.Lex(); // Eat the last ':'
6483
6484 // consume an optional trailing '#' (GNU compatibility) bla
6485 parseOptionalToken(AsmToken::Hash);
6486
6487 return false;
6488}
6489
6490/// Given a mnemonic, split out possible predication code and carry
6491/// setting letters to form a canonical mnemonic and flags.
6492//
6493// FIXME: Would be nice to autogen this.
6494// FIXME: This is a bit of a maze of special cases.
6495StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
6496 ARMCC::CondCodes &PredicationCode,
6497 ARMVCC::VPTCodes &VPTPredicationCode,
6498 bool &CarrySetting,
6499 unsigned &ProcessorIMod,
6500 StringRef &ITMask) {
6501 PredicationCode = ARMCC::AL;
6502 VPTPredicationCode = ARMVCC::None;
6503 CarrySetting = false;
6504 ProcessorIMod = 0;
6505
6506 // Ignore some mnemonics we know aren't predicated forms.
6507 //
6508 // FIXME: Would be nice to autogen this.
6509 if ((Mnemonic == "movs" && isThumb()) || Mnemonic == "teq" ||
6510 Mnemonic == "vceq" || Mnemonic == "svc" || Mnemonic == "mls" ||
6511 Mnemonic == "smmls" || Mnemonic == "vcls" || Mnemonic == "vmls" ||
6512 Mnemonic == "vnmls" || Mnemonic == "vacge" || Mnemonic == "vcge" ||
6513 Mnemonic == "vclt" || Mnemonic == "vacgt" || Mnemonic == "vaclt" ||
6514 Mnemonic == "vacle" || Mnemonic == "hlt" || Mnemonic == "vcgt" ||
6515 Mnemonic == "vcle" || Mnemonic == "smlal" || Mnemonic == "umaal" ||
6516 Mnemonic == "umlal" || Mnemonic == "vabal" || Mnemonic == "vmlal" ||
6517 Mnemonic == "vpadal" || Mnemonic == "vqdmlal" || Mnemonic == "fmuls" ||
6518 Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || Mnemonic == "vcvta" ||
6519 Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || Mnemonic == "vcvtm" ||
6520 Mnemonic == "vrinta" || Mnemonic == "vrintn" || Mnemonic == "vrintp" ||
6521 Mnemonic == "vrintm" || Mnemonic == "hvc" ||
6522 Mnemonic.starts_with("vsel") || Mnemonic == "vins" ||
6523 Mnemonic == "vmovx" || Mnemonic == "bxns" || Mnemonic == "blxns" ||
6524 Mnemonic == "vdot" || Mnemonic == "vmmla" || Mnemonic == "vudot" ||
6525 Mnemonic == "vsdot" || Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
6526 Mnemonic == "vfmal" || Mnemonic == "vfmsl" || Mnemonic == "wls" ||
6527 Mnemonic == "le" || Mnemonic == "dls" || Mnemonic == "csel" ||
6528 Mnemonic == "csinc" || Mnemonic == "csinv" || Mnemonic == "csneg" ||
6529 Mnemonic == "cinc" || Mnemonic == "cinv" || Mnemonic == "cneg" ||
6530 Mnemonic == "cset" || Mnemonic == "csetm" || Mnemonic == "aut" ||
6531 Mnemonic == "pac" || Mnemonic == "pacbti" || Mnemonic == "bti")
6532 return Mnemonic;
6533
6534 // First, split out any predication code. Ignore mnemonics we know aren't
6535 // predicated but do have a carry-set and so weren't caught above.
6536 if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
6537 Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
6538 Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
6539 Mnemonic != "sbcs" && Mnemonic != "rscs" &&
6540 !(hasMVE() &&
6541 (Mnemonic == "vmine" || Mnemonic == "vshle" || Mnemonic == "vshlt" ||
6542 Mnemonic == "vshllt" || Mnemonic == "vrshle" || Mnemonic == "vrshlt" ||
6543 Mnemonic == "vmvne" || Mnemonic == "vorne" || Mnemonic == "vnege" ||
6544 Mnemonic == "vnegt" || Mnemonic == "vmule" || Mnemonic == "vmult" ||
6545 Mnemonic == "vrintne" || Mnemonic == "vcmult" ||
6546 Mnemonic == "vcmule" || Mnemonic == "vpsele" || Mnemonic == "vpselt" ||
6547 Mnemonic.starts_with("vq")))) {
6548 unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2));
6549 if (CC != ~0U) {
6550 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
6551 PredicationCode = static_cast<ARMCC::CondCodes>(CC);
6552 }
6553 }
6554
6555 // Next, determine if we have a carry setting bit. We explicitly ignore all
6556 // the instructions we know end in 's'.
6557 if (Mnemonic.ends_with("s") &&
6558 !(Mnemonic == "cps" || Mnemonic == "mls" || Mnemonic == "mrs" ||
6559 Mnemonic == "smmls" || Mnemonic == "vabs" || Mnemonic == "vcls" ||
6560 Mnemonic == "vmls" || Mnemonic == "vmrs" || Mnemonic == "vnmls" ||
6561 Mnemonic == "vqabs" || Mnemonic == "vrecps" || Mnemonic == "vrsqrts" ||
6562 Mnemonic == "srs" || Mnemonic == "flds" || Mnemonic == "fmrs" ||
6563 Mnemonic == "fsqrts" || Mnemonic == "fsubs" || Mnemonic == "fsts" ||
6564 Mnemonic == "fcpys" || Mnemonic == "fdivs" || Mnemonic == "fmuls" ||
6565 Mnemonic == "fcmps" || Mnemonic == "fcmpzs" || Mnemonic == "vfms" ||
6566 Mnemonic == "vfnms" || Mnemonic == "fconsts" || Mnemonic == "bxns" ||
6567 Mnemonic == "blxns" || Mnemonic == "vfmas" || Mnemonic == "vmlas" ||
6568 (Mnemonic == "movs" && isThumb()))) {
6569 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
6570 CarrySetting = true;
6571 }
6572
6573 // The "cps" instruction can have a interrupt mode operand which is glued into
6574 // the mnemonic. Check if this is the case, split it and parse the imod op
6575 if (Mnemonic.starts_with("cps")) {
6576 // Split out any imod code.
6577 unsigned IMod =
6578 StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
6579 .Case("ie", ARM_PROC::IE)
6580 .Case("id", ARM_PROC::ID)
6581 .Default(~0U);
6582 if (IMod != ~0U) {
6583 Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
6584 ProcessorIMod = IMod;
6585 }
6586 }
6587
6588 if (isMnemonicVPTPredicable(Mnemonic, ExtraToken) && Mnemonic != "vmovlt" &&
6589 Mnemonic != "vshllt" && Mnemonic != "vrshrnt" && Mnemonic != "vshrnt" &&
6590 Mnemonic != "vqrshrunt" && Mnemonic != "vqshrunt" &&
6591 Mnemonic != "vqrshrnt" && Mnemonic != "vqshrnt" && Mnemonic != "vmullt" &&
6592 Mnemonic != "vqmovnt" && Mnemonic != "vqmovunt" &&
6593 Mnemonic != "vqmovnt" && Mnemonic != "vmovnt" && Mnemonic != "vqdmullt" &&
6594 Mnemonic != "vpnot" && Mnemonic != "vcvtt" && Mnemonic != "vcvt") {
6595 unsigned VCC =
6596 ARMVectorCondCodeFromString(Mnemonic.substr(Mnemonic.size() - 1));
6597 if (VCC != ~0U) {
6598 Mnemonic = Mnemonic.slice(0, Mnemonic.size()-1);
6599 VPTPredicationCode = static_cast<ARMVCC::VPTCodes>(VCC);
6600 }
6601 return Mnemonic;
6602 }
6603
6604 // The "it" instruction has the condition mask on the end of the mnemonic.
6605 if (Mnemonic.starts_with("it")) {
6606 ITMask = Mnemonic.slice(2, Mnemonic.size());
6607 Mnemonic = Mnemonic.slice(0, 2);
6608 }
6609
6610 if (Mnemonic.starts_with("vpst")) {
6611 ITMask = Mnemonic.slice(4, Mnemonic.size());
6612 Mnemonic = Mnemonic.slice(0, 4);
6613 } else if (Mnemonic.starts_with("vpt")) {
6614 ITMask = Mnemonic.slice(3, Mnemonic.size());
6615 Mnemonic = Mnemonic.slice(0, 3);
6616 }
6617
6618 return Mnemonic;
6619}
6620
6621/// Given a canonical mnemonic, determine if the instruction ever allows
6622/// inclusion of carry set or predication code operands.
6623//
6624// FIXME: It would be nice to autogen this.
/// Given an already-split (canonical) mnemonic, report via the three bool
/// out-parameters whether the instruction may ever take a CPSR carry-set
/// operand ('s' suffix), an ARM condition code, and/or an MVE vector (VPT)
/// predicate. \p FullInst is the unsplit instruction text, used only for the
/// vmull.p64 special case below.
void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic,
                                         StringRef ExtraToken,
                                         StringRef FullInst,
                                         bool &CanAcceptCarrySet,
                                         bool &CanAcceptPredicationCode,
                                         bool &CanAcceptVPTPredicationCode) {
  // VPT predicability is decided entirely by the table-driven helper.
  CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken);

  // Only this fixed list of data-processing mnemonics can set flags; a few
  // more are flag-setting in ARM mode only.
  CanAcceptCarrySet =
      Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
      Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
      Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
      Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
      Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
      Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
      (!isThumb() &&
       (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
        Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));

  if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
      Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
      Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
      Mnemonic.starts_with("crc32") || Mnemonic.starts_with("cps") ||
      Mnemonic.starts_with("vsel") || Mnemonic == "vmaxnm" ||
      Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
      Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
      Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
      Mnemonic.starts_with("aes") || Mnemonic == "hvc" ||
      Mnemonic == "setpan" || Mnemonic.starts_with("sha1") ||
      Mnemonic.starts_with("sha256") ||
      (FullInst.starts_with("vmull") && FullInst.ends_with(".p64")) ||
      Mnemonic == "vmovx" || Mnemonic == "vins" || Mnemonic == "vudot" ||
      Mnemonic == "vsdot" || Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
      Mnemonic == "vfmal" || Mnemonic == "vfmsl" || Mnemonic == "vfmat" ||
      Mnemonic == "vfmab" || Mnemonic == "vdot" || Mnemonic == "vmmla" ||
      Mnemonic == "sb" || Mnemonic == "ssbb" || Mnemonic == "pssbb" ||
      Mnemonic == "vsmmla" || Mnemonic == "vummla" || Mnemonic == "vusmmla" ||
      Mnemonic == "vusdot" || Mnemonic == "vsudot" || Mnemonic == "bfcsel" ||
      Mnemonic == "wls" || Mnemonic == "dls" || Mnemonic == "le" ||
      Mnemonic == "csel" || Mnemonic == "csinc" || Mnemonic == "csinv" ||
      Mnemonic == "csneg" || Mnemonic == "cinc" || Mnemonic == "cinv" ||
      Mnemonic == "cneg" || Mnemonic == "cset" || Mnemonic == "csetm" ||
      (hasCDE() && MS.isCDEInstr(Mnemonic) &&
       !MS.isITPredicableCDEInstr(Mnemonic)) ||
      Mnemonic.starts_with("vpt") || Mnemonic.starts_with("vpst") ||
      Mnemonic == "pac" || Mnemonic == "pacbti" || Mnemonic == "aut" ||
      Mnemonic == "bti" ||
      (hasMVE() &&
       (Mnemonic.starts_with("vst2") || Mnemonic.starts_with("vld2") ||
        Mnemonic.starts_with("vst4") || Mnemonic.starts_with("vld4") ||
        Mnemonic.starts_with("wlstp") || Mnemonic.starts_with("dlstp") ||
        Mnemonic.starts_with("letp")))) {
    // These mnemonics are never predicable
    CanAcceptPredicationCode = false;
  } else if (!isThumb()) {
    // Some instructions are only predicable in Thumb mode
    CanAcceptPredicationCode =
        Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
        Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
        Mnemonic != "dmb" && Mnemonic != "dfb" && Mnemonic != "dsb" &&
        Mnemonic != "isb" && Mnemonic != "pld" && Mnemonic != "pli" &&
        Mnemonic != "pldw" && Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
        Mnemonic != "stc2" && Mnemonic != "stc2l" && Mnemonic != "tsb" &&
        !Mnemonic.starts_with("rfe") && !Mnemonic.starts_with("srs");
  } else if (isThumbOne()) {
    // Thumb1: almost everything is predicable (inside an IT block); "movs"
    // is excluded, and pre-v6M also "nop".
    if (hasV6MOps())
      CanAcceptPredicationCode = Mnemonic != "movs";
    else
      CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
  } else
    CanAcceptPredicationCode = true;
}
6698
6699bool operandsContainWide(OperandVector &Operands, unsigned MnemonicOpsEndInd) {
6700 for (unsigned I = 0; I < MnemonicOpsEndInd; ++I) {
6701 auto &Op = static_cast<ARMOperand &>(*Operands[I]);
6702 if (Op.isToken() && Op.getToken() == ".w")
6703 return true;
6704 }
6705 return false;
6706}
6707
6708// Some Thumb instructions have two operand forms that are not
6709// available as three operand, convert to two operand form if possible.
6710//
6711// FIXME: We would really like to be able to tablegen'erate this.
void ARMAsmParser::tryConvertingToTwoOperandForm(
    StringRef Mnemonic, ARMCC::CondCodes PredicationCode, bool CarrySetting,
    OperandVector &Operands, unsigned MnemonicOpsEndInd) {

  // An explicit ".w" qualifier requests the wide (three-operand) encoding;
  // leave the instruction alone.
  if (operandsContainWide(Operands, MnemonicOpsEndInd))
    return;
  // Only applies to the exact three-operand shape: Rd, Rn, <Rm/imm>.
  if (Operands.size() != MnemonicOpsEndInd + 3)
    return;

  const auto &Op3 = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]);
  auto &Op4 = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]);
  if (!Op3.isReg() || !Op4.isReg())
    return;

  auto Op3Reg = Op3.getReg();
  auto Op4Reg = Op4.getReg();

  // For most Thumb2 cases we just generate the 3 operand form and reduce
  // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
  // won't accept SP or PC so we do the transformation here taking care
  // with immediate range in the 'add sp, sp #imm' case.
  auto &Op5 = static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 2]);
  if (isThumbTwo()) {
    if (Mnemonic != "add")
      return;
    // Transform if any operand is PC, or if SP is involved (unless it is the
    // 'add sp, sp, #imm' form whose immediate is out of the 0-508/4 range).
    bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
                        (Op5.isReg() && Op5.getReg() == ARM::PC);
    if (!TryTransform) {
      TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
                      (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
                     !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
                       Op5.isImm() && !Op5.isImm0_508s4());
    }
    if (!TryTransform)
      return;
  } else if (!isThumbOne())
    return;

  // Only these mnemonics have a two-operand Thumb form worth producing.
  if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
        Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
        Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
        Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
    return;

  // If first 2 operands of a 3 operand instruction are the same
  // then transform to 2 operand version of the same instruction
  // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1'
  bool Transform = Op3Reg == Op4Reg;

  // For commutative operations, we might be able to transform if we swap
  // Op4 and Op5. The 'ADD Rdm, SP, Rdm' form is already handled specially
  // as tADDrsp.
  const ARMOperand *LastOp = &Op5;
  bool Swap = false;
  if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
      ((Mnemonic == "add" && Op4Reg != ARM::SP) ||
       Mnemonic == "and" || Mnemonic == "eor" ||
       Mnemonic == "adc" || Mnemonic == "orr")) {
    Swap = true;
    LastOp = &Op4;
    Transform = true;
  }

  // If both registers are the same then remove one of them from
  // the operand list, with certain exceptions.
  if (Transform) {
    // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
    // 2 operand forms don't exist.
    if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
        LastOp->isReg())
      Transform = false;

    // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
    // 3-bits because the ARMARM says not to.
    if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
      Transform = false;
  }

  if (Transform) {
    if (Swap)
      std::swap(Op4, Op5);
    // Drop the duplicated destination register.
    Operands.erase(Operands.begin() + MnemonicOpsEndInd);
  }
}
6796
6797// this function returns true if the operand is one of the following
6798// relocations: :upper8_15:, :upper0_7:, :lower8_15: or :lower0_7:
6800 ARMOperand &Op = static_cast<ARMOperand &>(MCOp);
6801 if (!Op.isImm())
6802 return false;
6803 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
6804 if (CE)
6805 return false;
6806 const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
6807 if (!E)
6808 return false;
6809 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
6810 if (ARM16Expr && (ARM16Expr->getKind() == ARMMCExpr::VK_ARM_HI_8_15 ||
6811 ARM16Expr->getKind() == ARMMCExpr::VK_ARM_HI_0_7 ||
6812 ARM16Expr->getKind() == ARMMCExpr::VK_ARM_LO_8_15 ||
6813 ARM16Expr->getKind() == ARMMCExpr::VK_ARM_LO_0_7))
6814 return true;
6815 return false;
6816}
6817
/// Decide whether the implicit MVE vector-predicate operand should be left
/// off the operand list for this instruction. Returns true when the operand
/// must be omitted (non-MVE targets, no operands, or scalar-register vmov
/// forms), false when the VPT predicate operand should be kept.
bool ARMAsmParser::shouldOmitVectorPredicateOperand(
    StringRef Mnemonic, OperandVector &Operands, unsigned MnemonicOpsEndInd) {
  if (!hasMVE() || Operands.size() <= MnemonicOpsEndInd)
    return true;

  if (Mnemonic.starts_with("vld2") || Mnemonic.starts_with("vld4") ||
      Mnemonic.starts_with("vst2") || Mnemonic.starts_with("vst4"))
    return true;

  if (Mnemonic.starts_with("vctp") || Mnemonic.starts_with("vpnot"))
    return false;

  if (Mnemonic.starts_with("vmov") &&
      !(Mnemonic.starts_with("vmovl") || Mnemonic.starts_with("vmovn") ||
        Mnemonic.starts_with("vmovx"))) {
    // vmov with a vector index or an S/D register operand is the scalar
    // (non-MVE) form, which takes no vector predicate.
    for (auto &Operand : Operands) {
      if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
          ((*Operand).isReg() &&
           (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
                (*Operand).getReg()) ||
            ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
                (*Operand).getReg())))) {
        return true;
      }
    }
    return false;
  } else {
    for (auto &Operand : Operands) {
      // We check the larger class QPR instead of just the legal class
      // MQPR, to more accurately report errors when using Q registers
      // outside of the allowed range.
      if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
          static_cast<ARMOperand &>(*Operand).isQReg())
        return false;
    }
    return true;
  }
}
6856
6857// FIXME: This bit should probably be handled via an explicit match class
6858// in the .td files that matches the suffix instead of having it be
6859// a literal string token the way it is now.
6861 return Mnemonic.starts_with("vldm") || Mnemonic.starts_with("vstm");
6862}
6863
6864static void applyMnemonicAliases(StringRef &Mnemonic,
6865 const FeatureBitset &Features,
6866 unsigned VariantID);
6867
6868// The GNU assembler has aliases of ldrd, strd, ldrexd, strexd, ldaexd, and
6869// stlexd with the second register omitted. We don't have a way to do that in
6870// tablegen, so fix it up here.
6871//
6872// We have to be careful to not emit an invalid Rt2 here, because the rest of
// the assembly parser could then generate confusing diagnostics referring to
6874// it. If we do find anything that prevents us from doing the transformation we
6875// bail out, and let the assembly parser report an error on the instruction as
6876// it is written.
6877void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
6879 unsigned MnemonicOpsEndInd) {
6880 if (Mnemonic != "ldrd" && Mnemonic != "strd" && Mnemonic != "ldrexd" &&
6881 Mnemonic != "strexd" && Mnemonic != "ldaexd" && Mnemonic != "stlexd")
6882 return;
6883
6884 unsigned IdX = Mnemonic == "strexd" || Mnemonic == "stlexd"
6885 ? MnemonicOpsEndInd + 1
6886 : MnemonicOpsEndInd;
6887
6888 if (Operands.size() < IdX + 2)
6889 return;
6890
6891 ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[IdX]);
6892 ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[IdX + 1]);
6893
6894 if (!Op2.isReg())
6895 return;
6896 if (!Op3.isGPRMem())
6897 return;
6898
6899 const MCRegisterClass &GPR = MRI->getRegClass(ARM::GPRRegClassID);
6900 if (!GPR.contains(Op2.getReg()))
6901 return;
6902
6903 unsigned RtEncoding = MRI->getEncodingValue(Op2.getReg());
6904 if (!isThumb() && (RtEncoding & 1)) {
6905 // In ARM mode, the registers must be from an aligned pair, this
6906 // restriction does not apply in Thumb mode.
6907 return;
6908 }
6909 if (Op2.getReg() == ARM::PC)
6910 return;
6911 unsigned PairedReg = GPR.getRegister(RtEncoding + 1);
6912 if (!PairedReg || PairedReg == ARM::PC ||
6913 (PairedReg == ARM::SP && !hasV8Ops()))
6914 return;
6915
6916 Operands.insert(
6917 Operands.begin() + IdX + 1,
6918 ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
6919}
6920
// Dual-register instructions have the following syntax:
6922// <mnemonic> <predicate>? <coproc>, <Rdest>, <Rdest+1>, <Rsrc>, ..., #imm
6923// This function tries to remove <Rdest+1> and replace <Rdest> with a pair
6924// operand. If the conversion fails an error is diagnosed, and the function
6925// returns true.
6926bool ARMAsmParser::CDEConvertDualRegOperand(StringRef Mnemonic,
6928 unsigned MnemonicOpsEndInd) {
6929 assert(MS.isCDEDualRegInstr(Mnemonic));
6930
6931 if (Operands.size() < 3 + MnemonicOpsEndInd)
6932 return false;
6933
6934 StringRef Op2Diag(
6935 "operand must be an even-numbered register in the range [r0, r10]");
6936
6937 const MCParsedAsmOperand &Op2 = *Operands[MnemonicOpsEndInd + 1];
6938 if (!Op2.isReg())
6939 return Error(Op2.getStartLoc(), Op2Diag);
6940
6941 unsigned RNext;
6942 unsigned RPair;
6943 switch (Op2.getReg()) {
6944 default:
6945 return Error(Op2.getStartLoc(), Op2Diag);
6946 case ARM::R0:
6947 RNext = ARM::R1;
6948 RPair = ARM::R0_R1;
6949 break;
6950 case ARM::R2:
6951 RNext = ARM::R3;
6952 RPair = ARM::R2_R3;
6953 break;
6954 case ARM::R4:
6955 RNext = ARM::R5;
6956 RPair = ARM::R4_R5;
6957 break;
6958 case ARM::R6:
6959 RNext = ARM::R7;
6960 RPair = ARM::R6_R7;
6961 break;
6962 case ARM::R8:
6963 RNext = ARM::R9;
6964 RPair = ARM::R8_R9;
6965 break;
6966 case ARM::R10:
6967 RNext = ARM::R11;
6968 RPair = ARM::R10_R11;
6969 break;
6970 }
6971
6972 const MCParsedAsmOperand &Op3 = *Operands[MnemonicOpsEndInd + 2];
6973 if (!Op3.isReg() || Op3.getReg() != RNext)
6974 return Error(Op3.getStartLoc(), "operand must be a consecutive register");
6975
6976 Operands.erase(Operands.begin() + MnemonicOpsEndInd + 2);
6977 Operands[MnemonicOpsEndInd + 1] =
6978 ARMOperand::CreateReg(RPair, Op2.getStartLoc(), Op2.getEndLoc());
6979 return false;
6980}
6981
6982void removeCondCode(OperandVector &Operands, unsigned &MnemonicOpsEndInd) {
6983 for (unsigned I = 0; I < MnemonicOpsEndInd; ++I)
6984 if (static_cast<ARMOperand &>(*Operands[I]).isCondCode()) {
6985 Operands.erase(Operands.begin() + I);
6986 --MnemonicOpsEndInd;
6987 break;
6988 }
6989}
6990
6991void removeCCOut(OperandVector &Operands, unsigned &MnemonicOpsEndInd) {
6992 for (unsigned I = 0; I < MnemonicOpsEndInd; ++I)
6993 if (static_cast<ARMOperand &>(*Operands[I]).isCCOut()) {
6994 Operands.erase(Operands.begin() + I);
6995 --MnemonicOpsEndInd;
6996 break;
6997 }
6998}
6999
7000void removeVPTCondCode(OperandVector &Operands, unsigned &MnemonicOpsEndInd) {
7001 for (unsigned I = 0; I < MnemonicOpsEndInd; ++I)
7002 if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred()) {
7003 Operands.erase(Operands.begin() + I);
7004 --MnemonicOpsEndInd;
7005 break;
7006 }
7007}
7008
7009/// Parse an arm instruction mnemonic followed by its operands.
7010bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
7011 SMLoc NameLoc, OperandVector &Operands) {
7012 MCAsmParser &Parser = getParser();
7013
7014 // Apply mnemonic aliases before doing anything else, as the destination
7015 // mnemonic may include suffices and we want to handle them normally.
7016 // The generic tblgen'erated code does this later, at the start of
7017 // MatchInstructionImpl(), but that's too late for aliases that include
7018 // any sort of suffix.
7019 const FeatureBitset &AvailableFeatures = getAvailableFeatures();
7020 unsigned AssemblerDialect = getParser().getAssemblerDialect();
7021 applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
7022
7023 // First check for the ARM-specific .req directive.
7024 if (Parser.getTok().is(AsmToken::Identifier) &&
7025 Parser.getTok().getIdentifier().lower() == ".req") {
7026 parseDirectiveReq(Name, NameLoc);
7027 // We always return 'error' for this, as we're done with this
7028 // statement and don't need to match the 'instruction."
7029 return true;
7030 }
7031
7032 // Create the leading tokens for the mnemonic, split by '.' characters.
7033 size_t Start = 0, Next = Name.find('.');
7034 StringRef Mnemonic = Name.slice(Start, Next);
7035 StringRef ExtraToken = Name.slice(Next, Name.find(' ', Next + 1));
7036
7037 // Split out the predication code and carry setting flag from the mnemonic.
7038 ARMCC::CondCodes PredicationCode;
7039 ARMVCC::VPTCodes VPTPredicationCode;
7040 unsigned ProcessorIMod;
7041 bool CarrySetting;
7042 StringRef ITMask;
7043 Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode, VPTPredicationCode,
7044 CarrySetting, ProcessorIMod, ITMask);
7045
7046 // In Thumb1, only the branch (B) instruction can be predicated.
7047 if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
7048 return Error(NameLoc, "conditional execution not supported in Thumb1");
7049 }
7050
7051 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
7052
7053 // Handle the mask for IT and VPT instructions. In ARMOperand and
7054 // MCOperand, this is stored in a format independent of the
7055 // condition code: the lowest set bit indicates the end of the
7056 // encoding, and above that, a 1 bit indicates 'else', and an 0
7057 // indicates 'then'. E.g.
7058 // IT -> 1000
7059 // ITx -> x100 (ITT -> 0100, ITE -> 1100)
7060 // ITxy -> xy10 (e.g. ITET -> 1010)
7061 // ITxyz -> xyz1 (e.g. ITEET -> 1101)
7062 // Note: See the ARM::PredBlockMask enum in
7063 // /lib/Target/ARM/Utils/ARMBaseInfo.h
7064 if (Mnemonic == "it" || Mnemonic.starts_with("vpt") ||
7065 Mnemonic.starts_with("vpst")) {
7066 SMLoc Loc = Mnemonic == "it" ? SMLoc::getFromPointer(NameLoc.getPointer() + 2) :
7067 Mnemonic == "vpt" ? SMLoc::getFromPointer(NameLoc.getPointer() + 3) :
7068 SMLoc::getFromPointer(NameLoc.getPointer() + 4);
7069 if (ITMask.size() > 3) {
7070 if (Mnemonic == "it")
7071 return Error(Loc, "too many conditions on IT instruction");
7072 return Error(Loc, "too many conditions on VPT instruction");
7073 }
7074 unsigned Mask = 8;
7075 for (char Pos : llvm::reverse(ITMask)) {
7076 if (Pos != 't' && Pos != 'e') {
7077 return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
7078 }
7079 Mask >>= 1;
7080 if (Pos == 'e')
7081 Mask |= 8;
7082 }
7083 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
7084 }
7085
7086 // FIXME: This is all a pretty gross hack. We should automatically handle
7087 // optional operands like this via tblgen.
7088
7089 // Next, add the CCOut and ConditionCode operands, if needed.
7090 //
7091 // For mnemonics which can ever incorporate a carry setting bit or predication
7092 // code, our matching model involves us always generating CCOut and
7093 // ConditionCode operands to match the mnemonic "as written" and then we let
7094 // the matcher deal with finding the right instruction or generating an
7095 // appropriate error.
7096 bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode;
7097 getMnemonicAcceptInfo(Mnemonic, ExtraToken, Name, CanAcceptCarrySet,
7098 CanAcceptPredicationCode, CanAcceptVPTPredicationCode);
7099
7100 // If we had a carry-set on an instruction that can't do that, issue an
7101 // error.
7102 if (!CanAcceptCarrySet && CarrySetting) {
7103 return Error(NameLoc, "instruction '" + Mnemonic +
7104 "' can not set flags, but 's' suffix specified");
7105 }
7106 // If we had a predication code on an instruction that can't do that, issue an
7107 // error.
7108 if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
7109 return Error(NameLoc, "instruction '" + Mnemonic +
7110 "' is not predicable, but condition code specified");
7111 }
7112
7113 // If we had a VPT predication code on an instruction that can't do that, issue an
7114 // error.
7115 if (!CanAcceptVPTPredicationCode && VPTPredicationCode != ARMVCC::None) {
7116 return Error(NameLoc, "instruction '" + Mnemonic +
7117 "' is not VPT predicable, but VPT code T/E is specified");
7118 }
7119
7120 // Add the carry setting operand, if necessary.
7121 if (CanAcceptCarrySet && CarrySetting) {
7122 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
7123 Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
7124 Loc));
7125 }
7126
7127 // Add the predication code operand, if necessary.
7128 if (CanAcceptPredicationCode && PredicationCode != llvm::ARMCC::AL) {
7129 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
7130 CarrySetting);
7131 Operands.push_back(ARMOperand::CreateCondCode(
7132 ARMCC::CondCodes(PredicationCode), Loc));
7133 }
7134
7135 // Add the VPT predication code operand, if necessary.
7136 // Dont add in certain cases of VCVT as this needs to be disambiguated
7137 // after operand parsing.
7138 if (CanAcceptVPTPredicationCode && VPTPredicationCode != llvm::ARMVCC::None &&
7139 !(Mnemonic.starts_with("vcvt") && Mnemonic != "vcvta" &&
7140 Mnemonic != "vcvtn" && Mnemonic != "vcvtp" && Mnemonic != "vcvtm")) {
7141 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
7142 CarrySetting);
7143 Operands.push_back(ARMOperand::CreateVPTPred(
7144 ARMVCC::VPTCodes(VPTPredicationCode), Loc));
7145 }
7146
7147 // Add the processor imod operand, if necessary.
7148 if (ProcessorIMod) {
7149 Operands.push_back(ARMOperand::CreateImm(
7150 MCConstantExpr::create(ProcessorIMod, getContext()),
7151 NameLoc, NameLoc));
7152 } else if (Mnemonic == "cps" && isMClass()) {
7153 return Error(NameLoc, "instruction 'cps' requires effect for M-class");
7154 }
7155
7156 // Add the remaining tokens in the mnemonic.
7157 while (Next != StringRef::npos) {
7158 Start = Next;
7159 Next = Name.find('.', Start + 1);
7160 ExtraToken = Name.slice(Start, Next);
7161
7162 // Some NEON instructions have an optional datatype suffix that is
7163 // completely ignored. Check for that.
7164 if (isDataTypeToken(ExtraToken) &&
7165 doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
7166 continue;
7167
7168 // For for ARM mode generate an error if the .n qualifier is used.
7169 if (ExtraToken == ".n" && !isThumb()) {
7170 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
7171 return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
7172 "arm mode");
7173 }
7174
7175 // The .n qualifier is always discarded as that is what the tables
7176 // and matcher expect. In ARM mode the .w qualifier has no effect,
7177 // so discard it to avoid errors that can be caused by the matcher.
7178 if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
7179 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
7180 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
7181 }
7182 }
7183
7184 // This marks the end of the LHS Mnemonic operators.
7185 // This is used for indexing into the non-menmonic operators as some of the
7186 // mnemonic operators are optional and therfore indexes can differ.
7187 unsigned MnemonicOpsEndInd = Operands.size();
7188
7189 // Read the remaining operands.
7190 if (getLexer().isNot(AsmToken::EndOfStatement)) {
7191 // Read the first operand.
7192 if (parseOperand(Operands, Mnemonic)) {
7193 return true;
7194 }
7195
7196 while (parseOptionalToken(AsmToken::Comma)) {
7197 // Parse and remember the operand.
7198 if (parseOperand(Operands, Mnemonic)) {
7199 return true;
7200 }
7201 }
7202 }
7203
7204 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
7205 return true;
7206
7207 tryConvertingToTwoOperandForm(Mnemonic, PredicationCode, CarrySetting,
7208 Operands, MnemonicOpsEndInd);
7209
7210 if (hasCDE() && MS.isCDEInstr(Mnemonic)) {
7211 // Dual-register instructions use even-odd register pairs as their
7212 // destination operand, in assembly such pair is spelled as two
7213 // consecutive registers, without any special syntax. ConvertDualRegOperand
7214 // tries to convert such operand into register pair, e.g. r2, r3 -> r2_r3.
7215 // It returns true, if an error message has been emitted. If the function
7216 // returns false, the function either succeeded or an error (e.g. missing
7217 // operand) will be diagnosed elsewhere.
7218 if (MS.isCDEDualRegInstr(Mnemonic)) {
7219 bool GotError =
7220 CDEConvertDualRegOperand(Mnemonic, Operands, MnemonicOpsEndInd);
7221 if (GotError)
7222 return GotError;
7223 }
7224 }
7225
7226 if (hasMVE()) {
7227 if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands,
7228 MnemonicOpsEndInd) &&
7229 Mnemonic == "vmov" && PredicationCode == ARMCC::LT) {
7230 // Very nasty hack to deal with the vector predicated variant of vmovlt
7231 // the scalar predicated vmov with condition 'lt'. We can not tell them
7232 // apart until we have parsed their operands.
7233 Operands.erase(Operands.begin() + 1);
7234 Operands.erase(Operands.begin());
7235 SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7236 SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7237 Mnemonic.size() - 1 + CarrySetting);
7238 Operands.insert(Operands.begin(),
7239 ARMOperand::CreateVPTPred(ARMVCC::None, PLoc));
7240 Operands.insert(Operands.begin(),
7241 ARMOperand::CreateToken(StringRef("vmovlt"), MLoc));
7242 } else if (Mnemonic == "vcvt" && PredicationCode == ARMCC::NE &&
7243 !shouldOmitVectorPredicateOperand(Mnemonic, Operands,
7244 MnemonicOpsEndInd)) {
7245 // Another nasty hack to deal with the ambiguity between vcvt with scalar
7246 // predication 'ne' and vcvtn with vector predication 'e'. As above we
7247 // can only distinguish between the two after we have parsed their
7248 // operands.
7249 Operands.erase(Operands.begin() + 1);
7250 Operands.erase(Operands.begin());
7251 SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7252 SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7253 Mnemonic.size() - 1 + CarrySetting);
7254 Operands.insert(Operands.begin(),
7255 ARMOperand::CreateVPTPred(ARMVCC::Else, PLoc));
7256 Operands.insert(Operands.begin(),
7257 ARMOperand::CreateToken(StringRef("vcvtn"), MLoc));
7258 } else if (Mnemonic == "vmul" && PredicationCode == ARMCC::LT &&
7259 !shouldOmitVectorPredicateOperand(Mnemonic, Operands,
7260 MnemonicOpsEndInd)) {
7261 // Another hack, this time to distinguish between scalar predicated vmul
7262 // with 'lt' predication code and the vector instruction vmullt with
7263 // vector predication code "none"
7264 removeCondCode(Operands, MnemonicOpsEndInd);
7265 Operands.erase(Operands.begin());
7266 SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7267 Operands.insert(Operands.begin(),
7268 ARMOperand::CreateToken(StringRef("vmullt"), MLoc));
7269 } else if (Mnemonic.starts_with("vcvt") && !Mnemonic.starts_with("vcvta") &&
7270 !Mnemonic.starts_with("vcvtn") &&
7271 !Mnemonic.starts_with("vcvtp") &&
7272 !Mnemonic.starts_with("vcvtm")) {
7273 if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands,
7274 MnemonicOpsEndInd)) {
7275 // We could not split the vector predicate off vcvt because it might
7276 // have been the scalar vcvtt instruction. Now we know its a vector
7277 // instruction, we still need to check whether its the vector
7278 // predicated vcvt with 'Then' predication or the vector vcvtt. We can
7279 // distinguish the two based on the suffixes, if it is any of
7280 // ".f16.f32", ".f32.f16", ".f16.f64" or ".f64.f16" then it is the vcvtt.
7281 if (Mnemonic.starts_with("vcvtt") && MnemonicOpsEndInd > 2) {
7282 auto Sz1 =
7283 static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd - 2]);
7284 auto Sz2 =
7285 static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd - 1]);
7286 if (!(Sz1.isToken() && Sz1.getToken().starts_with(".f") &&
7287 Sz2.isToken() && Sz2.getToken().starts_with(".f"))) {
7288 Operands.erase(Operands.begin());
7289 SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
7290 VPTPredicationCode = ARMVCC::Then;
7291
7292 Mnemonic = Mnemonic.substr(0, 4);
7293 Operands.insert(Operands.begin(),
7294 ARMOperand::CreateToken(Mnemonic, MLoc));
7295 }
7296 }
7297 SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
7298 Mnemonic.size() + CarrySetting);
7299 // Add VPTPred
7300 Operands.insert(Operands.begin() + 1,
7301 ARMOperand::CreateVPTPred(
7302 ARMVCC::VPTCodes(VPTPredicationCode), PLoc));
7303 ++MnemonicOpsEndInd;
7304 }
7305 } else if (CanAcceptVPTPredicationCode) {
7306 // For all other instructions, make sure only one of the two
7307 // predication operands is left behind, depending on whether we should
7308 // use the vector predication.
7309 if (shouldOmitVectorPredicateOperand(Mnemonic, Operands,
7310 MnemonicOpsEndInd)) {
7311 removeVPTCondCode(Operands, MnemonicOpsEndInd);
7312 }
7313 }
7314 }
7315
7316 if (VPTPredicationCode != ARMVCC::None) {
7317 bool usedVPTPredicationCode = false;
7318 for (unsigned I = 1; I < Operands.size(); ++I)
7319 if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
7320 usedVPTPredicationCode = true;
7321 if (!usedVPTPredicationCode) {
7322 // If we have a VPT predication code and we haven't just turned it
7323 // into an operand, then it was a mistake for splitMnemonic to
7324 // separate it from the rest of the mnemonic in the first place,
7325 // and this may lead to wrong disassembly (e.g. scalar floating
7326 // point VCMPE is actually a different instruction from VCMP, so
7327 // we mustn't treat them the same). In that situation, glue it
7328 // back on.
7329 Mnemonic = Name.slice(0, Mnemonic.size() + 1);
7330 Operands.erase(Operands.begin());
7331 Operands.insert(Operands.begin(),
7332 ARMOperand::CreateToken(Mnemonic, NameLoc));
7333 }
7334 }
7335
7336 // ARM mode 'blx' need special handling, as the register operand version
7337 // is predicable, but the label operand version is not. So, we can't rely
7338 // on the Mnemonic based checking to correctly figure out when to put
7339 // a k_CondCode operand in the list. If we're trying to match the label
7340 // version, remove the k_CondCode operand here.
7341 if (!isThumb() && Mnemonic == "blx" &&
7342 Operands.size() == MnemonicOpsEndInd + 1 &&
7343 static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]).isImm())
7344 removeCondCode(Operands, MnemonicOpsEndInd);
7345
7346 // GNU Assembler extension (compatibility).
7347 fixupGNULDRDAlias(Mnemonic, Operands, MnemonicOpsEndInd);
7348
  // Adjust operands of ldrexd/strexd to MCK_GPRPair.
  // ldrexd/strexd require an even/odd GPR pair. To enforce this constraint,
  // a single GPRPair reg operand is used in the .td file to replace the two
  // GPRs. However, when parsing from asm, the two GPRs cannot be
  // automatically expressed as a GPRPair, so we have to manually merge them.
  // FIXME: We would really like to be able to tablegen'erate this.
7355 // FIXME: We would really like to be able to tablegen'erate this.
7356 bool IsLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
7357 if (!isThumb() && Operands.size() > MnemonicOpsEndInd + 1 + (!IsLoad) &&
7358 (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
7359 Mnemonic == "stlexd")) {
7360 unsigned Idx = IsLoad ? MnemonicOpsEndInd : MnemonicOpsEndInd + 1;
7361 ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
7362 ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
7363
7364 const MCRegisterClass &MRC = MRI->getRegClass(ARM::GPRRegClassID);
7365 // Adjust only if Op1 is a GPR.
7366 if (Op1.isReg() && MRC.contains(Op1.getReg())) {
7367 unsigned Reg1 = Op1.getReg();
7368 unsigned Rt = MRI->getEncodingValue(Reg1);
7369 unsigned Reg2 = Op2.getReg();
7370 unsigned Rt2 = MRI->getEncodingValue(Reg2);
7371 // Rt2 must be Rt + 1.
7372 if (Rt + 1 != Rt2)
7373 return Error(Op2.getStartLoc(),
7374 IsLoad ? "destination operands must be sequential"
7375 : "source operands must be sequential");
7376
7377 // Rt must be even
7378 if (Rt & 1)
7379 return Error(
7380 Op1.getStartLoc(),
7381 IsLoad ? "destination operands must start start at an even register"
7382 : "source operands must start start at an even register");
7383
7384 unsigned NewReg = MRI->getMatchingSuperReg(
7385 Reg1, ARM::gsub_0, &(MRI->getRegClass(ARM::GPRPairRegClassID)));
7386
7387 Operands[Idx] =
7388 ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
7389 Operands.erase(Operands.begin() + Idx + 1);
7390 }
7391 }
7392
7393 // FIXME: As said above, this is all a pretty gross hack. This instruction
7394 // does not fit with other "subs" and tblgen.
7395 // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
7396 // so the Mnemonic is the original name "subs" and delete the predicate
7397 // operand so it will match the table entry.
7398 if (isThumbTwo() && Mnemonic == "sub" &&
7399 Operands.size() == MnemonicOpsEndInd + 3 &&
7400 static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]).isReg() &&
7401 static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]).getReg() ==
7402 ARM::PC &&
7403 static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]).isReg() &&
7404 static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]).getReg() ==
7405 ARM::LR &&
7406 static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 2]).isImm()) {
7407 Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
7408 removeCCOut(Operands, MnemonicOpsEndInd);
7409 }
7410 return false;
7411}
7412
7413// Validate context-sensitive operand constraints.
7414
// Return 'true' if the register list contains non-low GPR registers,
// 'false' otherwise. If Reg is in the register list, set 'containsReg' to
// true. HiReg, if non-zero, names a single high register that is also
// permitted in the list.
7418static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo,
7419 unsigned Reg, unsigned HiReg,
7420 bool &containsReg) {
7421 containsReg = false;
7422 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
7423 unsigned OpReg = Inst.getOperand(i).getReg();
7424 if (OpReg == Reg)
7425 containsReg = true;
7426 // Anything other than a low register isn't legal here.
7427 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
7428 return true;
7429 }
7430 return false;
7431}
7432
// Check if the specified register is in the register list of the inst,
// starting at the indicated operand number.
7435static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg) {
7436 for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) {
7437 unsigned OpReg = Inst.getOperand(i).getReg();
7438 if (OpReg == Reg)
7439 return true;
7440 }
7441 return false;
7442}
7443
7444// Return true if instruction has the interesting property of being
7445// allowed in IT blocks, but not being predicable.
7446static bool instIsBreakpoint(const MCInst &Inst) {
7447 return Inst.getOpcode() == ARM::tBKPT ||
7448 Inst.getOpcode() == ARM::BKPT ||
7449 Inst.getOpcode() == ARM::tHLT ||
7450 Inst.getOpcode() == ARM::HLT;
7451}
7452
7454 unsigned MnemonicOpsEndInd) {
7455 for (unsigned I = MnemonicOpsEndInd; I < Operands.size(); ++I) {
7456 const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[I]);
7457 if (Op.isRegList()) {
7458 return I;
7459 }
7460 }
7461 return 0;
7462}
7463
7464bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
7465 const OperandVector &Operands,
7466 unsigned MnemonicOpsEndInd,
7467 unsigned ListIndex, bool IsARPop) {
7468 bool ListContainsSP = listContainsReg(Inst, ListIndex, ARM::SP);
7469 bool ListContainsLR = listContainsReg(Inst, ListIndex, ARM::LR);
7470 bool ListContainsPC = listContainsReg(Inst, ListIndex, ARM::PC);
7471
7472 if (!IsARPop && ListContainsSP)
7473 return Error(
7474 Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7475 "SP may not be in the register list");
7476 if (ListContainsPC && ListContainsLR)
7477 return Error(
7478 Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7479 "PC and LR may not be in the register list simultaneously");
7480 return false;
7481}
7482
7483bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
7484 const OperandVector &Operands,
7485 unsigned MnemonicOpsEndInd,
7486 unsigned ListIndex) {
7487 bool ListContainsSP = listContainsReg(Inst, ListIndex, ARM::SP);
7488 bool ListContainsPC = listContainsReg(Inst, ListIndex, ARM::PC);
7489
7490 if (ListContainsSP && ListContainsPC)
7491 return Error(
7492 Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7493 "SP and PC may not be in the register list");
7494 if (ListContainsSP)
7495 return Error(
7496 Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7497 "SP may not be in the register list");
7498 if (ListContainsPC)
7499 return Error(
7500 Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
7501 "PC may not be in the register list");
7502 return false;
7503}
7504
7505bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
7506 bool Load, bool ARMMode, bool Writeback,
7507 unsigned MnemonicOpsEndInd) {
7508 unsigned RtIndex = Load || !Writeback ? 0 : 1;
7509 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(RtIndex).getReg());
7510 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(RtIndex + 1).getReg());
7511
7512 if (ARMMode) {
7513 // Rt can't be R14.
7514 if (Rt == 14)
7515 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7516 "Rt can't be R14");
7517
7518 // Rt must be even-numbered.
7519 if ((Rt & 1) == 1)
7520 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7521 "Rt must be even-numbered");
7522
7523 // Rt2 must be Rt + 1.
7524 if (Rt2 != Rt + 1) {
7525 if (Load)
7526 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7527 "destination operands must be sequential");
7528 else
7529 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7530 "source operands must be sequential");
7531 }
7532
7533 // FIXME: Diagnose m == 15
7534 // FIXME: Diagnose ldrd with m == t || m == t2.
7535 }
7536
7537 if (!ARMMode && Load) {
7538 if (Rt2 == Rt)
7539 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7540 "destination operands can't be identical");
7541 }
7542
7543 if (Writeback) {
7544 unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
7545
7546 if (Rn == Rt || Rn == Rt2) {
7547 if (Load)
7548 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7549 "base register needs to be different from destination "
7550 "registers");
7551 else
7552 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7553 "source register and base register can't be identical");
7554 }
7555
7556 // FIXME: Diagnose ldrd/strd with writeback and n == 15.
7557 // (Except the immediate form of ldrd?)
7558 }
7559
7560 return false;
7561}
7562
7564 for (unsigned i = 0; i < MCID.NumOperands; ++i) {
7565 if (ARM::isVpred(MCID.operands()[i].OperandType))
7566 return i;
7567 }
7568 return -1;
7569}
7570
7571static bool isVectorPredicable(const MCInstrDesc &MCID) {
7572 return findFirstVectorPredOperandIdx(MCID) != -1;
7573}
7574
7576 ARMOperand &Op = static_cast<ARMOperand &>(MCOp);
7577 if (!Op.isImm())
7578 return false;
7579 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
7580 if (CE)
7581 return false;
7582 const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
7583 if (!E)
7584 return false;
7585 return true;
7586}
7587
7588// FIXME: We would really like to be able to tablegen'erate this.
7589bool ARMAsmParser::validateInstruction(MCInst &Inst,
7590 const OperandVector &Operands,
7591 unsigned MnemonicOpsEndInd) {
7592 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
7593 SMLoc Loc = Operands[0]->getStartLoc();
7594
7595 // Check the IT block state first.
7596 // NOTE: BKPT and HLT instructions have the interesting property of being
7597 // allowed in IT blocks, but not being predicable. They just always execute.
7598 if (inITBlock() && !instIsBreakpoint(Inst)) {
7599 // The instruction must be predicable.
7600 if (!MCID.isPredicable())
7601 return Error(Loc, "instructions in IT block must be predicable");
7604 if (Cond != currentITCond()) {
7605 // Find the condition code Operand to get its SMLoc information.
7606 SMLoc CondLoc = Operands[0]->getEndLoc();
7607 for (unsigned I = 1; I < Operands.size(); ++I)
7608 if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
7609 CondLoc = Operands[I]->getStartLoc();
7610 return Error(CondLoc, "incorrect condition in IT block; got '" +
7612 "', but expected '" +
7613 ARMCondCodeToString(currentITCond()) + "'");
7614 }
7615 // Check for non-'al' condition codes outside of the IT block.
7616 } else if (isThumbTwo() && MCID.isPredicable() &&
7617 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
7618 ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
7619 Inst.getOpcode() != ARM::t2Bcc &&
7620 Inst.getOpcode() != ARM::t2BFic) {
7621 return Error(Loc, "predicated instructions must be in IT block");
7622 } else if (!isThumb() && !useImplicitITARM() && MCID.isPredicable() &&
7623 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
7624 ARMCC::AL) {
7625 return Warning(Loc, "predicated instructions should be in IT block");
7626 } else if (!MCID.isPredicable()) {
7627 // Check the instruction doesn't have a predicate operand anyway
7628 // that it's not allowed to use. Sometimes this happens in order
7629 // to keep instructions the same shape even though one cannot
7630 // legally be predicated, e.g. vmul.f16 vs vmul.f32.
7631 for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
7632 if (MCID.operands()[i].isPredicate()) {
7633 if (Inst.getOperand(i).getImm() != ARMCC::AL)
7634 return Error(Loc, "instruction is not predicable");
7635 break;
7636 }
7637 }
7638 }
7639
7640 // PC-setting instructions in an IT block, but not the last instruction of
7641 // the block, are UNPREDICTABLE.
7642 if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
7643 return Error(Loc, "instruction must be outside of IT block or the last instruction in an IT block");
7644 }
7645
7646 if (inVPTBlock() && !instIsBreakpoint(Inst)) {
7647 unsigned Bit = extractITMaskBit(VPTState.Mask, VPTState.CurPosition);
7648 if (!isVectorPredicable(MCID))
7649 return Error(Loc, "instruction in VPT block must be predicable");
7650 unsigned Pred = Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm();
7651 unsigned VPTPred = Bit ? ARMVCC::Else : ARMVCC::Then;
7652 if (Pred != VPTPred) {
7653 SMLoc PredLoc;
7654 for (unsigned I = 1; I < Operands.size(); ++I)
7655 if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
7656 PredLoc = Operands[I]->getStartLoc();
7657 return Error(PredLoc, "incorrect predication in VPT block; got '" +
7659 "', but expected '" +
7660 ARMVPTPredToString(ARMVCC::VPTCodes(VPTPred)) + "'");
7661 }
7662 }
7663 else if (isVectorPredicable(MCID) &&
7664 Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm() !=
7666 return Error(Loc, "VPT predicated instructions must be in VPT block");
7667
7668 const unsigned Opcode = Inst.getOpcode();
7669 switch (Opcode) {
7670 case ARM::VLLDM:
7671 case ARM::VLLDM_T2:
7672 case ARM::VLSTM:
7673 case ARM::VLSTM_T2: {
7674 // Since in some cases both T1 and T2 are valid, tablegen can not always
7675 // pick the correct instruction.
7676 if (Operands.size() ==
7677 MnemonicOpsEndInd + 2) { // a register list has been provided
7678 ARMOperand &Op = static_cast<ARMOperand &>(
7679 *Operands[MnemonicOpsEndInd + 1]); // the register list, a dpr_reglist
7680 assert(Op.isDPRRegList());
7681 auto &RegList = Op.getRegList();
7682 // T2 requires v8.1-M.Main (cannot be handled by tablegen)
7683 if (RegList.size() == 32 && !hasV8_1MMainline()) {
7684 return Error(Op.getEndLoc(), "T2 version requires v8.1-M.Main");
7685 }
7686 // When target has 32 D registers, T1 is undefined.
7687 if (hasD32() && RegList.size() != 32) {
7688 return Error(Op.getEndLoc(), "operand must be exactly {d0-d31}");
7689 }
7690 // When target has 16 D registers, both T1 and T2 are valid.
7691 if (!hasD32() && (RegList.size() != 16 && RegList.size() != 32)) {
7692 return Error(Op.getEndLoc(),
7693 "operand must be exactly {d0-d15} (T1) or {d0-d31} (T2)");
7694 }
7695 }
7696 return false;
7697 }
7698 case ARM::t2IT: {
7699 // Encoding is unpredictable if it ever results in a notional 'NV'
7700 // predicate. Since we don't parse 'NV' directly this means an 'AL'
7701 // predicate with an "else" mask bit.
7702 unsigned Cond = Inst.getOperand(0).getImm();
7703 unsigned Mask = Inst.getOperand(1).getImm();
7704
7705 // Conditions only allowing a 't' are those with no set bit except
7706 // the lowest-order one that indicates the end of the sequence. In
7707 // other words, powers of 2.
7708 if (Cond == ARMCC::AL && llvm::popcount(Mask) != 1)
7709 return Error(Loc, "unpredictable IT predicate sequence");
7710 break;
7711 }
7712 case ARM::LDRD:
7713 if (validateLDRDSTRD(Inst, Operands, /*Load*/ true, /*ARMMode*/ true,
7714 /*Writeback*/ false, MnemonicOpsEndInd))
7715 return true;
7716 break;
7717 case ARM::LDRD_PRE:
7718 case ARM::LDRD_POST:
7719 if (validateLDRDSTRD(Inst, Operands, /*Load*/ true, /*ARMMode*/ true,
7720 /*Writeback*/ true, MnemonicOpsEndInd))
7721 return true;
7722 break;
7723 case ARM::t2LDRDi8:
7724 if (validateLDRDSTRD(Inst, Operands, /*Load*/ true, /*ARMMode*/ false,
7725 /*Writeback*/ false, MnemonicOpsEndInd))
7726 return true;
7727 break;
7728 case ARM::t2LDRD_PRE:
7729 case ARM::t2LDRD_POST:
7730 if (validateLDRDSTRD(Inst, Operands, /*Load*/ true, /*ARMMode*/ false,
7731 /*Writeback*/ true, MnemonicOpsEndInd))
7732 return true;
7733 break;
7734 case ARM::t2BXJ: {
7735 const unsigned RmReg = Inst.getOperand(0).getReg();
7736 // Rm = SP is no longer unpredictable in v8-A
7737 if (RmReg == ARM::SP && !hasV8Ops())
7738 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7739 "r13 (SP) is an unpredictable operand to BXJ");
7740 return false;
7741 }
7742 case ARM::STRD:
7743 if (validateLDRDSTRD(Inst, Operands, /*Load*/ false, /*ARMMode*/ true,
7744 /*Writeback*/ false, MnemonicOpsEndInd))
7745 return true;
7746 break;
7747 case ARM::STRD_PRE:
7748 case ARM::STRD_POST:
7749 if (validateLDRDSTRD(Inst, Operands, /*Load*/ false, /*ARMMode*/ true,
7750 /*Writeback*/ true, MnemonicOpsEndInd))
7751 return true;
7752 break;
7753 case ARM::t2STRD_PRE:
7754 case ARM::t2STRD_POST:
7755 if (validateLDRDSTRD(Inst, Operands, /*Load*/ false, /*ARMMode*/ false,
7756 /*Writeback*/ true, MnemonicOpsEndInd))
7757 return true;
7758 break;
7759 case ARM::STR_PRE_IMM:
7760 case ARM::STR_PRE_REG:
7761 case ARM::t2STR_PRE:
7762 case ARM::STR_POST_IMM:
7763 case ARM::STR_POST_REG:
7764 case ARM::t2STR_POST:
7765 case ARM::STRH_PRE:
7766 case ARM::t2STRH_PRE:
7767 case ARM::STRH_POST:
7768 case ARM::t2STRH_POST:
7769 case ARM::STRB_PRE_IMM:
7770 case ARM::STRB_PRE_REG:
7771 case ARM::t2STRB_PRE:
7772 case ARM::STRB_POST_IMM:
7773 case ARM::STRB_POST_REG:
7774 case ARM::t2STRB_POST: {
7775 // Rt must be different from Rn.
7776 const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
7777 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7778
7779 if (Rt == Rn)
7780 return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
7781 "source register and base register can't be identical");
7782 return false;
7783 }
7784 case ARM::t2LDR_PRE_imm:
7785 case ARM::t2LDR_POST_imm:
7786 case ARM::t2STR_PRE_imm:
7787 case ARM::t2STR_POST_imm: {
7788 // Rt must be different from Rn.
7789 const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
7790 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(1).getReg());
7791
7792 if (Rt == Rn)
7793 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7794 "destination register and base register can't be identical");
7795 if (Inst.getOpcode() == ARM::t2LDR_POST_imm ||
7796 Inst.getOpcode() == ARM::t2STR_POST_imm) {
7797 int Imm = Inst.getOperand(2).getImm();
7798 if (Imm > 255 || Imm < -255)
7799 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7800 "operand must be in range [-255, 255]");
7801 }
7802 if (Inst.getOpcode() == ARM::t2STR_PRE_imm ||
7803 Inst.getOpcode() == ARM::t2STR_POST_imm) {
7804 if (Inst.getOperand(0).getReg() == ARM::PC) {
7805 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7806 "operand must be a register in range [r0, r14]");
7807 }
7808 }
7809 return false;
7810 }
7811
7812 case ARM::t2LDRB_OFFSET_imm:
7813 case ARM::t2LDRB_PRE_imm:
7814 case ARM::t2LDRB_POST_imm:
7815 case ARM::t2STRB_OFFSET_imm:
7816 case ARM::t2STRB_PRE_imm:
7817 case ARM::t2STRB_POST_imm: {
7818 if (Inst.getOpcode() == ARM::t2LDRB_POST_imm ||
7819 Inst.getOpcode() == ARM::t2STRB_POST_imm ||
7820 Inst.getOpcode() == ARM::t2LDRB_PRE_imm ||
7821 Inst.getOpcode() == ARM::t2STRB_PRE_imm) {
7822 int Imm = Inst.getOperand(2).getImm();
7823 if (Imm > 255 || Imm < -255)
7824 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7825 "operand must be in range [-255, 255]");
7826 } else if (Inst.getOpcode() == ARM::t2LDRB_OFFSET_imm ||
7827 Inst.getOpcode() == ARM::t2STRB_OFFSET_imm) {
7828 int Imm = Inst.getOperand(2).getImm();
7829 if (Imm > 0 || Imm < -255)
7830 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7831 "operand must be in range [0, 255] with a negative sign");
7832 }
7833 if (Inst.getOperand(0).getReg() == ARM::PC) {
7834 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7835 "if operand is PC, should call the LDRB (literal)");
7836 }
7837 return false;
7838 }
7839
7840 case ARM::t2LDRH_OFFSET_imm:
7841 case ARM::t2LDRH_PRE_imm:
7842 case ARM::t2LDRH_POST_imm:
7843 case ARM::t2STRH_OFFSET_imm:
7844 case ARM::t2STRH_PRE_imm:
7845 case ARM::t2STRH_POST_imm: {
7846 if (Inst.getOpcode() == ARM::t2LDRH_POST_imm ||
7847 Inst.getOpcode() == ARM::t2STRH_POST_imm ||
7848 Inst.getOpcode() == ARM::t2LDRH_PRE_imm ||
7849 Inst.getOpcode() == ARM::t2STRH_PRE_imm) {
7850 int Imm = Inst.getOperand(2).getImm();
7851 if (Imm > 255 || Imm < -255)
7852 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7853 "operand must be in range [-255, 255]");
7854 } else if (Inst.getOpcode() == ARM::t2LDRH_OFFSET_imm ||
7855 Inst.getOpcode() == ARM::t2STRH_OFFSET_imm) {
7856 int Imm = Inst.getOperand(2).getImm();
7857 if (Imm > 0 || Imm < -255)
7858 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7859 "operand must be in range [0, 255] with a negative sign");
7860 }
7861 if (Inst.getOperand(0).getReg() == ARM::PC) {
7862 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7863 "if operand is PC, should call the LDRH (literal)");
7864 }
7865 return false;
7866 }
7867
7868 case ARM::t2LDRSB_OFFSET_imm:
7869 case ARM::t2LDRSB_PRE_imm:
7870 case ARM::t2LDRSB_POST_imm: {
7871 if (Inst.getOpcode() == ARM::t2LDRSB_POST_imm ||
7872 Inst.getOpcode() == ARM::t2LDRSB_PRE_imm) {
7873 int Imm = Inst.getOperand(2).getImm();
7874 if (Imm > 255 || Imm < -255)
7875 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7876 "operand must be in range [-255, 255]");
7877 } else if (Inst.getOpcode() == ARM::t2LDRSB_OFFSET_imm) {
7878 int Imm = Inst.getOperand(2).getImm();
7879 if (Imm > 0 || Imm < -255)
7880 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7881 "operand must be in range [0, 255] with a negative sign");
7882 }
7883 if (Inst.getOperand(0).getReg() == ARM::PC) {
7884 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7885 "if operand is PC, should call the LDRH (literal)");
7886 }
7887 return false;
7888 }
7889
7890 case ARM::t2LDRSH_OFFSET_imm:
7891 case ARM::t2LDRSH_PRE_imm:
7892 case ARM::t2LDRSH_POST_imm: {
7893 if (Inst.getOpcode() == ARM::t2LDRSH_POST_imm ||
7894 Inst.getOpcode() == ARM::t2LDRSH_PRE_imm) {
7895 int Imm = Inst.getOperand(2).getImm();
7896 if (Imm > 255 || Imm < -255)
7897 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7898 "operand must be in range [-255, 255]");
7899 } else if (Inst.getOpcode() == ARM::t2LDRSH_OFFSET_imm) {
7900 int Imm = Inst.getOperand(2).getImm();
7901 if (Imm > 0 || Imm < -255)
7902 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7903 "operand must be in range [0, 255] with a negative sign");
7904 }
7905 if (Inst.getOperand(0).getReg() == ARM::PC) {
7906 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7907 "if operand is PC, should call the LDRH (literal)");
7908 }
7909 return false;
7910 }
7911
7912 case ARM::LDR_PRE_IMM:
7913 case ARM::LDR_PRE_REG:
7914 case ARM::t2LDR_PRE:
7915 case ARM::LDR_POST_IMM:
7916 case ARM::LDR_POST_REG:
7917 case ARM::t2LDR_POST:
7918 case ARM::LDRH_PRE:
7919 case ARM::t2LDRH_PRE:
7920 case ARM::LDRH_POST:
7921 case ARM::t2LDRH_POST:
7922 case ARM::LDRSH_PRE:
7923 case ARM::t2LDRSH_PRE:
7924 case ARM::LDRSH_POST:
7925 case ARM::t2LDRSH_POST:
7926 case ARM::LDRB_PRE_IMM:
7927 case ARM::LDRB_PRE_REG:
7928 case ARM::t2LDRB_PRE:
7929 case ARM::LDRB_POST_IMM:
7930 case ARM::LDRB_POST_REG:
7931 case ARM::t2LDRB_POST:
7932 case ARM::LDRSB_PRE:
7933 case ARM::t2LDRSB_PRE:
7934 case ARM::LDRSB_POST:
7935 case ARM::t2LDRSB_POST: {
7936 // Rt must be different from Rn.
7937 const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
7938 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7939
7940 if (Rt == Rn)
7941 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7942 "destination register and base register can't be identical");
7943 return false;
7944 }
7945
7946 case ARM::MVE_VLDRBU8_rq:
7947 case ARM::MVE_VLDRBU16_rq:
7948 case ARM::MVE_VLDRBS16_rq:
7949 case ARM::MVE_VLDRBU32_rq:
7950 case ARM::MVE_VLDRBS32_rq:
7951 case ARM::MVE_VLDRHU16_rq:
7952 case ARM::MVE_VLDRHU16_rq_u:
7953 case ARM::MVE_VLDRHU32_rq:
7954 case ARM::MVE_VLDRHU32_rq_u:
7955 case ARM::MVE_VLDRHS32_rq:
7956 case ARM::MVE_VLDRHS32_rq_u:
7957 case ARM::MVE_VLDRWU32_rq:
7958 case ARM::MVE_VLDRWU32_rq_u:
7959 case ARM::MVE_VLDRDU64_rq:
7960 case ARM::MVE_VLDRDU64_rq_u:
7961 case ARM::MVE_VLDRWU32_qi:
7962 case ARM::MVE_VLDRWU32_qi_pre:
7963 case ARM::MVE_VLDRDU64_qi:
7964 case ARM::MVE_VLDRDU64_qi_pre: {
7965 // Qd must be different from Qm.
7966 unsigned QdIdx = 0, QmIdx = 2;
7967 bool QmIsPointer = false;
7968 switch (Opcode) {
7969 case ARM::MVE_VLDRWU32_qi:
7970 case ARM::MVE_VLDRDU64_qi:
7971 QmIdx = 1;
7972 QmIsPointer = true;
7973 break;
7974 case ARM::MVE_VLDRWU32_qi_pre:
7975 case ARM::MVE_VLDRDU64_qi_pre:
7976 QdIdx = 1;
7977 QmIsPointer = true;
7978 break;
7979 }
7980
7981 const unsigned Qd = MRI->getEncodingValue(Inst.getOperand(QdIdx).getReg());
7982 const unsigned Qm = MRI->getEncodingValue(Inst.getOperand(QmIdx).getReg());
7983
7984 if (Qd == Qm) {
7985 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
7986 Twine("destination vector register and vector ") +
7987 (QmIsPointer ? "pointer" : "offset") +
7988 " register can't be identical");
7989 }
7990 return false;
7991 }
7992
7993 case ARM::SBFX:
7994 case ARM::t2SBFX:
7995 case ARM::UBFX:
7996 case ARM::t2UBFX: {
7997 // Width must be in range [1, 32-lsb].
7998 unsigned LSB = Inst.getOperand(2).getImm();
7999 unsigned Widthm1 = Inst.getOperand(3).getImm();
8000 if (Widthm1 >= 32 - LSB)
8001 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8002 "bitfield width must be in range [1,32-lsb]");
8003 return false;
8004 }
8005 // Notionally handles ARM::tLDMIA_UPD too.
8006 case ARM::tLDMIA: {
8007 // If we're parsing Thumb2, the .w variant is available and handles
8008 // most cases that are normally illegal for a Thumb1 LDM instruction.
8009 // We'll make the transformation in processInstruction() if necessary.
8010 //
8011 // Thumb LDM instructions are writeback iff the base register is not
8012 // in the register list.
8013 unsigned Rn = Inst.getOperand(0).getReg();
8014 bool HasWritebackToken =
8015 (static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8016 .isToken() &&
8017 static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8018 .getToken() == "!");
8019
8020 bool ListContainsBase;
8021 if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
8022 return Error(
8023 Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
8024 "registers must be in range r0-r7");
8025 // If we should have writeback, then there should be a '!' token.
8026 if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
8027 return Error(
8028 Operands[getRegListInd(Operands, MnemonicOpsEndInd)]->getStartLoc(),
8029 "writeback operator '!' expected");
8030 // If we should not have writeback, there must not be a '!'. This is
8031 // true even for the 32-bit wide encodings.
8032 if (ListContainsBase && HasWritebackToken)
8033 return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8034 "writeback operator '!' not allowed when base register "
8035 "in register list");
8036
8037 if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
8038 return true;
8039 break;
8040 }
8041 case ARM::LDMIA_UPD:
8042 case ARM::LDMDB_UPD:
8043 case ARM::LDMIB_UPD:
8044 case ARM::LDMDA_UPD:
8045 // ARM variants loading and updating the same register are only officially
8046 // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
8047 if (!hasV7Ops())
8048 break;
8049 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
8050 return Error(Operands.back()->getStartLoc(),
8051 "writeback register not allowed in register list");
8052 break;
8053 case ARM::t2LDMIA:
8054 case ARM::t2LDMDB:
8055 if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
8056 return true;
8057 break;
8058 case ARM::t2STMIA:
8059 case ARM::t2STMDB:
8060 if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
8061 return true;
8062 break;
8063 case ARM::t2LDMIA_UPD:
8064 case ARM::t2LDMDB_UPD:
8065 case ARM::t2STMIA_UPD:
8066 case ARM::t2STMDB_UPD:
8067 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
8068 return Error(Operands.back()->getStartLoc(),
8069 "writeback register not allowed in register list");
8070
8071 if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
8072 if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
8073 return true;
8074 } else {
8075 if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, 3))
8076 return true;
8077 }
8078 break;
8079
8080 case ARM::sysLDMIA_UPD:
8081 case ARM::sysLDMDA_UPD:
8082 case ARM::sysLDMDB_UPD:
8083 case ARM::sysLDMIB_UPD:
8084 if (!listContainsReg(Inst, 3, ARM::PC))
8085 return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8086 "writeback register only allowed on system LDM "
8087 "if PC in register-list");
8088 break;
8089 case ARM::sysSTMIA_UPD:
8090 case ARM::sysSTMDA_UPD:
8091 case ARM::sysSTMDB_UPD:
8092 case ARM::sysSTMIB_UPD:
8093 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8094 "system STM cannot have writeback register");
8095 // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
8096 // so only issue a diagnostic for thumb1. The instructions will be
8097 // switched to the t2 encodings in processInstruction() if necessary.
8098 case ARM::tPOP: {
8099 bool ListContainsBase;
8100 if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
8101 !isThumbTwo())
8102 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8103 "registers must be in range r0-r7 or pc");
8104 if (validatetLDMRegList(Inst, Operands, MnemonicOpsEndInd, 2, !isMClass()))
8105 return true;
8106 break;
8107 }
8108 case ARM::tPUSH: {
8109 bool ListContainsBase;
8110 if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
8111 !isThumbTwo())
8112 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8113 "registers must be in range r0-r7 or lr");
8114 if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, 2))
8115 return true;
8116 break;
8117 }
8118 case ARM::tSTMIA_UPD: {
8119 bool ListContainsBase, InvalidLowList;
8120 InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
8121 0, ListContainsBase);
8122 if (InvalidLowList && !isThumbTwo())
8123 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8124 "registers must be in range r0-r7");
8125
8126 // This would be converted to a 32-bit stm, but that's not valid if the
8127 // writeback register is in the list.
8128 if (InvalidLowList && ListContainsBase)
8129 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8130 "writeback operator '!' not allowed when base register "
8131 "in register list");
8132
8133 if (validatetSTMRegList(Inst, Operands, MnemonicOpsEndInd, 4))
8134 return true;
8135 break;
8136 }
8137 case ARM::tADDrSP:
8138 // If the non-SP source operand and the destination operand are not the
8139 // same, we need thumb2 (for the wide encoding), or we have an error.
8140 if (!isThumbTwo() &&
8141 Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
8142 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8143 "source register must be the same as destination");
8144 }
8145 break;
8146
8147 case ARM::t2ADDrr:
8148 case ARM::t2ADDrs:
8149 case ARM::t2SUBrr:
8150 case ARM::t2SUBrs:
8151 if (Inst.getOperand(0).getReg() == ARM::SP &&
8152 Inst.getOperand(1).getReg() != ARM::SP)
8153 return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8154 "source register must be sp if destination is sp");
8155 break;
8156
8157 // Final range checking for Thumb unconditional branch instructions.
8158 case ARM::tB:
8159 if (!(static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd]))
8160 .isSignedOffset<11, 1>())
8161 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8162 "branch target out of range");
8163 break;
8164 case ARM::t2B: {
8165 int op = (Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8166 : MnemonicOpsEndInd + 1;
8167 ARMOperand &Operand = static_cast<ARMOperand &>(*Operands[op]);
8168 // Delay the checks of symbolic expressions until they are resolved.
8169 if (!isa<MCBinaryExpr>(Operand.getImm()) &&
8170 !Operand.isSignedOffset<24, 1>())
8171 return Error(Operands[op]->getStartLoc(), "branch target out of range");
8172 break;
8173 }
8174 // Final range checking for Thumb conditional branch instructions.
8175 case ARM::tBcc:
8176 if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd])
8177 .isSignedOffset<8, 1>())
8178 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8179 "branch target out of range");
8180 break;
8181 case ARM::t2Bcc: {
8182 int Op = (Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8183 : MnemonicOpsEndInd + 1;
8184 if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
8185 return Error(Operands[Op]->getStartLoc(), "branch target out of range");
8186 break;
8187 }
8188 case ARM::tCBZ:
8189 case ARM::tCBNZ: {
8190 if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8191 .isUnsignedOffset<6, 1>())
8192 return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8193 "branch target out of range");
8194 break;
8195 }
8196 case ARM::MOVi16:
8197 case ARM::MOVTi16:
8198 case ARM::t2MOVi16:
8199 case ARM::t2MOVTi16:
8200 {
8201 // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
8202 // especially when we turn it into a movw and the expression <symbol> does
8203 // not have a :lower16: or :upper16 as part of the expression. We don't
8204 // want the behavior of silently truncating, which can be unexpected and
8205 // lead to bugs that are difficult to find since this is an easy mistake
8206 // to make.
8207 int i = (Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8208 : MnemonicOpsEndInd + 1;
8209 ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
8210 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
8211 if (CE) break;
8212 const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
8213 if (!E) break;
8214 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
8215 if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
8216 ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
8217 return Error(
8218 Op.getStartLoc(),
8219 "immediate expression for mov requires :lower16: or :upper16");
8220 break;
8221 }
8222 case ARM::tADDi8: {
8223 MCParsedAsmOperand &Op = *Operands[MnemonicOpsEndInd + 1];
8225 return Error(Op.getStartLoc(),
8226 "Immediate expression for Thumb adds requires :lower0_7:,"
8227 " :lower8_15:, :upper0_7: or :upper8_15:");
8228 break;
8229 }
8230 case ARM::tMOVi8: {
8231 MCParsedAsmOperand &Op = *Operands[MnemonicOpsEndInd];
8233 return Error(Op.getStartLoc(),
8234 "Immediate expression for Thumb movs requires :lower0_7:,"
8235 " :lower8_15:, :upper0_7: or :upper8_15:");
8236 break;
8237 }
8238 case ARM::HINT:
8239 case ARM::t2HINT: {
8240 unsigned Imm8 = Inst.getOperand(0).getImm();
8241 unsigned Pred = Inst.getOperand(1).getImm();
8242 // ESB is not predicable (pred must be AL). Without the RAS extension, this
8243 // behaves as any other unallocated hint.
8244 if (Imm8 == 0x10 && Pred != ARMCC::AL && hasRAS())
8245 return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
8246 "predicable, but condition "
8247 "code specified");
8248 if (Imm8 == 0x14 && Pred != ARMCC::AL)
8249 return Error(Operands[1]->getStartLoc(), "instruction 'csdb' is not "
8250 "predicable, but condition "
8251 "code specified");
8252 break;
8253 }
8254 case ARM::t2BFi:
8255 case ARM::t2BFr:
8256 case ARM::t2BFLi:
8257 case ARM::t2BFLr: {
8258 if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd])
8259 .isUnsignedOffset<4, 1>() ||
8260 (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0)) {
8261 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8262 "branch location out of range or not a multiple of 2");
8263 }
8264
8265 if (Opcode == ARM::t2BFi) {
8266 if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8267 .isSignedOffset<16, 1>())
8268 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8269 "branch target out of range or not a multiple of 2");
8270 } else if (Opcode == ARM::t2BFLi) {
8271 if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8272 .isSignedOffset<18, 1>())
8273 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8274 "branch target out of range or not a multiple of 2");
8275 }
8276 break;
8277 }
8278 case ARM::t2BFic: {
8279 if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd])
8280 .isUnsignedOffset<4, 1>() ||
8281 (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
8282 return Error(Operands[1]->getStartLoc(),
8283 "branch location out of range or not a multiple of 2");
8284
8285 if (!static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8286 .isSignedOffset<16, 1>())
8287 return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8288 "branch target out of range or not a multiple of 2");
8289
8290 assert(Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() &&
8291 "branch location and else branch target should either both be "
8292 "immediates or both labels");
8293
8294 if (Inst.getOperand(0).isImm() && Inst.getOperand(2).isImm()) {
8295 int Diff = Inst.getOperand(2).getImm() - Inst.getOperand(0).getImm();
8296 if (Diff != 4 && Diff != 2)
8297 return Error(
8298 Operands[3]->getStartLoc(),
8299 "else branch target must be 2 or 4 greater than the branch location");
8300 }
8301 break;
8302 }
8303 case ARM::t2CLRM: {
8304 for (unsigned i = 2; i < Inst.getNumOperands(); i++) {
8305 if (Inst.getOperand(i).isReg() &&
8306 !ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
8307 Inst.getOperand(i).getReg())) {
8308 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8309 "invalid register in register list. Valid registers are "
8310 "r0-r12, lr/r14 and APSR.");
8311 }
8312 }
8313 break;
8314 }
8315 case ARM::DSB:
8316 case ARM::t2DSB: {
8317
8318 if (Inst.getNumOperands() < 2)
8319 break;
8320
8321 unsigned Option = Inst.getOperand(0).getImm();
8322 unsigned Pred = Inst.getOperand(1).getImm();
8323
8324 // SSBB and PSSBB (DSB #0|#4) are not predicable (pred must be AL).
8325 if (Option == 0 && Pred != ARMCC::AL)
8326 return Error(Operands[1]->getStartLoc(),
8327 "instruction 'ssbb' is not predicable, but condition code "
8328 "specified");
8329 if (Option == 4 && Pred != ARMCC::AL)
8330 return Error(Operands[1]->getStartLoc(),
8331 "instruction 'pssbb' is not predicable, but condition code "
8332 "specified");
8333 break;
8334 }
8335 case ARM::VMOVRRS: {
8336 // Source registers must be sequential.
8337 const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(2).getReg());
8338 const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(3).getReg());
8339 if (Sm1 != Sm + 1)
8340 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8341 "source operands must be sequential");
8342 break;
8343 }
8344 case ARM::VMOVSRR: {
8345 // Destination registers must be sequential.
8346 const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(0).getReg());
8347 const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
8348 if (Sm1 != Sm + 1)
8349 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8350 "destination operands must be sequential");
8351 break;
8352 }
8353 case ARM::VLDMDIA:
8354 case ARM::VSTMDIA: {
8355 ARMOperand &Op =
8356 static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]);
8357 auto &RegList = Op.getRegList();
8358 if (RegList.size() < 1 || RegList.size() > 16)
8359 return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8360 "list of registers must be at least 1 and at most 16");
8361 break;
8362 }
8363 case ARM::MVE_VQDMULLs32bh:
8364 case ARM::MVE_VQDMULLs32th:
8365 case ARM::MVE_VCMULf32:
8366 case ARM::MVE_VMULLBs32:
8367 case ARM::MVE_VMULLTs32:
8368 case ARM::MVE_VMULLBu32:
8369 case ARM::MVE_VMULLTu32: {
8370 if (Operands[MnemonicOpsEndInd]->getReg() ==
8371 Operands[MnemonicOpsEndInd + 1]->getReg()) {
8372 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8373 "Qd register and Qn register can't be identical");
8374 }
8375 if (Operands[MnemonicOpsEndInd]->getReg() ==
8376 Operands[MnemonicOpsEndInd + 2]->getReg()) {
8377 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8378 "Qd register and Qm register can't be identical");
8379 }
8380 break;
8381 }
8382 case ARM::MVE_VREV64_8:
8383 case ARM::MVE_VREV64_16:
8384 case ARM::MVE_VREV64_32:
8385 case ARM::MVE_VQDMULL_qr_s32bh:
8386 case ARM::MVE_VQDMULL_qr_s32th: {
8387 if (Operands[MnemonicOpsEndInd]->getReg() ==
8388 Operands[MnemonicOpsEndInd + 1]->getReg()) {
8389 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8390 "Qd register and Qn register can't be identical");
8391 }
8392 break;
8393 }
8394 case ARM::MVE_VCADDi32:
8395 case ARM::MVE_VCADDf32:
8396 case ARM::MVE_VHCADDs32: {
8397 if (Operands[MnemonicOpsEndInd]->getReg() ==
8398 Operands[MnemonicOpsEndInd + 2]->getReg()) {
8399 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8400 "Qd register and Qm register can't be identical");
8401 }
8402 break;
8403 }
8404 case ARM::MVE_VMOV_rr_q: {
8405 if (Operands[MnemonicOpsEndInd + 2]->getReg() !=
8406 Operands[MnemonicOpsEndInd + 4]->getReg())
8407 return Error(Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8408 "Q-registers must be the same");
8409 if (static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 3])
8410 .getVectorIndex() !=
8411 static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 5])
8412 .getVectorIndex() +
8413 2)
8414 return Error(Operands[MnemonicOpsEndInd + 3]->getStartLoc(),
8415 "Q-register indexes must be 2 and 0 or 3 and 1");
8416 break;
8417 }
8418 case ARM::MVE_VMOV_q_rr: {
8419 if (Operands[MnemonicOpsEndInd]->getReg() !=
8420 Operands[MnemonicOpsEndInd + 2]->getReg())
8421 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8422 "Q-registers must be the same");
8423 if (static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
8424 .getVectorIndex() !=
8425 static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 3])
8426 .getVectorIndex() +
8427 2)
8428 return Error(Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8429 "Q-register indexes must be 2 and 0 or 3 and 1");
8430 break;
8431 }
8432 case ARM::MVE_SQRSHR:
8433 case ARM::MVE_UQRSHL: {
8434 if (Operands[MnemonicOpsEndInd]->getReg() ==
8435 Operands[MnemonicOpsEndInd + 1]->getReg()) {
8436 return Error(Operands[MnemonicOpsEndInd]->getStartLoc(),
8437 "Rda register and Rm register can't be identical");
8438 }
8439 break;
8440 }
8441 case ARM::UMAAL:
8442 case ARM::UMLAL:
8443 case ARM::UMULL:
8444 case ARM::t2UMAAL:
8445 case ARM::t2UMLAL:
8446 case ARM::t2UMULL:
8447 case ARM::SMLAL:
8448 case ARM::SMLALBB:
8449 case ARM::SMLALBT:
8450 case ARM::SMLALD:
8451 case ARM::SMLALDX:
8452 case ARM::SMLALTB:
8453 case ARM::SMLALTT:
8454 case ARM::SMLSLD:
8455 case ARM::SMLSLDX:
8456 case ARM::SMULL:
8457 case ARM::t2SMLAL:
8458 case ARM::t2SMLALBB:
8459 case ARM::t2SMLALBT:
8460 case ARM::t2SMLALD:
8461 case ARM::t2SMLALDX:
8462 case ARM::t2SMLALTB:
8463 case ARM::t2SMLALTT:
8464 case ARM::t2SMLSLD:
8465 case ARM::t2SMLSLDX:
8466 case ARM::t2SMULL: {
8467 unsigned RdHi = Inst.getOperand(0).getReg();
8468 unsigned RdLo = Inst.getOperand(1).getReg();
8469 if(RdHi == RdLo) {
8470 return Error(Loc,
8471 "unpredictable instruction, RdHi and RdLo must be different");
8472 }
8473 break;
8474 }
8475
8476 case ARM::CDE_CX1:
8477 case ARM::CDE_CX1A:
8478 case ARM::CDE_CX1D:
8479 case ARM::CDE_CX1DA:
8480 case ARM::CDE_CX2:
8481 case ARM::CDE_CX2A:
8482 case ARM::CDE_CX2D:
8483 case ARM::CDE_CX2DA:
8484 case ARM::CDE_CX3:
8485 case ARM::CDE_CX3A:
8486 case ARM::CDE_CX3D:
8487 case ARM::CDE_CX3DA:
8488 case ARM::CDE_VCX1_vec:
8489 case ARM::CDE_VCX1_fpsp:
8490 case ARM::CDE_VCX1_fpdp:
8491 case ARM::CDE_VCX1A_vec:
8492 case ARM::CDE_VCX1A_fpsp:
8493 case ARM::CDE_VCX1A_fpdp:
8494 case ARM::CDE_VCX2_vec:
8495 case ARM::CDE_VCX2_fpsp:
8496 case ARM::CDE_VCX2_fpdp:
8497 case ARM::CDE_VCX2A_vec:
8498 case ARM::CDE_VCX2A_fpsp:
8499 case ARM::CDE_VCX2A_fpdp:
8500 case ARM::CDE_VCX3_vec:
8501 case ARM::CDE_VCX3_fpsp:
8502 case ARM::CDE_VCX3_fpdp:
8503 case ARM::CDE_VCX3A_vec:
8504 case ARM::CDE_VCX3A_fpsp:
8505 case ARM::CDE_VCX3A_fpdp: {
8506 assert(Inst.getOperand(1).isImm() &&
8507 "CDE operand 1 must be a coprocessor ID");
8508 int64_t Coproc = Inst.getOperand(1).getImm();
8509 if (Coproc < 8 && !ARM::isCDECoproc(Coproc, *STI))
8510 return Error(Operands[1]->getStartLoc(),
8511 "coprocessor must be configured as CDE");
8512 else if (Coproc >= 8)
8513 return Error(Operands[1]->getStartLoc(),
8514 "coprocessor must be in the range [p0, p7]");
8515 break;
8516 }
8517
8518 case ARM::t2CDP:
8519 case ARM::t2CDP2:
8520 case ARM::t2LDC2L_OFFSET:
8521 case ARM::t2LDC2L_OPTION:
8522 case ARM::t2LDC2L_POST:
8523 case ARM::t2LDC2L_PRE:
8524 case ARM::t2LDC2_OFFSET:
8525 case ARM::t2LDC2_OPTION:
8526 case ARM::t2LDC2_POST:
8527 case ARM::t2LDC2_PRE:
8528 case ARM::t2LDCL_OFFSET:
8529 case ARM::t2LDCL_OPTION:
8530 case ARM::t2LDCL_POST:
8531 case ARM::t2LDCL_PRE:
8532 case ARM::t2LDC_OFFSET:
8533 case ARM::t2LDC_OPTION:
8534 case ARM::t2LDC_POST:
8535 case ARM::t2LDC_PRE:
8536 case ARM::t2MCR:
8537 case ARM::t2MCR2:
8538 case ARM::t2MCRR:
8539 case ARM::t2MCRR2:
8540 case ARM::t2MRC:
8541 case ARM::t2MRC2:
8542 case ARM::t2MRRC:
8543 case ARM::t2MRRC2:
8544 case ARM::t2STC2L_OFFSET:
8545 case ARM::t2STC2L_OPTION:
8546 case ARM::t2STC2L_POST:
8547 case ARM::t2STC2L_PRE:
8548 case ARM::t2STC2_OFFSET:
8549 case ARM::t2STC2_OPTION:
8550 case ARM::t2STC2_POST:
8551 case ARM::t2STC2_PRE:
8552 case ARM::t2STCL_OFFSET:
8553 case ARM::t2STCL_OPTION:
8554 case ARM::t2STCL_POST:
8555 case ARM::t2STCL_PRE:
8556 case ARM::t2STC_OFFSET:
8557 case ARM::t2STC_OPTION:
8558 case ARM::t2STC_POST:
8559 case ARM::t2STC_PRE: {
8560 unsigned Opcode = Inst.getOpcode();
8561 // Inst.getOperand indexes operands in the (oops ...) and (iops ...) dags,
8562 // CopInd is the index of the coprocessor operand.
8563 size_t CopInd = 0;
8564 if (Opcode == ARM::t2MRRC || Opcode == ARM::t2MRRC2)
8565 CopInd = 2;
8566 else if (Opcode == ARM::t2MRC || Opcode == ARM::t2MRC2)
8567 CopInd = 1;
8568 assert(Inst.getOperand(CopInd).isImm() &&
8569 "Operand must be a coprocessor ID");
8570 int64_t Coproc = Inst.getOperand(CopInd).getImm();
8571 // Operands[2] is the coprocessor operand at syntactic level
8572 if (ARM::isCDECoproc(Coproc, *STI))
8573 return Error(Operands[2]->getStartLoc(),
8574 "coprocessor must be configured as GCP");
8575 break;
8576 }
8577 }
8578
8579 return false;
8580}
8581
/// Map a VST "Asm" pseudo-opcode (produced by the assembly matcher for the
/// NEON vector-store syntactic forms) to the real MC instruction opcode.
///
/// \param Opc      The pseudo opcode selected by the matcher (ARM::VST*Asm*).
/// \param Spacing  [out] Register-list stride of the real instruction:
///                 1 = consecutive D registers (d0,d1,d2,...),
///                 2 = every-other D register (d0,d2,d4,...).
/// \return The concrete ARM::VST* opcode to emit.
///
/// Unreachable for any opcode outside the VST pseudo set; callers only pass
/// opcodes the matcher produced for these forms.
static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
  switch(Opc) {
  default: llvm_unreachable("unexpected opcode!");
  // VST1LN
  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;

  // VST2LN
  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;

  // VST3LN
  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
  // NOTE(review): Spacing = 1 here, but the register-writeback and _32
  // q-variants below use Spacing = 2. Looks inconsistent for a q-register
  // (double-spaced) lane store — confirm whether this is intentional.
  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;

  // VST3
  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;

  // VST4LN
  case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
  // NOTE(review): same apparent inconsistency as VST3LNqWB_fixed_Asm_16
  // above — Spacing = 1 where the register-writeback counterpart uses 2.
  // Verify against the matcher's operand layout before changing.
  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
  case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
  case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;

  // VST4
  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
  }
}
8690
8691static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
8692 switch(Opc) {
8693 default: llvm_unreachable("unexpected opcode!");
8694 // VLD1LN
8695 case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
8696 case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
8697 case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
8698 case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD;
8699 case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
8700 case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
8701 case ARM::VLD1LNdAsm_8: Spacing = 1; return ARM::VLD1LNd8;
8702 case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
8703 case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
8704
8705 // VLD2LN
8706 case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
8707 case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
8708 case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
8709 case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
8710 case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
8711 case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD;
8712 case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
8713 case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
8714 case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
8715 case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
8716 case ARM::VLD2LNdAsm_8: Spacing = 1; return ARM::VLD2LNd8;
8717 case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
8718 case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
8719 case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
8720 case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
8721
8722 // VLD3DUP
8723 case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
8724 case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
8725 case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
8726 case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
8727 case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
8728 case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
8729 case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD;
8730 case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
8731 case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
8732 case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
8733 case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
8734 case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
8735 case ARM::VLD3DUPdAsm_8: Spacing = 1; return ARM::VLD3DUPd8;
8736 case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
8737 case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
8738 case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
8739 case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
8740 case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
8741
8742 // VLD3LN
8743 case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
8744 case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
8745 case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
8746 case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
8747 case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
8748 case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD;
8749 case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
8750 case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
8751 case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
8752 case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
8753 case ARM::VLD3LNdAsm_8: Spacing = 1; return ARM::VLD3LNd8;
8754 case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
8755 case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
8756 case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
8757 case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
8758
8759 // VLD3
8760 case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
8761 case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
8762 case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
8763 case ARM::VLD3qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
8764 case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
8765 case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
8766 case ARM::VLD3dWB_register_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD;
8767 case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
8768 case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
8769 case ARM::VLD3qWB_register_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD;
8770 case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
8771 case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
8772 case ARM::VLD3dAsm_8: Spacing = 1; return ARM::VLD3d8;
8773 case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
8774 case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
8775 case ARM::VLD3qAsm_8: Spacing = 2; return ARM::VLD3q8;
8776 case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
8777 case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
8778
8779 // VLD4LN
8780 case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
8781 case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
8782 case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
8783 case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
8784 case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
8785 case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD;
8786 case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
8787 case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
8788 case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
8789 case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
8790 case ARM::VLD4LNdAsm_8: Spacing = 1; return ARM::VLD4LNd8;
8791 case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
8792 case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
8793 case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
8794 case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
8795
8796 // VLD4DUP
8797 case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
8798 case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
8799 case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
8800 case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
8801 case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
8802 case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
8803 case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD;
8804 case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
8805 case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
8806 case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
8807 case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
8808 case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
8809 case ARM::VLD4DUPdAsm_8: Spacing = 1; return ARM::VLD4DUPd8;
8810 case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
8811 case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
8812 case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
8813 case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
8814 case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
8815
8816 // VLD4
8817 case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
8818 case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
8819 case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
8820 case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
8821 case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
8822 case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
8823 case ARM::VLD4dWB_register_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD;
8824 case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
8825 case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
8826 case ARM::VLD4qWB_register_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD;
8827 case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
8828 case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
8829 case ARM::VLD4dAsm_8: Spacing = 1; return ARM::VLD4d8;
8830 case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
8831 case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
8832 case ARM::VLD4qAsm_8: Spacing = 2; return ARM::VLD4q8;
8833 case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
8834 case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
8835 }
8836}
8837
8838bool ARMAsmParser::processInstruction(MCInst &Inst,
8839 const OperandVector &Operands,
8840 unsigned MnemonicOpsEndInd,
8841 MCStreamer &Out) {
8842 // Check if we have the wide qualifier, because if it's present we
8843 // must avoid selecting a 16-bit thumb instruction.
8844 bool HasWideQualifier = false;
8845 for (auto &Op : Operands) {
8846 ARMOperand &ARMOp = static_cast<ARMOperand&>(*Op);
8847 if (ARMOp.isToken() && ARMOp.getToken() == ".w") {
8848 HasWideQualifier = true;
8849 break;
8850 }
8851 }
8852
8853 switch (Inst.getOpcode()) {
8854 case ARM::VLLDM:
8855 case ARM::VLSTM: {
 8856 // In some cases both T1 and T2 are valid, causing tablegen to pick T1
 8857 // instead of T2
8858 if (Operands.size() ==
8859 MnemonicOpsEndInd + 2) { // a register list has been provided
8860 ARMOperand &Op = static_cast<ARMOperand &>(
8861 *Operands[MnemonicOpsEndInd + 1]); // the register list, a dpr_reglist
8862 assert(Op.isDPRRegList());
8863 auto &RegList = Op.getRegList();
8864 // When the register list is {d0-d31} the instruction has to be the T2
8865 // variant
8866 if (RegList.size() == 32) {
8867 const unsigned Opcode =
8868 (Inst.getOpcode() == ARM::VLLDM) ? ARM::VLLDM_T2 : ARM::VLSTM_T2;
8869 MCInst TmpInst;
8870 TmpInst.setOpcode(Opcode);
8871 TmpInst.addOperand(Inst.getOperand(0));
8872 TmpInst.addOperand(Inst.getOperand(1));
8873 TmpInst.addOperand(Inst.getOperand(2));
8874 TmpInst.addOperand(Inst.getOperand(3));
8875 Inst = TmpInst;
8876 return true;
8877 }
8878 }
8879 return false;
8880 }
8881 // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
8882 case ARM::LDRT_POST:
8883 case ARM::LDRBT_POST: {
8884 const unsigned Opcode =
8885 (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
8886 : ARM::LDRBT_POST_IMM;
8887 MCInst TmpInst;
8888 TmpInst.setOpcode(Opcode);
8889 TmpInst.addOperand(Inst.getOperand(0));
8890 TmpInst.addOperand(Inst.getOperand(1));
8891 TmpInst.addOperand(Inst.getOperand(1));
8892 TmpInst.addOperand(MCOperand::createReg(0));
8893 TmpInst.addOperand(MCOperand::createImm(0));
8894 TmpInst.addOperand(Inst.getOperand(2));
8895 TmpInst.addOperand(Inst.getOperand(3));
8896 Inst = TmpInst;
8897 return true;
8898 }
 8899 // Alias for 'ldr{sb,h,sh}t Rt, [Rn] {, #imm}' for omitted immediate.
8900 case ARM::LDRSBTii:
8901 case ARM::LDRHTii:
8902 case ARM::LDRSHTii: {
8903 MCInst TmpInst;
8904
8905 if (Inst.getOpcode() == ARM::LDRSBTii)
8906 TmpInst.setOpcode(ARM::LDRSBTi);
8907 else if (Inst.getOpcode() == ARM::LDRHTii)
8908 TmpInst.setOpcode(ARM::LDRHTi);
8909 else if (Inst.getOpcode() == ARM::LDRSHTii)
8910 TmpInst.setOpcode(ARM::LDRSHTi);
8911 TmpInst.addOperand(Inst.getOperand(0));
8912 TmpInst.addOperand(Inst.getOperand(1));
8913 TmpInst.addOperand(Inst.getOperand(1));
8914 TmpInst.addOperand(MCOperand::createImm(256));
8915 TmpInst.addOperand(Inst.getOperand(2));
8916 Inst = TmpInst;
8917 return true;
8918 }
8919 // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
8920 case ARM::STRT_POST:
8921 case ARM::STRBT_POST: {
8922 const unsigned Opcode =
8923 (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
8924 : ARM::STRBT_POST_IMM;
8925 MCInst TmpInst;
8926 TmpInst.setOpcode(Opcode);
8927 TmpInst.addOperand(Inst.getOperand(1));
8928 TmpInst.addOperand(Inst.getOperand(0));
8929 TmpInst.addOperand(Inst.getOperand(1));
8930 TmpInst.addOperand(MCOperand::createReg(0));
8931 TmpInst.addOperand(MCOperand::createImm(0));
8932 TmpInst.addOperand(Inst.getOperand(2));
8933 TmpInst.addOperand(Inst.getOperand(3));
8934 Inst = TmpInst;
8935 return true;
8936 }
8937 // Alias for alternate form of 'ADR Rd, #imm' instruction.
8938 case ARM::ADDri: {
8939 if (Inst.getOperand(1).getReg() != ARM::PC ||
8940 Inst.getOperand(5).getReg() != 0 ||
8941 !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
8942 return false;
8943 MCInst TmpInst;
8944 TmpInst.setOpcode(ARM::ADR);
8945 TmpInst.addOperand(Inst.getOperand(0));
8946 if (Inst.getOperand(2).isImm()) {
8947 // Immediate (mod_imm) will be in its encoded form, we must unencode it
8948 // before passing it to the ADR instruction.
8949 unsigned Enc = Inst.getOperand(2).getImm();
8951 llvm::rotr<uint32_t>(Enc & 0xFF, (Enc & 0xF00) >> 7)));
8952 } else {
8953 // Turn PC-relative expression into absolute expression.
8954 // Reading PC provides the start of the current instruction + 8 and
8955 // the transform to adr is biased by that.
8956 MCSymbol *Dot = getContext().createTempSymbol();
8957 Out.emitLabel(Dot);
8958 const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
8959 const MCExpr *InstPC = MCSymbolRefExpr::create(Dot,
8961 getContext());
8962 const MCExpr *Const8 = MCConstantExpr::create(8, getContext());
8963 const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8,
8964 getContext());
8965 const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr,
8966 getContext());
8967 TmpInst.addOperand(MCOperand::createExpr(FixupAddr));
8968 }
8969 TmpInst.addOperand(Inst.getOperand(3));
8970 TmpInst.addOperand(Inst.getOperand(4));
8971 Inst = TmpInst;
8972 return true;
8973 }
8974 // Aliases for imm syntax of LDR instructions.
8975 case ARM::t2LDR_PRE_imm:
8976 case ARM::t2LDR_POST_imm: {
8977 MCInst TmpInst;
8978 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDR_PRE_imm ? ARM::t2LDR_PRE
8979 : ARM::t2LDR_POST);
8980 TmpInst.addOperand(Inst.getOperand(0)); // Rt
8981 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8982 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8983 TmpInst.addOperand(Inst.getOperand(2)); // imm
8984 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8985 Inst = TmpInst;
8986 return true;
8987 }
8988 // Aliases for imm syntax of STR instructions.
8989 case ARM::t2STR_PRE_imm:
8990 case ARM::t2STR_POST_imm: {
8991 MCInst TmpInst;
8992 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STR_PRE_imm ? ARM::t2STR_PRE
8993 : ARM::t2STR_POST);
8994 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
8995 TmpInst.addOperand(Inst.getOperand(0)); // Rt
8996 TmpInst.addOperand(Inst.getOperand(1)); // Rn
8997 TmpInst.addOperand(Inst.getOperand(2)); // imm
8998 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8999 Inst = TmpInst;
9000 return true;
9001 }
9002 // Aliases for imm syntax of LDRB instructions.
9003 case ARM::t2LDRB_OFFSET_imm: {
9004 MCInst TmpInst;
9005 TmpInst.setOpcode(ARM::t2LDRBi8);
9006 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9007 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9008 TmpInst.addOperand(Inst.getOperand(2)); // imm
9009 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9010 Inst = TmpInst;
9011 return true;
9012 }
9013 case ARM::t2LDRB_PRE_imm:
9014 case ARM::t2LDRB_POST_imm: {
9015 MCInst TmpInst;
9016 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRB_PRE_imm
9017 ? ARM::t2LDRB_PRE
9018 : ARM::t2LDRB_POST);
9019 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9020 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9021 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9022 TmpInst.addOperand(Inst.getOperand(2)); // imm
9023 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9024 Inst = TmpInst;
9025 return true;
9026 }
9027 // Aliases for imm syntax of STRB instructions.
9028 case ARM::t2STRB_OFFSET_imm: {
9029 MCInst TmpInst;
9030 TmpInst.setOpcode(ARM::t2STRBi8);
9031 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9032 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9033 TmpInst.addOperand(Inst.getOperand(2)); // imm
9034 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9035 Inst = TmpInst;
9036 return true;
9037 }
9038 case ARM::t2STRB_PRE_imm:
9039 case ARM::t2STRB_POST_imm: {
9040 MCInst TmpInst;
9041 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STRB_PRE_imm
9042 ? ARM::t2STRB_PRE
9043 : ARM::t2STRB_POST);
9044 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9045 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9046 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9047 TmpInst.addOperand(Inst.getOperand(2)); // imm
9048 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9049 Inst = TmpInst;
9050 return true;
9051 }
9052 // Aliases for imm syntax of LDRH instructions.
9053 case ARM::t2LDRH_OFFSET_imm: {
9054 MCInst TmpInst;
9055 TmpInst.setOpcode(ARM::t2LDRHi8);
9056 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9057 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9058 TmpInst.addOperand(Inst.getOperand(2)); // imm
9059 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9060 Inst = TmpInst;
9061 return true;
9062 }
9063 case ARM::t2LDRH_PRE_imm:
9064 case ARM::t2LDRH_POST_imm: {
9065 MCInst TmpInst;
9066 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRH_PRE_imm
9067 ? ARM::t2LDRH_PRE
9068 : ARM::t2LDRH_POST);
9069 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9070 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9071 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9072 TmpInst.addOperand(Inst.getOperand(2)); // imm
9073 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9074 Inst = TmpInst;
9075 return true;
9076 }
9077 // Aliases for imm syntax of STRH instructions.
9078 case ARM::t2STRH_OFFSET_imm: {
9079 MCInst TmpInst;
9080 TmpInst.setOpcode(ARM::t2STRHi8);
9081 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9082 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9083 TmpInst.addOperand(Inst.getOperand(2)); // imm
9084 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9085 Inst = TmpInst;
9086 return true;
9087 }
9088 case ARM::t2STRH_PRE_imm:
9089 case ARM::t2STRH_POST_imm: {
9090 MCInst TmpInst;
9091 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2STRH_PRE_imm
9092 ? ARM::t2STRH_PRE
9093 : ARM::t2STRH_POST);
9094 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9095 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9096 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9097 TmpInst.addOperand(Inst.getOperand(2)); // imm
9098 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9099 Inst = TmpInst;
9100 return true;
9101 }
9102 // Aliases for imm syntax of LDRSB instructions.
9103 case ARM::t2LDRSB_OFFSET_imm: {
9104 MCInst TmpInst;
9105 TmpInst.setOpcode(ARM::t2LDRSBi8);
9106 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9107 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9108 TmpInst.addOperand(Inst.getOperand(2)); // imm
9109 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9110 Inst = TmpInst;
9111 return true;
9112 }
9113 case ARM::t2LDRSB_PRE_imm:
9114 case ARM::t2LDRSB_POST_imm: {
9115 MCInst TmpInst;
9116 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRSB_PRE_imm
9117 ? ARM::t2LDRSB_PRE
9118 : ARM::t2LDRSB_POST);
9119 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9120 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9121 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9122 TmpInst.addOperand(Inst.getOperand(2)); // imm
9123 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9124 Inst = TmpInst;
9125 return true;
9126 }
9127 // Aliases for imm syntax of LDRSH instructions.
9128 case ARM::t2LDRSH_OFFSET_imm: {
9129 MCInst TmpInst;
9130 TmpInst.setOpcode(ARM::t2LDRSHi8);
9131 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9132 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9133 TmpInst.addOperand(Inst.getOperand(2)); // imm
9134 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9135 Inst = TmpInst;
9136 return true;
9137 }
9138 case ARM::t2LDRSH_PRE_imm:
9139 case ARM::t2LDRSH_POST_imm: {
9140 MCInst TmpInst;
9141 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2LDRSH_PRE_imm
9142 ? ARM::t2LDRSH_PRE
9143 : ARM::t2LDRSH_POST);
9144 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9145 TmpInst.addOperand(Inst.getOperand(4)); // Rt_wb
9146 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9147 TmpInst.addOperand(Inst.getOperand(2)); // imm
9148 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9149 Inst = TmpInst;
9150 return true;
9151 }
9152 // Aliases for alternate PC+imm syntax of LDR instructions.
9153 case ARM::t2LDRpcrel:
9154 // Select the narrow version if the immediate will fit.
9155 if (Inst.getOperand(1).getImm() > 0 &&
9156 Inst.getOperand(1).getImm() <= 0xff &&
9157 !HasWideQualifier)
9158 Inst.setOpcode(ARM::tLDRpci);
9159 else
9160 Inst.setOpcode(ARM::t2LDRpci);
9161 return true;
9162 case ARM::t2LDRBpcrel:
9163 Inst.setOpcode(ARM::t2LDRBpci);
9164 return true;
9165 case ARM::t2LDRHpcrel:
9166 Inst.setOpcode(ARM::t2LDRHpci);
9167 return true;
9168 case ARM::t2LDRSBpcrel:
9169 Inst.setOpcode(ARM::t2LDRSBpci);
9170 return true;
9171 case ARM::t2LDRSHpcrel:
9172 Inst.setOpcode(ARM::t2LDRSHpci);
9173 return true;
9174 case ARM::LDRConstPool:
9175 case ARM::tLDRConstPool:
9176 case ARM::t2LDRConstPool: {
9177 // Pseudo instruction ldr rt, =immediate is converted to a
9178 // MOV rt, immediate if immediate is known and representable
9179 // otherwise we create a constant pool entry that we load from.
9180 MCInst TmpInst;
9181 if (Inst.getOpcode() == ARM::LDRConstPool)
9182 TmpInst.setOpcode(ARM::LDRi12);
9183 else if (Inst.getOpcode() == ARM::tLDRConstPool)
9184 TmpInst.setOpcode(ARM::tLDRpci);
9185 else if (Inst.getOpcode() == ARM::t2LDRConstPool)
9186 TmpInst.setOpcode(ARM::t2LDRpci);
9187 const ARMOperand &PoolOperand =
9188 static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1]);
9189 const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
9190 // If SubExprVal is a constant we may be able to use a MOV
9191 if (isa<MCConstantExpr>(SubExprVal) &&
9192 Inst.getOperand(0).getReg() != ARM::PC &&
9193 Inst.getOperand(0).getReg() != ARM::SP) {
9194 int64_t Value =
9195 (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
9196 bool UseMov = true;
9197 bool MovHasS = true;
9198 if (Inst.getOpcode() == ARM::LDRConstPool) {
9199 // ARM Constant
9200 if (ARM_AM::getSOImmVal(Value) != -1) {
9202 TmpInst.setOpcode(ARM::MOVi);
9203 }
9204 else if (ARM_AM::getSOImmVal(~Value) != -1) {
9206 TmpInst.setOpcode(ARM::MVNi);
9207 }
9208 else if (hasV6T2Ops() &&
9209 Value >=0 && Value < 65536) {
9210 TmpInst.setOpcode(ARM::MOVi16);
9211 MovHasS = false;
9212 }
9213 else
9214 UseMov = false;
9215 }
9216 else {
9217 // Thumb/Thumb2 Constant
9218 if (hasThumb2() &&
9220 TmpInst.setOpcode(ARM::t2MOVi);
9221 else if (hasThumb2() &&
9222 ARM_AM::getT2SOImmVal(~Value) != -1) {
9223 TmpInst.setOpcode(ARM::t2MVNi);
9224 Value = ~Value;
9225 }
9226 else if (hasV8MBaseline() &&
9227 Value >=0 && Value < 65536) {
9228 TmpInst.setOpcode(ARM::t2MOVi16);
9229 MovHasS = false;
9230 }
9231 else
9232 UseMov = false;
9233 }
9234 if (UseMov) {
9235 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9236 TmpInst.addOperand(MCOperand::createImm(Value)); // Immediate
9237 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
9238 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9239 if (MovHasS)
9240 TmpInst.addOperand(MCOperand::createReg(0)); // S
9241 Inst = TmpInst;
9242 return true;
9243 }
9244 }
9245 // No opportunity to use MOV/MVN create constant pool
9246 const MCExpr *CPLoc =
9247 getTargetStreamer().addConstantPoolEntry(SubExprVal,
9248 PoolOperand.getStartLoc());
9249 TmpInst.addOperand(Inst.getOperand(0)); // Rt
9250 TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool
9251 if (TmpInst.getOpcode() == ARM::LDRi12)
9252 TmpInst.addOperand(MCOperand::createImm(0)); // unused offset
9253 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
9254 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9255 Inst = TmpInst;
9256 return true;
9257 }
9258 // Handle NEON VST complex aliases.
9259 case ARM::VST1LNdWB_register_Asm_8:
9260 case ARM::VST1LNdWB_register_Asm_16:
9261 case ARM::VST1LNdWB_register_Asm_32: {
9262 MCInst TmpInst;
9263 // Shuffle the operands around so the lane index operand is in the
9264 // right place.
9265 unsigned Spacing;
9266 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9267 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9268 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9269 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9270 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9271 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9272 TmpInst.addOperand(Inst.getOperand(1)); // lane
9273 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9274 TmpInst.addOperand(Inst.getOperand(6));
9275 Inst = TmpInst;
9276 return true;
9277 }
9278
9279 case ARM::VST2LNdWB_register_Asm_8:
9280 case ARM::VST2LNdWB_register_Asm_16:
9281 case ARM::VST2LNdWB_register_Asm_32:
9282 case ARM::VST2LNqWB_register_Asm_16:
9283 case ARM::VST2LNqWB_register_Asm_32: {
9284 MCInst TmpInst;
9285 // Shuffle the operands around so the lane index operand is in the
9286 // right place.
9287 unsigned Spacing;
9288 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9289 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9290 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9291 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9292 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9293 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9295 Spacing));
9296 TmpInst.addOperand(Inst.getOperand(1)); // lane
9297 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9298 TmpInst.addOperand(Inst.getOperand(6));
9299 Inst = TmpInst;
9300 return true;
9301 }
9302
9303 case ARM::VST3LNdWB_register_Asm_8:
9304 case ARM::VST3LNdWB_register_Asm_16:
9305 case ARM::VST3LNdWB_register_Asm_32:
9306 case ARM::VST3LNqWB_register_Asm_16:
9307 case ARM::VST3LNqWB_register_Asm_32: {
9308 MCInst TmpInst;
9309 // Shuffle the operands around so the lane index operand is in the
9310 // right place.
9311 unsigned Spacing;
9312 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9313 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9314 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9315 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9316 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9317 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9319 Spacing));
9321 Spacing * 2));
9322 TmpInst.addOperand(Inst.getOperand(1)); // lane
9323 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9324 TmpInst.addOperand(Inst.getOperand(6));
9325 Inst = TmpInst;
9326 return true;
9327 }
9328
9329 case ARM::VST4LNdWB_register_Asm_8:
9330 case ARM::VST4LNdWB_register_Asm_16:
9331 case ARM::VST4LNdWB_register_Asm_32:
9332 case ARM::VST4LNqWB_register_Asm_16:
9333 case ARM::VST4LNqWB_register_Asm_32: {
9334 MCInst TmpInst;
9335 // Shuffle the operands around so the lane index operand is in the
9336 // right place.
9337 unsigned Spacing;
9338 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9339 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9340 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9341 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9342 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9343 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9345 Spacing));
9347 Spacing * 2));
9349 Spacing * 3));
9350 TmpInst.addOperand(Inst.getOperand(1)); // lane
9351 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9352 TmpInst.addOperand(Inst.getOperand(6));
9353 Inst = TmpInst;
9354 return true;
9355 }
9356
9357 case ARM::VST1LNdWB_fixed_Asm_8:
9358 case ARM::VST1LNdWB_fixed_Asm_16:
9359 case ARM::VST1LNdWB_fixed_Asm_32: {
9360 MCInst TmpInst;
9361 // Shuffle the operands around so the lane index operand is in the
9362 // right place.
9363 unsigned Spacing;
9364 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9365 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9366 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9367 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9368 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9369 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9370 TmpInst.addOperand(Inst.getOperand(1)); // lane
9371 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9372 TmpInst.addOperand(Inst.getOperand(5));
9373 Inst = TmpInst;
9374 return true;
9375 }
9376
9377 case ARM::VST2LNdWB_fixed_Asm_8:
9378 case ARM::VST2LNdWB_fixed_Asm_16:
9379 case ARM::VST2LNdWB_fixed_Asm_32:
9380 case ARM::VST2LNqWB_fixed_Asm_16:
9381 case ARM::VST2LNqWB_fixed_Asm_32: {
9382 MCInst TmpInst;
9383 // Shuffle the operands around so the lane index operand is in the
9384 // right place.
9385 unsigned Spacing;
9386 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9387 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9388 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9389 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9390 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9391 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9393 Spacing));
9394 TmpInst.addOperand(Inst.getOperand(1)); // lane
9395 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9396 TmpInst.addOperand(Inst.getOperand(5));
9397 Inst = TmpInst;
9398 return true;
9399 }
9400
9401 case ARM::VST3LNdWB_fixed_Asm_8:
9402 case ARM::VST3LNdWB_fixed_Asm_16:
9403 case ARM::VST3LNdWB_fixed_Asm_32:
9404 case ARM::VST3LNqWB_fixed_Asm_16:
9405 case ARM::VST3LNqWB_fixed_Asm_32: {
9406 MCInst TmpInst;
9407 // Shuffle the operands around so the lane index operand is in the
9408 // right place.
9409 unsigned Spacing;
9410 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9411 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9412 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9413 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9414 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9415 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9417 Spacing));
9419 Spacing * 2));
9420 TmpInst.addOperand(Inst.getOperand(1)); // lane
9421 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9422 TmpInst.addOperand(Inst.getOperand(5));
9423 Inst = TmpInst;
9424 return true;
9425 }
9426
9427 case ARM::VST4LNdWB_fixed_Asm_8:
9428 case ARM::VST4LNdWB_fixed_Asm_16:
9429 case ARM::VST4LNdWB_fixed_Asm_32:
9430 case ARM::VST4LNqWB_fixed_Asm_16:
9431 case ARM::VST4LNqWB_fixed_Asm_32: {
9432 MCInst TmpInst;
9433 // Shuffle the operands around so the lane index operand is in the
9434 // right place.
9435 unsigned Spacing;
9436 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9437 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9438 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9439 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9440 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9441 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9443 Spacing));
9445 Spacing * 2));
9447 Spacing * 3));
9448 TmpInst.addOperand(Inst.getOperand(1)); // lane
9449 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9450 TmpInst.addOperand(Inst.getOperand(5));
9451 Inst = TmpInst;
9452 return true;
9453 }
9454
9455 case ARM::VST1LNdAsm_8:
9456 case ARM::VST1LNdAsm_16:
9457 case ARM::VST1LNdAsm_32: {
9458 MCInst TmpInst;
9459 // Shuffle the operands around so the lane index operand is in the
9460 // right place.
9461 unsigned Spacing;
9462 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9463 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9464 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9465 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9466 TmpInst.addOperand(Inst.getOperand(1)); // lane
9467 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9468 TmpInst.addOperand(Inst.getOperand(5));
9469 Inst = TmpInst;
9470 return true;
9471 }
9472
9473 case ARM::VST2LNdAsm_8:
9474 case ARM::VST2LNdAsm_16:
9475 case ARM::VST2LNdAsm_32:
9476 case ARM::VST2LNqAsm_16:
9477 case ARM::VST2LNqAsm_32: {
9478 MCInst TmpInst;
9479 // Shuffle the operands around so the lane index operand is in the
9480 // right place.
9481 unsigned Spacing;
9482 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9483 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9484 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9485 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9487 Spacing));
9488 TmpInst.addOperand(Inst.getOperand(1)); // lane
9489 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9490 TmpInst.addOperand(Inst.getOperand(5));
9491 Inst = TmpInst;
9492 return true;
9493 }
9494
9495 case ARM::VST3LNdAsm_8:
9496 case ARM::VST3LNdAsm_16:
9497 case ARM::VST3LNdAsm_32:
9498 case ARM::VST3LNqAsm_16:
9499 case ARM::VST3LNqAsm_32: {
9500 MCInst TmpInst;
9501 // Shuffle the operands around so the lane index operand is in the
9502 // right place.
9503 unsigned Spacing;
9504 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9505 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9506 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9507 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9509 Spacing));
9511 Spacing * 2));
9512 TmpInst.addOperand(Inst.getOperand(1)); // lane
9513 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9514 TmpInst.addOperand(Inst.getOperand(5));
9515 Inst = TmpInst;
9516 return true;
9517 }
9518
9519 case ARM::VST4LNdAsm_8:
9520 case ARM::VST4LNdAsm_16:
9521 case ARM::VST4LNdAsm_32:
9522 case ARM::VST4LNqAsm_16:
9523 case ARM::VST4LNqAsm_32: {
9524 MCInst TmpInst;
9525 // Shuffle the operands around so the lane index operand is in the
9526 // right place.
9527 unsigned Spacing;
9528 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
9529 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9530 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9531 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9533 Spacing));
9535 Spacing * 2));
9537 Spacing * 3));
9538 TmpInst.addOperand(Inst.getOperand(1)); // lane
9539 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9540 TmpInst.addOperand(Inst.getOperand(5));
9541 Inst = TmpInst;
9542 return true;
9543 }
9544
9545 // Handle NEON VLD complex aliases.
9546 case ARM::VLD1LNdWB_register_Asm_8:
9547 case ARM::VLD1LNdWB_register_Asm_16:
9548 case ARM::VLD1LNdWB_register_Asm_32: {
9549 MCInst TmpInst;
9550 // Shuffle the operands around so the lane index operand is in the
9551 // right place.
9552 unsigned Spacing;
9553 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9554 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9555 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9556 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9557 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9558 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9559 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9560 TmpInst.addOperand(Inst.getOperand(1)); // lane
9561 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9562 TmpInst.addOperand(Inst.getOperand(6));
9563 Inst = TmpInst;
9564 return true;
9565 }
9566
9567 case ARM::VLD2LNdWB_register_Asm_8:
9568 case ARM::VLD2LNdWB_register_Asm_16:
9569 case ARM::VLD2LNdWB_register_Asm_32:
9570 case ARM::VLD2LNqWB_register_Asm_16:
9571 case ARM::VLD2LNqWB_register_Asm_32: {
9572 MCInst TmpInst;
9573 // Shuffle the operands around so the lane index operand is in the
9574 // right place.
9575 unsigned Spacing;
9576 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9577 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9579 Spacing));
9580 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9581 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9582 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9583 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9584 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9586 Spacing));
9587 TmpInst.addOperand(Inst.getOperand(1)); // lane
9588 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9589 TmpInst.addOperand(Inst.getOperand(6));
9590 Inst = TmpInst;
9591 return true;
9592 }
9593
9594 case ARM::VLD3LNdWB_register_Asm_8:
9595 case ARM::VLD3LNdWB_register_Asm_16:
9596 case ARM::VLD3LNdWB_register_Asm_32:
9597 case ARM::VLD3LNqWB_register_Asm_16:
9598 case ARM::VLD3LNqWB_register_Asm_32: {
9599 MCInst TmpInst;
9600 // Shuffle the operands around so the lane index operand is in the
9601 // right place.
9602 unsigned Spacing;
9603 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9604 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9606 Spacing));
9608 Spacing * 2));
9609 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9610 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9611 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9612 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9613 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9615 Spacing));
9617 Spacing * 2));
9618 TmpInst.addOperand(Inst.getOperand(1)); // lane
9619 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9620 TmpInst.addOperand(Inst.getOperand(6));
9621 Inst = TmpInst;
9622 return true;
9623 }
9624
9625 case ARM::VLD4LNdWB_register_Asm_8:
9626 case ARM::VLD4LNdWB_register_Asm_16:
9627 case ARM::VLD4LNdWB_register_Asm_32:
9628 case ARM::VLD4LNqWB_register_Asm_16:
9629 case ARM::VLD4LNqWB_register_Asm_32: {
9630 MCInst TmpInst;
9631 // Shuffle the operands around so the lane index operand is in the
9632 // right place.
9633 unsigned Spacing;
9634 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9635 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9637 Spacing));
9639 Spacing * 2));
9641 Spacing * 3));
9642 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9643 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9644 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9645 TmpInst.addOperand(Inst.getOperand(4)); // Rm
9646 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9648 Spacing));
9650 Spacing * 2));
9652 Spacing * 3));
9653 TmpInst.addOperand(Inst.getOperand(1)); // lane
9654 TmpInst.addOperand(Inst.getOperand(5)); // CondCode
9655 TmpInst.addOperand(Inst.getOperand(6));
9656 Inst = TmpInst;
9657 return true;
9658 }
9659
9660 case ARM::VLD1LNdWB_fixed_Asm_8:
9661 case ARM::VLD1LNdWB_fixed_Asm_16:
9662 case ARM::VLD1LNdWB_fixed_Asm_32: {
9663 MCInst TmpInst;
9664 // Shuffle the operands around so the lane index operand is in the
9665 // right place.
9666 unsigned Spacing;
9667 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9668 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9669 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9670 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9671 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9672 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9673 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9674 TmpInst.addOperand(Inst.getOperand(1)); // lane
9675 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9676 TmpInst.addOperand(Inst.getOperand(5));
9677 Inst = TmpInst;
9678 return true;
9679 }
9680
9681 case ARM::VLD2LNdWB_fixed_Asm_8:
9682 case ARM::VLD2LNdWB_fixed_Asm_16:
9683 case ARM::VLD2LNdWB_fixed_Asm_32:
9684 case ARM::VLD2LNqWB_fixed_Asm_16:
9685 case ARM::VLD2LNqWB_fixed_Asm_32: {
9686 MCInst TmpInst;
9687 // Shuffle the operands around so the lane index operand is in the
9688 // right place.
9689 unsigned Spacing;
9690 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9691 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9693 Spacing));
9694 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9695 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9696 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9697 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9698 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9700 Spacing));
9701 TmpInst.addOperand(Inst.getOperand(1)); // lane
9702 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9703 TmpInst.addOperand(Inst.getOperand(5));
9704 Inst = TmpInst;
9705 return true;
9706 }
9707
9708 case ARM::VLD3LNdWB_fixed_Asm_8:
9709 case ARM::VLD3LNdWB_fixed_Asm_16:
9710 case ARM::VLD3LNdWB_fixed_Asm_32:
9711 case ARM::VLD3LNqWB_fixed_Asm_16:
9712 case ARM::VLD3LNqWB_fixed_Asm_32: {
9713 MCInst TmpInst;
9714 // Shuffle the operands around so the lane index operand is in the
9715 // right place.
9716 unsigned Spacing;
9717 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9718 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9720 Spacing));
9722 Spacing * 2));
9723 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9724 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9725 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9726 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9727 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9729 Spacing));
9731 Spacing * 2));
9732 TmpInst.addOperand(Inst.getOperand(1)); // lane
9733 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9734 TmpInst.addOperand(Inst.getOperand(5));
9735 Inst = TmpInst;
9736 return true;
9737 }
9738
9739 case ARM::VLD4LNdWB_fixed_Asm_8:
9740 case ARM::VLD4LNdWB_fixed_Asm_16:
9741 case ARM::VLD4LNdWB_fixed_Asm_32:
9742 case ARM::VLD4LNqWB_fixed_Asm_16:
9743 case ARM::VLD4LNqWB_fixed_Asm_32: {
9744 MCInst TmpInst;
9745 // Shuffle the operands around so the lane index operand is in the
9746 // right place.
9747 unsigned Spacing;
9748 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9749 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9751 Spacing));
9753 Spacing * 2));
9755 Spacing * 3));
9756 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
9757 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9758 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9759 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9760 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9762 Spacing));
9764 Spacing * 2));
9766 Spacing * 3));
9767 TmpInst.addOperand(Inst.getOperand(1)); // lane
9768 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9769 TmpInst.addOperand(Inst.getOperand(5));
9770 Inst = TmpInst;
9771 return true;
9772 }
9773
9774 case ARM::VLD1LNdAsm_8:
9775 case ARM::VLD1LNdAsm_16:
9776 case ARM::VLD1LNdAsm_32: {
9777 MCInst TmpInst;
9778 // Shuffle the operands around so the lane index operand is in the
9779 // right place.
9780 unsigned Spacing;
9781 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9782 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9783 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9784 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9785 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9786 TmpInst.addOperand(Inst.getOperand(1)); // lane
9787 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9788 TmpInst.addOperand(Inst.getOperand(5));
9789 Inst = TmpInst;
9790 return true;
9791 }
9792
9793 case ARM::VLD2LNdAsm_8:
9794 case ARM::VLD2LNdAsm_16:
9795 case ARM::VLD2LNdAsm_32:
9796 case ARM::VLD2LNqAsm_16:
9797 case ARM::VLD2LNqAsm_32: {
9798 MCInst TmpInst;
9799 // Shuffle the operands around so the lane index operand is in the
9800 // right place.
9801 unsigned Spacing;
9802 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9803 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9805 Spacing));
9806 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9807 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9808 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9810 Spacing));
9811 TmpInst.addOperand(Inst.getOperand(1)); // lane
9812 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9813 TmpInst.addOperand(Inst.getOperand(5));
9814 Inst = TmpInst;
9815 return true;
9816 }
9817
9818 case ARM::VLD3LNdAsm_8:
9819 case ARM::VLD3LNdAsm_16:
9820 case ARM::VLD3LNdAsm_32:
9821 case ARM::VLD3LNqAsm_16:
9822 case ARM::VLD3LNqAsm_32: {
9823 MCInst TmpInst;
9824 // Shuffle the operands around so the lane index operand is in the
9825 // right place.
9826 unsigned Spacing;
9827 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9828 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9830 Spacing));
9832 Spacing * 2));
9833 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9834 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9835 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9837 Spacing));
9839 Spacing * 2));
9840 TmpInst.addOperand(Inst.getOperand(1)); // lane
9841 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9842 TmpInst.addOperand(Inst.getOperand(5));
9843 Inst = TmpInst;
9844 return true;
9845 }
9846
9847 case ARM::VLD4LNdAsm_8:
9848 case ARM::VLD4LNdAsm_16:
9849 case ARM::VLD4LNdAsm_32:
9850 case ARM::VLD4LNqAsm_16:
9851 case ARM::VLD4LNqAsm_32: {
9852 MCInst TmpInst;
9853 // Shuffle the operands around so the lane index operand is in the
9854 // right place.
9855 unsigned Spacing;
9856 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9857 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9859 Spacing));
9861 Spacing * 2));
9863 Spacing * 3));
9864 TmpInst.addOperand(Inst.getOperand(2)); // Rn
9865 TmpInst.addOperand(Inst.getOperand(3)); // alignment
9866 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
9868 Spacing));
9870 Spacing * 2));
9872 Spacing * 3));
9873 TmpInst.addOperand(Inst.getOperand(1)); // lane
9874 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9875 TmpInst.addOperand(Inst.getOperand(5));
9876 Inst = TmpInst;
9877 return true;
9878 }
9879
9880 // VLD3DUP single 3-element structure to all lanes instructions.
9881 case ARM::VLD3DUPdAsm_8:
9882 case ARM::VLD3DUPdAsm_16:
9883 case ARM::VLD3DUPdAsm_32:
9884 case ARM::VLD3DUPqAsm_8:
9885 case ARM::VLD3DUPqAsm_16:
9886 case ARM::VLD3DUPqAsm_32: {
9887 MCInst TmpInst;
9888 unsigned Spacing;
9889 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9890 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9892 Spacing));
9894 Spacing * 2));
9895 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9896 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9897 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9898 TmpInst.addOperand(Inst.getOperand(4));
9899 Inst = TmpInst;
9900 return true;
9901 }
9902
9903 case ARM::VLD3DUPdWB_fixed_Asm_8:
9904 case ARM::VLD3DUPdWB_fixed_Asm_16:
9905 case ARM::VLD3DUPdWB_fixed_Asm_32:
9906 case ARM::VLD3DUPqWB_fixed_Asm_8:
9907 case ARM::VLD3DUPqWB_fixed_Asm_16:
9908 case ARM::VLD3DUPqWB_fixed_Asm_32: {
9909 MCInst TmpInst;
9910 unsigned Spacing;
9911 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9912 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9914 Spacing));
9916 Spacing * 2));
9917 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9918 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9919 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9920 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9921 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9922 TmpInst.addOperand(Inst.getOperand(4));
9923 Inst = TmpInst;
9924 return true;
9925 }
9926
9927 case ARM::VLD3DUPdWB_register_Asm_8:
9928 case ARM::VLD3DUPdWB_register_Asm_16:
9929 case ARM::VLD3DUPdWB_register_Asm_32:
9930 case ARM::VLD3DUPqWB_register_Asm_8:
9931 case ARM::VLD3DUPqWB_register_Asm_16:
9932 case ARM::VLD3DUPqWB_register_Asm_32: {
9933 MCInst TmpInst;
9934 unsigned Spacing;
9935 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9936 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9938 Spacing));
9940 Spacing * 2));
9941 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9942 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9943 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9944 TmpInst.addOperand(Inst.getOperand(3)); // Rm
9945 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9946 TmpInst.addOperand(Inst.getOperand(5));
9947 Inst = TmpInst;
9948 return true;
9949 }
9950
9951 // VLD3 multiple 3-element structure instructions.
9952 case ARM::VLD3dAsm_8:
9953 case ARM::VLD3dAsm_16:
9954 case ARM::VLD3dAsm_32:
9955 case ARM::VLD3qAsm_8:
9956 case ARM::VLD3qAsm_16:
9957 case ARM::VLD3qAsm_32: {
9958 MCInst TmpInst;
9959 unsigned Spacing;
9960 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9961 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9963 Spacing));
9965 Spacing * 2));
9966 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9967 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9968 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9969 TmpInst.addOperand(Inst.getOperand(4));
9970 Inst = TmpInst;
9971 return true;
9972 }
9973
9974 case ARM::VLD3dWB_fixed_Asm_8:
9975 case ARM::VLD3dWB_fixed_Asm_16:
9976 case ARM::VLD3dWB_fixed_Asm_32:
9977 case ARM::VLD3qWB_fixed_Asm_8:
9978 case ARM::VLD3qWB_fixed_Asm_16:
9979 case ARM::VLD3qWB_fixed_Asm_32: {
9980 MCInst TmpInst;
9981 unsigned Spacing;
9982 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
9983 TmpInst.addOperand(Inst.getOperand(0)); // Vd
9985 Spacing));
9987 Spacing * 2));
9988 TmpInst.addOperand(Inst.getOperand(1)); // Rn
9989 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
9990 TmpInst.addOperand(Inst.getOperand(2)); // alignment
9991 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
9992 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9993 TmpInst.addOperand(Inst.getOperand(4));
9994 Inst = TmpInst;
9995 return true;
9996 }
9997
9998 case ARM::VLD3dWB_register_Asm_8:
9999 case ARM::VLD3dWB_register_Asm_16:
10000 case ARM::VLD3dWB_register_Asm_32:
10001 case ARM::VLD3qWB_register_Asm_8:
10002 case ARM::VLD3qWB_register_Asm_16:
10003 case ARM::VLD3qWB_register_Asm_32: {
10004 MCInst TmpInst;
10005 unsigned Spacing;
10006 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10007 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10009 Spacing));
10011 Spacing * 2));
10012 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10013 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10014 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10015 TmpInst.addOperand(Inst.getOperand(3)); // Rm
10016 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10017 TmpInst.addOperand(Inst.getOperand(5));
10018 Inst = TmpInst;
10019 return true;
10020 }
10021
10022 // VLD4DUP single 3-element structure to all lanes instructions.
10023 case ARM::VLD4DUPdAsm_8:
10024 case ARM::VLD4DUPdAsm_16:
10025 case ARM::VLD4DUPdAsm_32:
10026 case ARM::VLD4DUPqAsm_8:
10027 case ARM::VLD4DUPqAsm_16:
10028 case ARM::VLD4DUPqAsm_32: {
10029 MCInst TmpInst;
10030 unsigned Spacing;
10031 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10032 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10034 Spacing));
10036 Spacing * 2));
10038 Spacing * 3));
10039 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10040 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10041 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10042 TmpInst.addOperand(Inst.getOperand(4));
10043 Inst = TmpInst;
10044 return true;
10045 }
10046
10047 case ARM::VLD4DUPdWB_fixed_Asm_8:
10048 case ARM::VLD4DUPdWB_fixed_Asm_16:
10049 case ARM::VLD4DUPdWB_fixed_Asm_32:
10050 case ARM::VLD4DUPqWB_fixed_Asm_8:
10051 case ARM::VLD4DUPqWB_fixed_Asm_16:
10052 case ARM::VLD4DUPqWB_fixed_Asm_32: {
10053 MCInst TmpInst;
10054 unsigned Spacing;
10055 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10056 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10058 Spacing));
10060 Spacing * 2));
10062 Spacing * 3));
10063 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10064 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10065 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10066 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
10067 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10068 TmpInst.addOperand(Inst.getOperand(4));
10069 Inst = TmpInst;
10070 return true;
10071 }
10072
10073 case ARM::VLD4DUPdWB_register_Asm_8:
10074 case ARM::VLD4DUPdWB_register_Asm_16:
10075 case ARM::VLD4DUPdWB_register_Asm_32:
10076 case ARM::VLD4DUPqWB_register_Asm_8:
10077 case ARM::VLD4DUPqWB_register_Asm_16:
10078 case ARM::VLD4DUPqWB_register_Asm_32: {
10079 MCInst TmpInst;
10080 unsigned Spacing;
10081 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10082 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10084 Spacing));
10086 Spacing * 2));
10088 Spacing * 3));
10089 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10090 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10091 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10092 TmpInst.addOperand(Inst.getOperand(3)); // Rm
10093 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10094 TmpInst.addOperand(Inst.getOperand(5));
10095 Inst = TmpInst;
10096 return true;
10097 }
10098
10099 // VLD4 multiple 4-element structure instructions.
10100 case ARM::VLD4dAsm_8:
10101 case ARM::VLD4dAsm_16:
10102 case ARM::VLD4dAsm_32:
10103 case ARM::VLD4qAsm_8:
10104 case ARM::VLD4qAsm_16:
10105 case ARM::VLD4qAsm_32: {
10106 MCInst TmpInst;
10107 unsigned Spacing;
10108 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10109 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10111 Spacing));
10113 Spacing * 2));
10115 Spacing * 3));
10116 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10117 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10118 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10119 TmpInst.addOperand(Inst.getOperand(4));
10120 Inst = TmpInst;
10121 return true;
10122 }
10123
10124 case ARM::VLD4dWB_fixed_Asm_8:
10125 case ARM::VLD4dWB_fixed_Asm_16:
10126 case ARM::VLD4dWB_fixed_Asm_32:
10127 case ARM::VLD4qWB_fixed_Asm_8:
10128 case ARM::VLD4qWB_fixed_Asm_16:
10129 case ARM::VLD4qWB_fixed_Asm_32: {
10130 MCInst TmpInst;
10131 unsigned Spacing;
10132 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10133 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10135 Spacing));
10137 Spacing * 2));
10139 Spacing * 3));
10140 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10141 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10142 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10143 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
10144 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10145 TmpInst.addOperand(Inst.getOperand(4));
10146 Inst = TmpInst;
10147 return true;
10148 }
10149
10150 case ARM::VLD4dWB_register_Asm_8:
10151 case ARM::VLD4dWB_register_Asm_16:
10152 case ARM::VLD4dWB_register_Asm_32:
10153 case ARM::VLD4qWB_register_Asm_8:
10154 case ARM::VLD4qWB_register_Asm_16:
10155 case ARM::VLD4qWB_register_Asm_32: {
10156 MCInst TmpInst;
10157 unsigned Spacing;
10158 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
10159 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10161 Spacing));
10163 Spacing * 2));
10165 Spacing * 3));
10166 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10167 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10168 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10169 TmpInst.addOperand(Inst.getOperand(3)); // Rm
10170 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10171 TmpInst.addOperand(Inst.getOperand(5));
10172 Inst = TmpInst;
10173 return true;
10174 }
10175
10176 // VST3 multiple 3-element structure instructions.
10177 case ARM::VST3dAsm_8:
10178 case ARM::VST3dAsm_16:
10179 case ARM::VST3dAsm_32:
10180 case ARM::VST3qAsm_8:
10181 case ARM::VST3qAsm_16:
10182 case ARM::VST3qAsm_32: {
10183 MCInst TmpInst;
10184 unsigned Spacing;
10185 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10186 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10187 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10188 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10190 Spacing));
10192 Spacing * 2));
10193 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10194 TmpInst.addOperand(Inst.getOperand(4));
10195 Inst = TmpInst;
10196 return true;
10197 }
10198
10199 case ARM::VST3dWB_fixed_Asm_8:
10200 case ARM::VST3dWB_fixed_Asm_16:
10201 case ARM::VST3dWB_fixed_Asm_32:
10202 case ARM::VST3qWB_fixed_Asm_8:
10203 case ARM::VST3qWB_fixed_Asm_16:
10204 case ARM::VST3qWB_fixed_Asm_32: {
10205 MCInst TmpInst;
10206 unsigned Spacing;
10207 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10208 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10209 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10210 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10211 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
10212 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10214 Spacing));
10216 Spacing * 2));
10217 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10218 TmpInst.addOperand(Inst.getOperand(4));
10219 Inst = TmpInst;
10220 return true;
10221 }
10222
10223 case ARM::VST3dWB_register_Asm_8:
10224 case ARM::VST3dWB_register_Asm_16:
10225 case ARM::VST3dWB_register_Asm_32:
10226 case ARM::VST3qWB_register_Asm_8:
10227 case ARM::VST3qWB_register_Asm_16:
10228 case ARM::VST3qWB_register_Asm_32: {
10229 MCInst TmpInst;
10230 unsigned Spacing;
10231 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10232 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10233 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10234 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10235 TmpInst.addOperand(Inst.getOperand(3)); // Rm
10236 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10238 Spacing));
10240 Spacing * 2));
10241 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10242 TmpInst.addOperand(Inst.getOperand(5));
10243 Inst = TmpInst;
10244 return true;
10245 }
10246
10247 // VST4 multiple 3-element structure instructions.
10248 case ARM::VST4dAsm_8:
10249 case ARM::VST4dAsm_16:
10250 case ARM::VST4dAsm_32:
10251 case ARM::VST4qAsm_8:
10252 case ARM::VST4qAsm_16:
10253 case ARM::VST4qAsm_32: {
10254 MCInst TmpInst;
10255 unsigned Spacing;
10256 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10257 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10258 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10259 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10261 Spacing));
10263 Spacing * 2));
10265 Spacing * 3));
10266 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10267 TmpInst.addOperand(Inst.getOperand(4));
10268 Inst = TmpInst;
10269 return true;
10270 }
10271
10272 case ARM::VST4dWB_fixed_Asm_8:
10273 case ARM::VST4dWB_fixed_Asm_16:
10274 case ARM::VST4dWB_fixed_Asm_32:
10275 case ARM::VST4qWB_fixed_Asm_8:
10276 case ARM::VST4qWB_fixed_Asm_16:
10277 case ARM::VST4qWB_fixed_Asm_32: {
10278 MCInst TmpInst;
10279 unsigned Spacing;
10280 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10281 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10282 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10283 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10284 TmpInst.addOperand(MCOperand::createReg(0)); // Rm
10285 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10287 Spacing));
10289 Spacing * 2));
10291 Spacing * 3));
10292 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10293 TmpInst.addOperand(Inst.getOperand(4));
10294 Inst = TmpInst;
10295 return true;
10296 }
10297
10298 case ARM::VST4dWB_register_Asm_8:
10299 case ARM::VST4dWB_register_Asm_16:
10300 case ARM::VST4dWB_register_Asm_32:
10301 case ARM::VST4qWB_register_Asm_8:
10302 case ARM::VST4qWB_register_Asm_16:
10303 case ARM::VST4qWB_register_Asm_32: {
10304 MCInst TmpInst;
10305 unsigned Spacing;
10306 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
10307 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10308 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
10309 TmpInst.addOperand(Inst.getOperand(2)); // alignment
10310 TmpInst.addOperand(Inst.getOperand(3)); // Rm
10311 TmpInst.addOperand(Inst.getOperand(0)); // Vd
10313 Spacing));
10315 Spacing * 2));
10317 Spacing * 3));
10318 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10319 TmpInst.addOperand(Inst.getOperand(5));
10320 Inst = TmpInst;
10321 return true;
10322 }
10323
10324 // Handle encoding choice for the shift-immediate instructions.
10325 case ARM::t2LSLri:
10326 case ARM::t2LSRri:
10327 case ARM::t2ASRri:
10328 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10329 isARMLowRegister(Inst.getOperand(1).getReg()) &&
10330 Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10331 !HasWideQualifier) {
10332 unsigned NewOpc;
10333 switch (Inst.getOpcode()) {
10334 default: llvm_unreachable("unexpected opcode");
10335 case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
10336 case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
10337 case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
10338 }
10339 // The Thumb1 operands aren't in the same order. Awesome, eh?
10340 MCInst TmpInst;
10341 TmpInst.setOpcode(NewOpc);
10342 TmpInst.addOperand(Inst.getOperand(0));
10343 TmpInst.addOperand(Inst.getOperand(5));
10344 TmpInst.addOperand(Inst.getOperand(1));
10345 TmpInst.addOperand(Inst.getOperand(2));
10346 TmpInst.addOperand(Inst.getOperand(3));
10347 TmpInst.addOperand(Inst.getOperand(4));
10348 Inst = TmpInst;
10349 return true;
10350 }
10351 return false;
10352
10353 // Handle the Thumb2 mode MOV complex aliases.
10354 case ARM::t2MOVsr:
10355 case ARM::t2MOVSsr: {
10356 // Which instruction to expand to depends on the CCOut operand and
10357 // whether we're in an IT block if the register operands are low
10358 // registers.
10359 bool isNarrow = false;
10360 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10361 isARMLowRegister(Inst.getOperand(1).getReg()) &&
10362 isARMLowRegister(Inst.getOperand(2).getReg()) &&
10363 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
10364 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr) &&
10365 !HasWideQualifier)
10366 isNarrow = true;
10367 MCInst TmpInst;
10368 unsigned newOpc;
10369 switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
10370 default: llvm_unreachable("unexpected opcode!");
10371 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
10372 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
10373 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
10374 case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr; break;
10375 }
10376 TmpInst.setOpcode(newOpc);
10377 TmpInst.addOperand(Inst.getOperand(0)); // Rd
10378 if (isNarrow)
10380 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
10381 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10382 TmpInst.addOperand(Inst.getOperand(2)); // Rm
10383 TmpInst.addOperand(Inst.getOperand(4)); // CondCode
10384 TmpInst.addOperand(Inst.getOperand(5));
10385 if (!isNarrow)
10387 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
10388 Inst = TmpInst;
10389 return true;
10390 }
10391 case ARM::t2MOVsi:
10392 case ARM::t2MOVSsi: {
10393 // Which instruction to expand to depends on the CCOut operand and
10394 // whether we're in an IT block if the register operands are low
10395 // registers.
10396 bool isNarrow = false;
10397 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10398 isARMLowRegister(Inst.getOperand(1).getReg()) &&
10399 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi) &&
10400 !HasWideQualifier)
10401 isNarrow = true;
10402 MCInst TmpInst;
10403 unsigned newOpc;
10404 unsigned Shift = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
10405 unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
10406 bool isMov = false;
10407 // MOV rd, rm, LSL #0 is actually a MOV instruction
10408 if (Shift == ARM_AM::lsl && Amount == 0) {
10409 isMov = true;
10410 // The 16-bit encoding of MOV rd, rm, LSL #N is explicitly encoding T2 of
10411 // MOV (register) in the ARMv8-A and ARMv8-M manuals, and immediate 0 is
10412 // unpredictable in an IT block so the 32-bit encoding T3 has to be used
10413 // instead.
10414 if (inITBlock()) {
10415 isNarrow = false;
10416 }
10417 newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr;
10418 } else {
10419 switch(Shift) {
10420 default: llvm_unreachable("unexpected opcode!");
10421 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
10422 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
10423 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
10424 case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
10425 case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
10426 }
10427 }
10428 if (Amount == 32) Amount = 0;
10429 TmpInst.setOpcode(newOpc);
10430 TmpInst.addOperand(Inst.getOperand(0)); // Rd
10431 if (isNarrow && !isMov)
10433 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
10434 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10435 if (newOpc != ARM::t2RRX && !isMov)
10436 TmpInst.addOperand(MCOperand::createImm(Amount));
10437 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10438 TmpInst.addOperand(Inst.getOperand(4));
10439 if (!isNarrow)
10441 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
10442 Inst = TmpInst;
10443 return true;
10444 }
10445 // Handle the ARM mode MOV complex aliases.
10446 case ARM::ASRr:
10447 case ARM::LSRr:
10448 case ARM::LSLr:
10449 case ARM::RORr: {
10450 ARM_AM::ShiftOpc ShiftTy;
10451 switch(Inst.getOpcode()) {
10452 default: llvm_unreachable("unexpected opcode!");
10453 case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
10454 case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
10455 case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
10456 case ARM::RORr: ShiftTy = ARM_AM::ror; break;
10457 }
10458 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
10459 MCInst TmpInst;
10460 TmpInst.setOpcode(ARM::MOVsr);
10461 TmpInst.addOperand(Inst.getOperand(0)); // Rd
10462 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10463 TmpInst.addOperand(Inst.getOperand(2)); // Rm
10464 TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10465 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10466 TmpInst.addOperand(Inst.getOperand(4));
10467 TmpInst.addOperand(Inst.getOperand(5)); // cc_out
10468 Inst = TmpInst;
10469 return true;
10470 }
10471 case ARM::ASRi:
10472 case ARM::LSRi:
10473 case ARM::LSLi:
10474 case ARM::RORi: {
10475 ARM_AM::ShiftOpc ShiftTy;
10476 switch(Inst.getOpcode()) {
10477 default: llvm_unreachable("unexpected opcode!");
10478 case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
10479 case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
10480 case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
10481 case ARM::RORi: ShiftTy = ARM_AM::ror; break;
10482 }
10483 // A shift by zero is a plain MOVr, not a MOVsi.
10484 unsigned Amt = Inst.getOperand(2).getImm();
10485 unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
10486 // A shift by 32 should be encoded as 0 when permitted
10487 if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
10488 Amt = 0;
10489 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
10490 MCInst TmpInst;
10491 TmpInst.setOpcode(Opc);
10492 TmpInst.addOperand(Inst.getOperand(0)); // Rd
10493 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10494 if (Opc == ARM::MOVsi)
10495 TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10496 TmpInst.addOperand(Inst.getOperand(3)); // CondCode
10497 TmpInst.addOperand(Inst.getOperand(4));
10498 TmpInst.addOperand(Inst.getOperand(5)); // cc_out
10499 Inst = TmpInst;
10500 return true;
10501 }
10502 case ARM::RRXi: {
10503 unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
10504 MCInst TmpInst;
10505 TmpInst.setOpcode(ARM::MOVsi);
10506 TmpInst.addOperand(Inst.getOperand(0)); // Rd
10507 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10508 TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
10509 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10510 TmpInst.addOperand(Inst.getOperand(3));
10511 TmpInst.addOperand(Inst.getOperand(4)); // cc_out
10512 Inst = TmpInst;
10513 return true;
10514 }
10515 case ARM::t2LDMIA_UPD: {
10516 // If this is a load of a single register, then we should use
10517 // a post-indexed LDR instruction instead, per the ARM ARM.
10518 if (Inst.getNumOperands() != 5)
10519 return false;
10520 MCInst TmpInst;
10521 TmpInst.setOpcode(ARM::t2LDR_POST);
10522 TmpInst.addOperand(Inst.getOperand(4)); // Rt
10523 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10524 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10525 TmpInst.addOperand(MCOperand::createImm(4));
10526 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10527 TmpInst.addOperand(Inst.getOperand(3));
10528 Inst = TmpInst;
10529 return true;
10530 }
10531 case ARM::t2STMDB_UPD: {
10532 // If this is a store of a single register, then we should use
10533 // a pre-indexed STR instruction instead, per the ARM ARM.
10534 if (Inst.getNumOperands() != 5)
10535 return false;
10536 MCInst TmpInst;
10537 TmpInst.setOpcode(ARM::t2STR_PRE);
10538 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10539 TmpInst.addOperand(Inst.getOperand(4)); // Rt
10540 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10541 TmpInst.addOperand(MCOperand::createImm(-4));
10542 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10543 TmpInst.addOperand(Inst.getOperand(3));
10544 Inst = TmpInst;
10545 return true;
10546 }
10547 case ARM::LDMIA_UPD:
10548 // If this is a load of a single register via a 'pop', then we should use
10549 // a post-indexed LDR instruction instead, per the ARM ARM.
10550 if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
10551 Inst.getNumOperands() == 5) {
10552 MCInst TmpInst;
10553 TmpInst.setOpcode(ARM::LDR_POST_IMM);
10554 TmpInst.addOperand(Inst.getOperand(4)); // Rt
10555 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10556 TmpInst.addOperand(Inst.getOperand(1)); // Rn
10557 TmpInst.addOperand(MCOperand::createReg(0)); // am2offset
10558 TmpInst.addOperand(MCOperand::createImm(4));
10559 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10560 TmpInst.addOperand(Inst.getOperand(3));
10561 Inst = TmpInst;
10562 return true;
10563 }
10564 break;
10565 case ARM::STMDB_UPD:
10566 // If this is a store of a single register via a 'push', then we should use
10567 // a pre-indexed STR instruction instead, per the ARM ARM.
10568 if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
10569 Inst.getNumOperands() == 5) {
10570 MCInst TmpInst;
10571 TmpInst.setOpcode(ARM::STR_PRE_IMM);
10572 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
10573 TmpInst.addOperand(Inst.getOperand(4)); // Rt
10574 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
10575 TmpInst.addOperand(MCOperand::createImm(-4));
10576 TmpInst.addOperand(Inst.getOperand(2)); // CondCode
10577 TmpInst.addOperand(Inst.getOperand(3));
10578 Inst = TmpInst;
10579 }
10580 break;
10581 case ARM::t2ADDri12:
10582 case ARM::t2SUBri12:
10583 case ARM::t2ADDspImm12:
10584 case ARM::t2SUBspImm12: {
10585 // If the immediate fits for encoding T3 and the generic
10586 // mnemonic was used, encoding T3 is preferred.
10587 const StringRef Token = static_cast<ARMOperand &>(*Operands[0]).getToken();
10588 if ((Token != "add" && Token != "sub") ||
10589 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
10590 break;
10591 switch (Inst.getOpcode()) {
10592 case ARM::t2ADDri12:
10593 Inst.setOpcode(ARM::t2ADDri);
10594 break;
10595 case ARM::t2SUBri12:
10596 Inst.setOpcode(ARM::t2SUBri);
10597 break;
10598 case ARM::t2ADDspImm12:
10599 Inst.setOpcode(ARM::t2ADDspImm);
10600 break;
10601 case ARM::t2SUBspImm12:
10602 Inst.setOpcode(ARM::t2SUBspImm);
10603 break;
10604 }
10605
10606 Inst.addOperand(MCOperand::createReg(0)); // cc_out
10607 return true;
10608 }
10609 case ARM::tADDi8:
10610 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
10611 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
10612 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
10613 // to encoding T1 if <Rd> is omitted."
10614 if (Inst.getOperand(3).isImm() &&
10615 (unsigned)Inst.getOperand(3).getImm() < 8 &&
10616 Operands.size() == MnemonicOpsEndInd + 3) {
10617 Inst.setOpcode(ARM::tADDi3);
10618 return true;
10619 }
10620 break;
10621 case ARM::tSUBi8:
10622 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
10623 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
10624 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
10625 // to encoding T1 if <Rd> is omitted."
10626 if ((unsigned)Inst.getOperand(3).getImm() < 8 &&
10627 Operands.size() == MnemonicOpsEndInd + 3) {
10628 Inst.setOpcode(ARM::tSUBi3);
10629 return true;
10630 }
10631 break;
10632 case ARM::t2ADDri:
10633 case ARM::t2SUBri: {
10634 // If the destination and first source operand are the same, and
10635 // the flags are compatible with the current IT status, use encoding T2
10636 // instead of T3. For compatibility with the system 'as'. Make sure the
10637 // wide encoding wasn't explicit.
10638 if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
10639 !isARMLowRegister(Inst.getOperand(0).getReg()) ||
10640 (Inst.getOperand(2).isImm() &&
10641 (unsigned)Inst.getOperand(2).getImm() > 255) ||
10642 Inst.getOperand(5).getReg() != (inITBlock() ? 0 : ARM::CPSR) ||
10643 HasWideQualifier)
10644 break;
10645 MCInst TmpInst;
10646 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
10647 ARM::tADDi8 : ARM::tSUBi8);
10648 TmpInst.addOperand(Inst.getOperand(0));
10649 TmpInst.addOperand(Inst.getOperand(5));
10650 TmpInst.addOperand(Inst.getOperand(0));
10651 TmpInst.addOperand(Inst.getOperand(2));
10652 TmpInst.addOperand(Inst.getOperand(3));
10653 TmpInst.addOperand(Inst.getOperand(4));
10654 Inst = TmpInst;
10655 return true;
10656 }
10657 case ARM::t2ADDspImm:
10658 case ARM::t2SUBspImm: {
10659 // Prefer T1 encoding if possible
10660 if (Inst.getOperand(5).getReg() != 0 || HasWideQualifier)
10661 break;
10662 unsigned V = Inst.getOperand(2).getImm();
10663 if (V & 3 || V > ((1 << 7) - 1) << 2)
10664 break;
10665 MCInst TmpInst;
10666 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDspImm ? ARM::tADDspi
10667 : ARM::tSUBspi);
10668 TmpInst.addOperand(MCOperand::createReg(ARM::SP)); // destination reg
10669 TmpInst.addOperand(MCOperand::createReg(ARM::SP)); // source reg
10670 TmpInst.addOperand(MCOperand::createImm(V / 4)); // immediate
10671 TmpInst.addOperand(Inst.getOperand(3)); // pred
10672 TmpInst.addOperand(Inst.getOperand(4));
10673 Inst = TmpInst;
10674 return true;
10675 }
10676 case ARM::t2ADDrr: {
10677 // If the destination and first source operand are the same, and
10678 // there's no setting of the flags, use encoding T2 instead of T3.
10679 // Note that this is only for ADD, not SUB. This mirrors the system
10680 // 'as' behaviour. Also take advantage of ADD being commutative.
10681 // Make sure the wide encoding wasn't explicit.
10682 bool Swap = false;
10683 auto DestReg = Inst.getOperand(0).getReg();
10684 bool Transform = DestReg == Inst.getOperand(1).getReg();
10685 if (!Transform && DestReg == Inst.getOperand(2).getReg()) {
10686 Transform = true;
10687 Swap = true;
10688 }
10689 if (!Transform ||
10690 Inst.getOperand(5).getReg() != 0 ||
10691 HasWideQualifier)
10692 break;
10693 MCInst TmpInst;
10694 TmpInst.setOpcode(ARM::tADDhirr);
10695 TmpInst.addOperand(Inst.getOperand(0));
10696 TmpInst.addOperand(Inst.getOperand(0));
10697 TmpInst.addOperand(Inst.getOperand(Swap ? 1 : 2));
10698 TmpInst.addOperand(Inst.getOperand(3));
10699 TmpInst.addOperand(Inst.getOperand(4));
10700 Inst = TmpInst;
10701 return true;
10702 }
10703 case ARM::tADDrSP:
10704 // If the non-SP source operand and the destination operand are not the
10705 // same, we need to use the 32-bit encoding if it's available.
10706 if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
10707 Inst.setOpcode(ARM::t2ADDrr);
10708 Inst.addOperand(MCOperand::createReg(0)); // cc_out
10709 return true;
10710 }
10711 break;
10712 case ARM::tB:
10713 // A Thumb conditional branch outside of an IT block is a tBcc.
10714 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
10715 Inst.setOpcode(ARM::tBcc);
10716 return true;
10717 }
10718 break;
10719 case ARM::t2B:
10720 // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
10721 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
10722 Inst.setOpcode(ARM::t2Bcc);
10723 return true;
10724 }
10725 break;
10726 case ARM::t2Bcc:
10727 // If the conditional is AL or we're in an IT block, we really want t2B.
10728 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
10729 Inst.setOpcode(ARM::t2B);
10730 return true;
10731 }
10732 break;
10733 case ARM::tBcc:
10734 // If the conditional is AL, we really want tB.
10735 if (Inst.getOperand(1).getImm() == ARMCC::AL) {
10736 Inst.setOpcode(ARM::tB);
10737 return true;
10738 }
10739 break;
10740 case ARM::tLDMIA: {
10741 // If the register list contains any high registers, or if the writeback
10742 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
10743 // instead if we're in Thumb2. Otherwise, this should have generated
10744 // an error in validateInstruction().
10745 unsigned Rn = Inst.getOperand(0).getReg();
10746 bool hasWritebackToken =
10747 (static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
10748 .isToken() &&
10749 static_cast<ARMOperand &>(*Operands[MnemonicOpsEndInd + 1])
10750 .getToken() == "!");
10751 bool listContainsBase;
10752 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
10753 (!listContainsBase && !hasWritebackToken) ||
10754 (listContainsBase && hasWritebackToken)) {
10755 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
10756 assert(isThumbTwo());
10757 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
10758 // If we're switching to the updating version, we need to insert
10759 // the writeback tied operand.
10760 if (hasWritebackToken)
10761 Inst.insert(Inst.begin(),
10763 return true;
10764 }
10765 break;
10766 }
10767 case ARM::tSTMIA_UPD: {
10768 // If the register list contains any high registers, we need to use
10769 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
10770 // should have generated an error in validateInstruction().
10771 unsigned Rn = Inst.getOperand(0).getReg();
10772 bool listContainsBase;
10773 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
10774 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
10775 assert(isThumbTwo());
10776 Inst.setOpcode(ARM::t2STMIA_UPD);
10777 return true;
10778 }
10779 break;
10780 }
10781 case ARM::tPOP: {
10782 bool listContainsBase;
10783 // If the register list contains any high registers, we need to use
10784 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
10785 // should have generated an error in validateInstruction().
10786 if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
10787 return false;
10788 assert(isThumbTwo());
10789 Inst.setOpcode(ARM::t2LDMIA_UPD);
10790 // Add the base register and writeback operands.
10791 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10792 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10793 return true;
10794 }
10795 case ARM::tPUSH: {
10796 bool listContainsBase;
10797 if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
10798 return false;
10799 assert(isThumbTwo());
10800 Inst.setOpcode(ARM::t2STMDB_UPD);
10801 // Add the base register and writeback operands.
10802 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10803 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
10804 return true;
10805 }
10806 case ARM::t2MOVi:
10807 // If we can use the 16-bit encoding and the user didn't explicitly
10808 // request the 32-bit variant, transform it here.
10809 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10810 (Inst.getOperand(1).isImm() &&
10811 (unsigned)Inst.getOperand(1).getImm() <= 255) &&
10812 Inst.getOperand(4).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10813 !HasWideQualifier) {
10814 // The operands aren't in the same order for tMOVi8...
10815 MCInst TmpInst;
10816 TmpInst.setOpcode(ARM::tMOVi8);
10817 TmpInst.addOperand(Inst.getOperand(0));
10818 TmpInst.addOperand(Inst.getOperand(4));
10819 TmpInst.addOperand(Inst.getOperand(1));
10820 TmpInst.addOperand(Inst.getOperand(2));
10821 TmpInst.addOperand(Inst.getOperand(3));
10822 Inst = TmpInst;
10823 return true;
10824 }
10825 break;
10826
10827 case ARM::t2MOVr:
10828 // If we can use the 16-bit encoding and the user didn't explicitly
10829 // request the 32-bit variant, transform it here.
10830 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10831 isARMLowRegister(Inst.getOperand(1).getReg()) &&
10832 Inst.getOperand(2).getImm() == ARMCC::AL &&
10833 Inst.getOperand(4).getReg() == ARM::CPSR &&
10834 !HasWideQualifier) {
10835 // The operands aren't the same for tMOV[S]r... (no cc_out)
10836 MCInst TmpInst;
10837 unsigned Op = Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr;
10838 TmpInst.setOpcode(Op);
10839 TmpInst.addOperand(Inst.getOperand(0));
10840 TmpInst.addOperand(Inst.getOperand(1));
10841 if (Op == ARM::tMOVr) {
10842 TmpInst.addOperand(Inst.getOperand(2));
10843 TmpInst.addOperand(Inst.getOperand(3));
10844 }
10845 Inst = TmpInst;
10846 return true;
10847 }
10848 break;
10849
10850 case ARM::t2SXTH:
10851 case ARM::t2SXTB:
10852 case ARM::t2UXTH:
10853 case ARM::t2UXTB:
10854 // If we can use the 16-bit encoding and the user didn't explicitly
10855 // request the 32-bit variant, transform it here.
10856 if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
10857 isARMLowRegister(Inst.getOperand(1).getReg()) &&
10858 Inst.getOperand(2).getImm() == 0 &&
10859 !HasWideQualifier) {
10860 unsigned NewOpc;
10861 switch (Inst.getOpcode()) {
10862 default: llvm_unreachable("Illegal opcode!");
10863 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
10864 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
10865 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
10866 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
10867 }
10868 // The operands aren't the same for thumb1 (no rotate operand).
10869 MCInst TmpInst;
10870 TmpInst.setOpcode(NewOpc);
10871 TmpInst.addOperand(Inst.getOperand(0));
10872 TmpInst.addOperand(Inst.getOperand(1));
10873 TmpInst.addOperand(Inst.getOperand(3));
10874 TmpInst.addOperand(Inst.getOperand(4));
10875 Inst = TmpInst;
10876 return true;
10877 }
10878 break;
10879
10880 case ARM::MOVsi: {
10882 // rrx shifts and asr/lsr of #32 is encoded as 0
10883 if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
10884 return false;
10885 if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
10886 // Shifting by zero is accepted as a vanilla 'MOVr'
10887 MCInst TmpInst;
10888 TmpInst.setOpcode(ARM::MOVr);
10889 TmpInst.addOperand(Inst.getOperand(0));
10890 TmpInst.addOperand(Inst.getOperand(1));
10891 TmpInst.addOperand(Inst.getOperand(3));
10892 TmpInst.addOperand(Inst.getOperand(4));
10893 TmpInst.addOperand(Inst.getOperand(5));
10894 Inst = TmpInst;
10895 return true;
10896 }
10897 return false;
10898 }
10899 case ARM::ANDrsi:
10900 case ARM::ORRrsi:
10901 case ARM::EORrsi:
10902 case ARM::BICrsi:
10903 case ARM::SUBrsi:
10904 case ARM::ADDrsi: {
10905 unsigned newOpc;
10907 if (SOpc == ARM_AM::rrx) return false;
10908 switch (Inst.getOpcode()) {
10909 default: llvm_unreachable("unexpected opcode!");
10910 case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
10911 case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
10912 case ARM::EORrsi: newOpc = ARM::EORrr; break;
10913 case ARM::BICrsi: newOpc = ARM::BICrr; break;
10914 case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
10915 case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
10916 }
10917 // If the shift is by zero, use the non-shifted instruction definition.
10918 // The exception is for right shifts, where 0 == 32
10919 if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
10920 !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
10921 MCInst TmpInst;
10922 TmpInst.setOpcode(newOpc);
10923 TmpInst.addOperand(Inst.getOperand(0));
10924 TmpInst.addOperand(Inst.getOperand(1));
10925 TmpInst.addOperand(Inst.getOperand(2));
10926 TmpInst.addOperand(Inst.getOperand(4));
10927 TmpInst.addOperand(Inst.getOperand(5));
10928 TmpInst.addOperand(Inst.getOperand(6));
10929 Inst = TmpInst;
10930 return true;
10931 }
10932 return false;
10933 }
10934 case ARM::ITasm:
10935 case ARM::t2IT: {
10936 // Set up the IT block state according to the IT instruction we just
10937 // matched.
10938 assert(!inITBlock() && "nested IT blocks?!");
10939 startExplicitITBlock(ARMCC::CondCodes(Inst.getOperand(0).getImm()),
10940 Inst.getOperand(1).getImm());
10941 break;
10942 }
10943 case ARM::t2LSLrr:
10944 case ARM::t2LSRrr:
10945 case ARM::t2ASRrr:
10946 case ARM::t2SBCrr:
10947 case ARM::t2RORrr:
10948 case ARM::t2BICrr:
10949 // Assemblers should use the narrow encodings of these instructions when permissible.
10950 if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
10951 isARMLowRegister(Inst.getOperand(2).getReg())) &&
10952 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
10953 Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10954 !HasWideQualifier) {
10955 unsigned NewOpc;
10956 switch (Inst.getOpcode()) {
10957 default: llvm_unreachable("unexpected opcode");
10958 case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
10959 case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
10960 case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
10961 case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
10962 case ARM::t2RORrr: NewOpc = ARM::tROR; break;
10963 case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
10964 }
10965 MCInst TmpInst;
10966 TmpInst.setOpcode(NewOpc);
10967 TmpInst.addOperand(Inst.getOperand(0));
10968 TmpInst.addOperand(Inst.getOperand(5));
10969 TmpInst.addOperand(Inst.getOperand(1));
10970 TmpInst.addOperand(Inst.getOperand(2));
10971 TmpInst.addOperand(Inst.getOperand(3));
10972 TmpInst.addOperand(Inst.getOperand(4));
10973 Inst = TmpInst;
10974 return true;
10975 }
10976 return false;
10977
10978 case ARM::t2ANDrr:
10979 case ARM::t2EORrr:
10980 case ARM::t2ADCrr:
10981 case ARM::t2ORRrr:
10982 // Assemblers should use the narrow encodings of these instructions when permissible.
10983 // These instructions are special in that they are commutable, so shorter encodings
10984 // are available more often.
10985 if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
10986 isARMLowRegister(Inst.getOperand(2).getReg())) &&
10987 (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
10988 Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
10989 Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
10990 !HasWideQualifier) {
10991 unsigned NewOpc;
10992 switch (Inst.getOpcode()) {
10993 default: llvm_unreachable("unexpected opcode");
10994 case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
10995 case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
10996 case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
10997 case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
10998 }
10999 MCInst TmpInst;
11000 TmpInst.setOpcode(NewOpc);
11001 TmpInst.addOperand(Inst.getOperand(0));
11002 TmpInst.addOperand(Inst.getOperand(5));
11003 if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
11004 TmpInst.addOperand(Inst.getOperand(1));
11005 TmpInst.addOperand(Inst.getOperand(2));
11006 } else {
11007 TmpInst.addOperand(Inst.getOperand(2));
11008 TmpInst.addOperand(Inst.getOperand(1));
11009 }
11010 TmpInst.addOperand(Inst.getOperand(3));
11011 TmpInst.addOperand(Inst.getOperand(4));
11012 Inst = TmpInst;
11013 return true;
11014 }
11015 return false;
11016 case ARM::MVE_VPST:
11017 case ARM::MVE_VPTv16i8:
11018 case ARM::MVE_VPTv8i16:
11019 case ARM::MVE_VPTv4i32:
11020 case ARM::MVE_VPTv16u8:
11021 case ARM::MVE_VPTv8u16:
11022 case ARM::MVE_VPTv4u32:
11023 case ARM::MVE_VPTv16s8:
11024 case ARM::MVE_VPTv8s16:
11025 case ARM::MVE_VPTv4s32:
11026 case ARM::MVE_VPTv4f32:
11027 case ARM::MVE_VPTv8f16:
11028 case ARM::MVE_VPTv16i8r:
11029 case ARM::MVE_VPTv8i16r:
11030 case ARM::MVE_VPTv4i32r:
11031 case ARM::MVE_VPTv16u8r:
11032 case ARM::MVE_VPTv8u16r:
11033 case ARM::MVE_VPTv4u32r:
11034 case ARM::MVE_VPTv16s8r:
11035 case ARM::MVE_VPTv8s16r:
11036 case ARM::MVE_VPTv4s32r:
11037 case ARM::MVE_VPTv4f32r:
11038 case ARM::MVE_VPTv8f16r: {
11039 assert(!inVPTBlock() && "Nested VPT blocks are not allowed");
11040 MCOperand &MO = Inst.getOperand(0);
11041 VPTState.Mask = MO.getImm();
11042 VPTState.CurPosition = 0;
11043 break;
11044 }
11045 }
11046 return false;
11047}
11048
11049unsigned
11050ARMAsmParser::checkEarlyTargetMatchPredicate(MCInst &Inst,
11051 const OperandVector &Operands) {
11052 unsigned Opc = Inst.getOpcode();
11053 switch (Opc) {
11054 // Prevent the mov r8 r8 encoding for nop being selected when the v6/thumb 2
11055 // encoding is available.
11056 case ARM::tMOVr: {
11057 if (Operands[0]->isToken() &&
11058 static_cast<ARMOperand &>(*Operands[0]).getToken() == "nop" &&
11059 ((isThumb() && !isThumbOne()) || hasV6MOps())) {
11060 return Match_MnemonicFail;
11061 }
11062 }
11064 default:
11065 return Match_Success;
11066 }
11067}
11068
/// Late target match predicate: context-sensitive legality checks that the
/// TableGen'erated matcher cannot express.
///
/// Covers: the S-suffix / IT-block interaction for 16-bit Thumb arithmetic,
/// Thumb1 high-register restrictions, pre-ARMv8 SP restrictions for t2MOVr
/// and rGPR operands, VMRS/VMSR SP restrictions, and the tMUL tied-operand
/// constraint.
///
/// \returns Match_Success, or a Match_Requires* / Match_Invalid* code that
/// the caller reports as a diagnostic.
unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
  // suffix depending on whether they're in an IT block or not.
  unsigned Opc = Inst.getOpcode();
  const MCInstrDesc &MCID = MII.get(Opc);
  // NOTE(review): the indented region below appears to be guarded by a
  // flag-setting-instruction check (`if (...) {`) not visible in this
  // excerpt — confirm brace balance against the full file.
    assert(MCID.hasOptionalDef() &&
           "optionally flag setting instruction missing optional def operand");
    assert(MCID.NumOperands == Inst.getNumOperands() &&
           "operand count mismatch!");
    bool IsCPSR = false;
    // Check if the instruction has CPSR set.
    for (unsigned OpNo = 0; OpNo < MCID.NumOperands; ++OpNo) {
      if (MCID.operands()[OpNo].isOptionalDef() &&
          Inst.getOperand(OpNo).isReg() &&
          Inst.getOperand(OpNo).getReg() == ARM::CPSR)
        IsCPSR = true;
    }

    // If we're parsing Thumb1, reject it completely.
    if (isThumbOne() && !IsCPSR)
      return Match_RequiresFlagSetting;
    // If we're parsing Thumb2, which form is legal depends on whether we're
    // in an IT block.
    if (isThumbTwo() && !IsCPSR && !inITBlock())
      return Match_RequiresITBlock;
    if (isThumbTwo() && IsCPSR && inITBlock())
      return Match_RequiresNotITBlock;
    // LSL with zero immediate is not allowed in an IT block
    if (Opc == ARM::tLSLri && Inst.getOperand(3).getImm() == 0 && inITBlock())
      return Match_RequiresNotITBlock;
  } else if (isThumbOne()) {
    // Some high-register supporting Thumb1 encodings only allow both registers
    // to be from r0-r7 when in Thumb2.
    // NOTE(review): the conditions of the two `if`s below look truncated in
    // this excerpt (a trailing register-class check per arm is missing).
    if (Opc == ARM::tADDhirr && !hasV6MOps() &&
        isARMLowRegister(Inst.getOperand(1).getReg()) &&
      return Match_RequiresThumb2;
    // Others only require ARMv6 or later.
    else if (Opc == ARM::tMOVr && !hasV6Ops() &&
             isARMLowRegister(Inst.getOperand(0).getReg()) &&
      return Match_RequiresV6;
  }

  // Before ARMv8 the rules for when SP is allowed in t2MOVr are more complex
  // than the loop below can handle, so it uses the GPRnopc register class and
  // we do SP handling here.
  if (Opc == ARM::t2MOVr && !hasV8Ops())
  {
    // SP as both source and destination is not allowed
    if (Inst.getOperand(0).getReg() == ARM::SP &&
        Inst.getOperand(1).getReg() == ARM::SP)
      return Match_RequiresV8;
    // When flags-setting SP as either source or destination is not allowed
    if (Inst.getOperand(4).getReg() == ARM::CPSR &&
        (Inst.getOperand(0).getReg() == ARM::SP ||
         Inst.getOperand(1).getReg() == ARM::SP))
      return Match_RequiresV8;
  }

  switch (Inst.getOpcode()) {
  case ARM::VMRS:
  case ARM::VMSR:
  case ARM::VMRS_FPCXTS:
  case ARM::VMRS_FPCXTNS:
  case ARM::VMSR_FPCXTS:
  case ARM::VMSR_FPCXTNS:
  case ARM::VMRS_FPSCR_NZCVQC:
  case ARM::VMSR_FPSCR_NZCVQC:
  case ARM::FMSTAT:
  case ARM::VMRS_VPR:
  case ARM::VMRS_P0:
  case ARM::VMSR_VPR:
  case ARM::VMSR_P0:
    // Use of SP for VMRS/VMSR is only allowed in ARM mode with the exception of
    // ARMv8-A.
    if (Inst.getOperand(0).isReg() && Inst.getOperand(0).getReg() == ARM::SP &&
        (isThumb() && !hasV8Ops()))
      return Match_InvalidOperand;
    break;
  case ARM::t2TBB:
  case ARM::t2TBH:
    // Rn = sp is only allowed with ARMv8-A
    if (!hasV8Ops() && (Inst.getOperand(0).getReg() == ARM::SP))
      return Match_RequiresV8;
    break;
  case ARM::tMUL:
    // The second source operand must be the same register as the destination
    // operand.
    // FIXME: Ideally this would be handled by ARMGenAsmMatcher and
    // emitAsmTiedOperandConstraints.
    if (Inst.getOperand(0).getReg() != Inst.getOperand(3).getReg())
      return Match_InvalidTiedOperand;
    break;
  default:
    break;
  }

  // rGPR operands: reject PC always, and SP before ARMv8.
  for (unsigned I = 0; I < MCID.NumOperands; ++I)
    if (MCID.operands()[I].RegClass == ARM::rGPRRegClassID) {
      // rGPRRegClass excludes PC, and also excluded SP before ARMv8
      const auto &Op = Inst.getOperand(I);
      if (!Op.isReg()) {
        // This can happen in awkward cases with tied operands, e.g. a
        // writeback load/store with a complex addressing mode in
        // which there's an output operand corresponding to the
        // updated written-back base register: the Tablegen-generated
        // AsmMatcher will have written a placeholder operand to that
        // slot in the form of an immediate 0, because it can't
        // generate the register part of the complex addressing-mode
        // operand ahead of time.
        continue;
      }

      unsigned Reg = Op.getReg();
      if ((Reg == ARM::SP) && !hasV8Ops())
        return Match_RequiresV8;
      else if (Reg == ARM::PC)
        return Match_InvalidOperand;
    }

  return Match_Success;
}
11193
11194namespace llvm {
11195
11196template <> inline bool IsCPSRDead<MCInst>(const MCInst *Instr) {
11197 return true; // In an assembly source, no need to second-guess
11198}
11199
11200} // end namespace llvm
11201
11202// Returns true if Inst is unpredictable if it is in and IT block, but is not
11203// the last instruction in the block.
11204bool ARMAsmParser::isITBlockTerminator(MCInst &Inst) const {
11205 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
11206
11207 // All branch & call instructions terminate IT blocks with the exception of
11208 // SVC.
11209 if (MCID.isTerminator() || (MCID.isCall() && Inst.getOpcode() != ARM::tSVC) ||
11210 MCID.isReturn() || MCID.isBranch() || MCID.isIndirectBranch())
11211 return true;
11212
11213 // Any arithmetic instruction which writes to the PC also terminates the IT
11214 // block.
11215 if (MCID.hasDefOfPhysReg(Inst, ARM::PC, *MRI))
11216 return true;
11217
11218 return false;
11219}
11220
/// Match one parsed instruction, transparently managing implicit IT blocks
/// for Thumb2 when `-arm-implicit-it` permits.
///
/// Strategy, in order: (1) if implicit IT is not applicable, match normally;
/// (2) try to extend the current implicit IT block; (3) flush pending
/// instructions and try to match outside any IT block; (4) try to open a
/// fresh implicit IT block. \p EmitInITBlock tells the caller whether the
/// matched instruction must be queued inside the pending IT block rather
/// than emitted directly.
///
/// NOTE(review): this excerpt is missing a parameter line (the near-miss
/// vector referenced as `NearMisses` below) and the
/// `Inst.getOperand(...)` halves of the `.getImm()` expressions — confirm
/// against the full file.
unsigned ARMAsmParser::MatchInstruction(OperandVector &Operands, MCInst &Inst,
                                        bool MatchingInlineAsm,
                                        bool &EmitInITBlock,
                                        MCStreamer &Out) {
  // If we can't use an implicit IT block here, just match as normal.
  if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
    return MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);

  // Try to match the instruction in an extension of the current IT block (if
  // there is one).
  if (inImplicitITBlock()) {
    extendImplicitITBlock(ITState.Cond);
    if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
        Match_Success) {
      // The match succeeded, but we still have to check that the instruction is
      // valid in this implicit IT block.
      const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
      if (MCID.isPredicable()) {
        ARMCC::CondCodes InstCond =
                .getImm();
        ARMCC::CondCodes ITCond = currentITCond();
        if (InstCond == ITCond) {
          EmitInITBlock = true;
          return Match_Success;
        } else if (InstCond == ARMCC::getOppositeCondition(ITCond)) {
          // Opposite condition: flip the IT mask ("T" -> "E") and accept.
          invertCurrentITCondition();
          EmitInITBlock = true;
          return Match_Success;
        }
      }
    }
    // Undo the speculative extension before trying other strategies.
    rewindImplicitITPosition();
  }

  // Finish the current IT block, and try to match outside any IT block.
  flushPendingInstructions(Out);
  unsigned PlainMatchResult =
      MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
  if (PlainMatchResult == Match_Success) {
    const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
    if (MCID.isPredicable()) {
      ARMCC::CondCodes InstCond =
              .getImm();
      // Some forms of the branch instruction have their own condition code
      // fields, so can be conditionally executed without an IT block.
      if (Inst.getOpcode() == ARM::tBcc || Inst.getOpcode() == ARM::t2Bcc) {
        EmitInITBlock = false;
        return Match_Success;
      }
      if (InstCond == ARMCC::AL) {
        EmitInITBlock = false;
        return Match_Success;
      }
    } else {
      // Unpredicated instructions never need an IT block.
      EmitInITBlock = false;
      return Match_Success;
    }
  }

  // Try to match in a new IT block. The matcher doesn't check the actual
  // condition, so we create an IT block with a dummy condition, and fix it up
  // once we know the actual condition.
  startImplicitITBlock();
  if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
      Match_Success) {
    const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
    if (MCID.isPredicable()) {
      ITState.Cond =
              .getImm();
      EmitInITBlock = true;
      return Match_Success;
    }
  }
  discardImplicitITBlock();

  // If none of these succeed, return the error we got when trying to match
  // outside any IT blocks.
  EmitInITBlock = false;
  return PlainMatchResult;
}
11305
11306static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS,
11307 unsigned VariantID = 0);
11308
11309static const char *getSubtargetFeatureName(uint64_t Val);
/// Top-level match-and-emit entry point called once per parsed statement.
///
/// Drives MatchInstruction, then on success validates context-sensitive
/// constraints, repeatedly post-processes the instruction to pick preferred
/// encodings, advances the IT/VPT block state, and either emits the
/// instruction or queues it as a pending conditional instruction inside an
/// implicit IT block. Returns true on error (a diagnostic has been issued).
///
/// NOTE(review): this excerpt is missing parameter lines (the operand
/// vector and output streamer referenced below) and the local `NearMisses`
/// declaration — confirm against the full file.
bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                           bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned MatchResult;
  bool PendConditionalInstruction = false;

  MatchResult = MatchInstruction(Operands, Inst, NearMisses, MatchingInlineAsm,
                                 PendConditionalInstruction, Out);

  // Find the number of operators that are part of the Mnemonic (LHS).
  unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);

  switch (MatchResult) {
  case Match_Success:
    LLVM_DEBUG(dbgs() << "Parsed as: ";
               Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
               dbgs() << "\n");

    // Context sensitive operand constraints aren't handled by the matcher,
    // so check them here.
    if (validateInstruction(Inst, Operands, MnemonicOpsEndInd)) {
      // Still progress the IT block, otherwise one wrong condition causes
      // nasty cascading errors.
      forwardITPosition();
      forwardVPTPosition();
      return true;
    }

    {
      // Some instructions need post-processing to, for example, tweak which
      // encoding is selected. Loop on it while changes happen so the
      // individual transformations can chain off each other. E.g.,
      // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
      while (processInstruction(Inst, Operands, MnemonicOpsEndInd, Out))
        LLVM_DEBUG(dbgs() << "Changed to: ";
                   Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
                   dbgs() << "\n");
    }

    // Only move forward at the very end so that everything in validate
    // and process gets a consistent answer about whether we're in an IT
    // block.
    forwardITPosition();
    forwardVPTPosition();

    // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
    // doesn't actually encode.
    if (Inst.getOpcode() == ARM::ITasm)
      return false;

    Inst.setLoc(IDLoc);
    if (PendConditionalInstruction) {
      // Queue inside the implicit IT block; flush once the block is full or
      // the instruction must be its last member.
      PendingConditionalInsts.push_back(Inst);
      if (isITBlockFull() || isITBlockTerminator(Inst))
        flushPendingInstructions(Out);
    } else {
      Out.emitInstruction(Inst, getSTI());
    }
    return false;
  case Match_NearMisses:
    ReportNearMisses(NearMisses, IDLoc, Operands);
    return true;
  case Match_MnemonicFail: {
    // Unknown mnemonic: offer a spelling suggestion based on the features
    // currently available.
    FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
    std::string Suggestion = ARMMnemonicSpellCheck(
      ((ARMOperand &)*Operands[0]).getToken(), FBS);
    return Error(IDLoc, "invalid instruction" + Suggestion,
                 ((ARMOperand &)*Operands[0]).getLocRange());
  }
  }

  llvm_unreachable("Implement any new match types added!");
}
11386
11387/// parseDirective parses the arm specific directives
/// Dispatch an ARM-specific assembler directive to its parser. Returns false
/// when the directive was recognized (even if its parser reported an error),
/// true to hand the directive to the generic parser.
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
  // Several directives are only valid for particular object-file formats.
  const MCContext::Environment Format = getContext().getObjectFileType();
  bool IsMachO = Format == MCContext::IsMachO;
  bool IsCOFF = Format == MCContext::IsCOFF;

  // Directive matching is case-insensitive.
  std::string IDVal = DirectiveID.getIdentifier().lower();
  if (IDVal == ".word")
    parseLiteralValues(4, DirectiveID.getLoc());
  else if (IDVal == ".short" || IDVal == ".hword")
    parseLiteralValues(2, DirectiveID.getLoc());
  else if (IDVal == ".thumb")
    parseDirectiveThumb(DirectiveID.getLoc());
  else if (IDVal == ".arm")
    parseDirectiveARM(DirectiveID.getLoc());
  else if (IDVal == ".thumb_func")
    parseDirectiveThumbFunc(DirectiveID.getLoc());
  else if (IDVal == ".code")
    parseDirectiveCode(DirectiveID.getLoc());
  else if (IDVal == ".syntax")
    parseDirectiveSyntax(DirectiveID.getLoc());
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(DirectiveID.getLoc());
  else if (IDVal == ".fnend")
    parseDirectiveFnEnd(DirectiveID.getLoc());
  else if (IDVal == ".cantunwind")
    parseDirectiveCantUnwind(DirectiveID.getLoc());
  else if (IDVal == ".personality")
    parseDirectivePersonality(DirectiveID.getLoc());
  else if (IDVal == ".handlerdata")
    parseDirectiveHandlerData(DirectiveID.getLoc());
  else if (IDVal == ".setfp")
    parseDirectiveSetFP(DirectiveID.getLoc());
  else if (IDVal == ".pad")
    parseDirectivePad(DirectiveID.getLoc());
  else if (IDVal == ".save")
    parseDirectiveRegSave(DirectiveID.getLoc(), false);
  else if (IDVal == ".vsave")
    parseDirectiveRegSave(DirectiveID.getLoc(), true);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(DirectiveID.getLoc());
  else if (IDVal == ".even")
    parseDirectiveEven(DirectiveID.getLoc());
  else if (IDVal == ".personalityindex")
    parseDirectivePersonalityIndex(DirectiveID.getLoc());
  else if (IDVal == ".unwind_raw")
    parseDirectiveUnwindRaw(DirectiveID.getLoc());
  else if (IDVal == ".movsp")
    parseDirectiveMovSP(DirectiveID.getLoc());
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(DirectiveID.getLoc());
  else if (IDVal == ".align")
    return parseDirectiveAlign(DirectiveID.getLoc()); // Use Generic on failure.
  else if (IDVal == ".thumb_set")
    parseDirectiveThumbSet(DirectiveID.getLoc());
  else if (IDVal == ".inst")
    parseDirectiveInst(DirectiveID.getLoc());
  else if (IDVal == ".inst.n")
    parseDirectiveInst(DirectiveID.getLoc(), 'n');
  else if (IDVal == ".inst.w")
    parseDirectiveInst(DirectiveID.getLoc(), 'w');
  else if (!IsMachO && !IsCOFF) {
    // ELF-only directives (build attributes and unwind-table setup).
    if (IDVal == ".arch")
      parseDirectiveArch(DirectiveID.getLoc());
    else if (IDVal == ".cpu")
      parseDirectiveCPU(DirectiveID.getLoc());
    else if (IDVal == ".eabi_attribute")
      parseDirectiveEabiAttr(DirectiveID.getLoc());
    else if (IDVal == ".fpu")
      parseDirectiveFPU(DirectiveID.getLoc());
    else if (IDVal == ".fnstart")
      parseDirectiveFnStart(DirectiveID.getLoc());
    else if (IDVal == ".object_arch")
      parseDirectiveObjectArch(DirectiveID.getLoc());
    else if (IDVal == ".tlsdescseq")
      parseDirectiveTLSDescSeq(DirectiveID.getLoc());
    else
      return true;
  } else if (IsCOFF) {
    // COFF-only directives: Windows SEH unwind pseudo-ops.
    if (IDVal == ".seh_stackalloc")
      parseDirectiveSEHAllocStack(DirectiveID.getLoc(), /*Wide=*/false);
    else if (IDVal == ".seh_stackalloc_w")
      parseDirectiveSEHAllocStack(DirectiveID.getLoc(), /*Wide=*/true);
    else if (IDVal == ".seh_save_regs")
      parseDirectiveSEHSaveRegs(DirectiveID.getLoc(), /*Wide=*/false);
    else if (IDVal == ".seh_save_regs_w")
      parseDirectiveSEHSaveRegs(DirectiveID.getLoc(), /*Wide=*/true);
    else if (IDVal == ".seh_save_sp")
      parseDirectiveSEHSaveSP(DirectiveID.getLoc());
    else if (IDVal == ".seh_save_fregs")
      parseDirectiveSEHSaveFRegs(DirectiveID.getLoc());
    else if (IDVal == ".seh_save_lr")
      parseDirectiveSEHSaveLR(DirectiveID.getLoc());
    else if (IDVal == ".seh_endprologue")
      parseDirectiveSEHPrologEnd(DirectiveID.getLoc(), /*Fragment=*/false);
    else if (IDVal == ".seh_endprologue_fragment")
      parseDirectiveSEHPrologEnd(DirectiveID.getLoc(), /*Fragment=*/true);
    else if (IDVal == ".seh_nop")
      parseDirectiveSEHNop(DirectiveID.getLoc(), /*Wide=*/false);
    else if (IDVal == ".seh_nop_w")
      parseDirectiveSEHNop(DirectiveID.getLoc(), /*Wide=*/true);
    else if (IDVal == ".seh_startepilogue")
      parseDirectiveSEHEpilogStart(DirectiveID.getLoc(), /*Condition=*/false);
    else if (IDVal == ".seh_startepilogue_cond")
      parseDirectiveSEHEpilogStart(DirectiveID.getLoc(), /*Condition=*/true);
    else if (IDVal == ".seh_endepilogue")
      parseDirectiveSEHEpilogEnd(DirectiveID.getLoc());
    else if (IDVal == ".seh_custom")
      parseDirectiveSEHCustom(DirectiveID.getLoc());
    else
      return true;
  } else
    return true;
  return false;
}
11502
11503/// parseLiteralValues
11504/// ::= .hword expression [, expression]*
11505/// ::= .short expression [, expression]*
11506/// ::= .word expression [, expression]*
11507bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
11508 auto parseOne = [&]() -> bool {
11509 const MCExpr *Value;
11510 if (getParser().parseExpression(Value))
11511 return true;
11512 getParser().getStreamer().emitValue(Value, Size, L);
11513 return false;
11514 };
11515 return (parseMany(parseOne));
11516}
11517
11518/// parseDirectiveThumb
11519/// ::= .thumb
11520bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
11521 if (parseEOL() || check(!hasThumb(), L, "target does not support Thumb mode"))
11522 return true;
11523
11524 if (!isThumb())
11525 SwitchMode();
11526
11527 getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11528 getParser().getStreamer().emitCodeAlignment(Align(2), &getSTI(), 0);
11529 return false;
11530}
11531
11532/// parseDirectiveARM
11533/// ::= .arm
11534bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
11535 if (parseEOL() || check(!hasARM(), L, "target does not support ARM mode"))
11536 return true;
11537
11538 if (isThumb())
11539 SwitchMode();
11540 getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
11541 getParser().getStreamer().emitCodeAlignment(Align(4), &getSTI(), 0);
11542 return false;
11543}
11544
// Map a relocation-specifier name written in assembly to the corresponding
// MCSymbolRefExpr variant kind.
// NOTE(review): the return type line, the StringSwitch scaffolding and
// several .Case entries appear to be elided from this excerpt — verify the
// full set of handled names against the complete source.
ARMAsmParser::getVariantKindForName(StringRef Name) const {
      .Case("gotfuncdesc", MCSymbolRefExpr::VK_GOTFUNCDESC)
      .Case("gotofffuncdesc", MCSymbolRefExpr::VK_GOTOFFFUNCDESC)
      .Case("gottpoff_fdpic", MCSymbolRefExpr::VK_GOTTPOFF_FDPIC)
      .Case("secrel32", MCSymbolRefExpr::VK_SECREL)
      .Case("tlsgd_fdpic", MCSymbolRefExpr::VK_TLSGD_FDPIC)
      .Case("tlsldm_fdpic", MCSymbolRefExpr::VK_TLSLDM_FDPIC)
}
11575
/// Hook invoked just before a label is emitted.
void ARMAsmParser::doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc) {
  // We need to flush the current implicit IT block on a label, because it is
  // not legal to branch into an IT block. Flushing finalizes any pending
  // conditional instructions before the label goes out.
  flushPendingInstructions(getStreamer());
}
11581
11582void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
11583 if (NextSymbolIsThumb) {
11584 getParser().getStreamer().emitThumbFunc(Symbol);
11585 NextSymbolIsThumb = false;
11586 }
11587}
11588
11589/// parseDirectiveThumbFunc
11590/// ::= .thumbfunc symbol_name
11591bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
11592 MCAsmParser &Parser = getParser();
11593 const auto Format = getContext().getObjectFileType();
11594 bool IsMachO = Format == MCContext::IsMachO;
11595
11596 // Darwin asm has (optionally) function name after .thumb_func direction
11597 // ELF doesn't
11598
11599 if (IsMachO) {
11600 if (Parser.getTok().is(AsmToken::Identifier) ||
11601 Parser.getTok().is(AsmToken::String)) {
11602 MCSymbol *Func = getParser().getContext().getOrCreateSymbol(
11603 Parser.getTok().getIdentifier());
11604 getParser().getStreamer().emitThumbFunc(Func);
11605 Parser.Lex();
11606 if (parseEOL())
11607 return true;
11608 return false;
11609 }
11610 }
11611
11612 if (parseEOL())
11613 return true;
11614
11615 // .thumb_func implies .thumb
11616 if (!isThumb())
11617 SwitchMode();
11618
11619 getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11620
11621 NextSymbolIsThumb = true;
11622 return false;
11623}
11624
11625/// parseDirectiveSyntax
11626/// ::= .syntax unified | divided
11627bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
11628 MCAsmParser &Parser = getParser();
11629 const AsmToken &Tok = Parser.getTok();
11630 if (Tok.isNot(AsmToken::Identifier)) {
11631 Error(L, "unexpected token in .syntax directive");
11632 return false;
11633 }
11634
11635 StringRef Mode = Tok.getString();
11636 Parser.Lex();
11637 if (check(Mode == "divided" || Mode == "DIVIDED", L,
11638 "'.syntax divided' arm assembly not supported") ||
11639 check(Mode != "unified" && Mode != "UNIFIED", L,
11640 "unrecognized syntax mode in .syntax directive") ||
11641 parseEOL())
11642 return true;
11643
11644 // TODO tell the MC streamer the mode
11645 // getParser().getStreamer().Emit???();
11646 return false;
11647}
11648
11649/// parseDirectiveCode
11650/// ::= .code 16 | 32
11651bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
11652 MCAsmParser &Parser = getParser();
11653 const AsmToken &Tok = Parser.getTok();
11654 if (Tok.isNot(AsmToken::Integer))
11655 return Error(L, "unexpected token in .code directive");
11656 int64_t Val = Parser.getTok().getIntVal();
11657 if (Val != 16 && Val != 32) {
11658 Error(L, "invalid operand to .code directive");
11659 return false;
11660 }
11661 Parser.Lex();
11662
11663 if (parseEOL())
11664 return true;
11665
11666 if (Val == 16) {
11667 if (!hasThumb())
11668 return Error(L, "target does not support Thumb mode");
11669
11670 if (!isThumb())
11671 SwitchMode();
11672 getParser().getStreamer().emitAssemblerFlag(MCAF_Code16);
11673 } else {
11674 if (!hasARM())
11675 return Error(L, "target does not support ARM mode");
11676
11677 if (isThumb())
11678 SwitchMode();
11679 getParser().getStreamer().emitAssemblerFlag(MCAF_Code32);
11680 }
11681
11682 return false;
11683}
11684
11685/// parseDirectiveReq
11686/// ::= name .req registername
/// Define a register alias: name .req registername.
/// NOTE(review): the declaration of 'Reg' appears to be elided in this
/// excerpt — verify against the full source.
bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
  MCAsmParser &Parser = getParser();
  Parser.Lex(); // Eat the '.req' token.
  SMLoc SRegLoc, ERegLoc;
  if (check(parseRegister(Reg, SRegLoc, ERegLoc), SRegLoc,
            "register name expected") ||
      parseEOL())
    return true;

  // Record the alias; re-defining an existing alias is only accepted if it
  // names the same register as the original definition.
  if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg)
    return Error(SRegLoc,
                 "redefinition of '" + Name + "' does not match original.");

  return false;
}
11703
11704/// parseDirectiveUneq
11705/// ::= .unreq registername
11706bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
11707 MCAsmParser &Parser = getParser();
11708 if (Parser.getTok().isNot(AsmToken::Identifier))
11709 return Error(L, "unexpected input in .unreq directive.");
11710 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
11711 Parser.Lex(); // Eat the identifier.
11712 return parseEOL();
11713}
11714
11715// After changing arch/CPU, try to put the ARM/Thumb mode back to what it was
11716// before, if supported by the new target, or emit mapping symbols for the mode
11717// switch.
11718void ARMAsmParser::FixModeAfterArchChange(bool WasThumb, SMLoc Loc) {
11719 if (WasThumb != isThumb()) {
11720 if (WasThumb && hasThumb()) {
11721 // Stay in Thumb mode
11722 SwitchMode();
11723 } else if (!WasThumb && hasARM()) {
11724 // Stay in ARM mode
11725 SwitchMode();
11726 } else {
11727 // Mode switch forced, because the new arch doesn't support the old mode.
11728 getParser().getStreamer().emitAssemblerFlag(isThumb() ? MCAF_Code16
11729 : MCAF_Code32);
11730 // Warn about the implcit mode switch. GAS does not switch modes here,
11731 // but instead stays in the old mode, reporting an error on any following
11732 // instructions as the mode does not exist on the target.
11733 Warning(Loc, Twine("new target does not support ") +
11734 (WasThumb ? "thumb" : "arm") + " mode, switching to " +
11735 (!WasThumb ? "thumb" : "arm") + " mode");
11736 }
11737 }
11738}
11739
11740/// parseDirectiveArch
11741/// ::= .arch token
/// Switch the subtarget to the named architecture's default feature set.
/// NOTE(review): the declaration of 'ID' (the parsed ArchKind) appears to be
/// elided in this excerpt — verify against the full source.
bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
  StringRef Arch = getParser().parseStringToEndOfStatement().trim();

  if (ID == ARM::ArchKind::INVALID)
    return Error(L, "Unknown arch name");

  bool WasThumb = isThumb();
  // NOTE(review): 'T' appears unused in this excerpt.
  Triple T;
  MCSubtargetInfo &STI = copySTI();
  // Reset features to the architecture's defaults (no CPU, no tuning).
  STI.setDefaultFeatures("", /*TuneCPU*/ "",
                         ("+" + ARM::getArchName(ID)).str());
  setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  // Restore the previous ARM/Thumb mode if the new arch still supports it.
  FixModeAfterArchChange(WasThumb, L);

  getTargetStreamer().emitArch(ID);
  return false;
}
11760
11761/// parseDirectiveEabiAttr
11762/// ::= .eabi_attribute int, int [, "str"]
11763/// ::= .eabi_attribute Tag_name, int [, "str"]
/// Parse a build attribute: .eabi_attribute tag, value[, "string"].
/// NOTE(review): several lines (tag-classification conditions and argument
/// continuations) appear to be elided in this excerpt — the stray braces and
/// dangling call below reflect those gaps; verify against the full source.
bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
  MCAsmParser &Parser = getParser();
  int64_t Tag;
  SMLoc TagLoc;
  TagLoc = Parser.getTok().getLoc();
  // The tag may be given symbolically (e.g. Tag_CPU_name) or as an integer.
  if (Parser.getTok().is(AsmToken::Identifier)) {
    StringRef Name = Parser.getTok().getIdentifier();
    std::optional<unsigned> Ret = ELFAttrs::attrTypeFromString(
    if (!Ret) {
      // Diagnose, but still claim the directive as handled.
      Error(TagLoc, "attribute name not recognised: " + Name);
      return false;
    }
    Tag = *Ret;
    Parser.Lex();
  } else {
    const MCExpr *AttrExpr;

    TagLoc = Parser.getTok().getLoc();
    if (Parser.parseExpression(AttrExpr))
      return true;

    // The numeric tag form must fold to a constant.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
    if (check(!CE, TagLoc, "expected numeric constant"))
      return true;

    Tag = CE->getValue();
  }

  if (Parser.parseComma())
    return true;

  StringRef StringValue = "";
  bool IsStringValue = false;

  int64_t IntegerValue = 0;
  bool IsIntegerValue = false;

  // Classify the tag's payload: some tags take a string, 'compatibility'
  // takes both an integer and a string; otherwise low/even tags take an
  // integer and odd tags take a string.
    IsStringValue = true;
  else if (Tag == ARMBuildAttrs::compatibility) {
    IsStringValue = true;
    IsIntegerValue = true;
  } else if (Tag < 32 || Tag % 2 == 0)
    IsIntegerValue = true;
  else if (Tag % 2 == 1)
    IsStringValue = true;
  else
    llvm_unreachable("invalid tag type");

  if (IsIntegerValue) {
    const MCExpr *ValueExpr;
    SMLoc ValueExprLoc = Parser.getTok().getLoc();
    if (Parser.parseExpression(ValueExpr))
      return true;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
    if (!CE)
      return Error(ValueExprLoc, "expected numeric constant");
    IntegerValue = CE->getValue();
  }

  if (Parser.parseComma())
    return true;
  }

  std::string EscapedValue;
  if (IsStringValue) {
    if (Parser.getTok().isNot(AsmToken::String))
      return Error(Parser.getTok().getLoc(), "bad string constant");

    if (Parser.parseEscapedString(EscapedValue))
      return Error(Parser.getTok().getLoc(), "bad escaped string constant");

    StringValue = EscapedValue;
    } else {
      StringValue = Parser.getTok().getStringContents();
      Parser.Lex();
    }
  }

  if (Parser.parseEOL())
    return true;

  // Emit via the target streamer, using the combined form for tags that take
  // both payloads.
  if (IsIntegerValue && IsStringValue) {
    getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
  } else if (IsIntegerValue)
    getTargetStreamer().emitAttribute(Tag, IntegerValue);
  else if (IsStringValue)
    getTargetStreamer().emitTextAttribute(Tag, StringValue);
  return false;
}
11859
11860/// parseDirectiveCPU
11861/// ::= .cpu str
bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
  StringRef CPU = getParser().parseStringToEndOfStatement().trim();
  // The CPU_name build attribute is recorded even before the name is
  // validated below.
  getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);

  // FIXME: This is using table-gen data, but should be moved to
  // ARMTargetParser once that is table-gen'd.
  if (!getSTI().isCPUStringValid(CPU))
    return Error(L, "Unknown CPU name");

  // Reset the subtarget to the named CPU's default features, then try to
  // restore the previous ARM/Thumb mode.
  bool WasThumb = isThumb();
  MCSubtargetInfo &STI = copySTI();
  STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
  setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  FixModeAfterArchChange(WasThumb, L);

  return false;
}
11879
11880/// parseDirectiveFPU
11881/// ::= .fpu str
/// Apply the feature set implied by the named FPU.
/// NOTE(review): the declaration of 'ID' (the parsed FPU kind) appears to be
/// elided in this excerpt — verify against the full source.
bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
  SMLoc FPUNameLoc = getTok().getLoc();
  StringRef FPU = getParser().parseStringToEndOfStatement().trim();

  std::vector<StringRef> Features;
  if (!ARM::getFPUFeatures(ID, Features))
    return Error(FPUNameLoc, "Unknown FPU name");

  // Layer each FPU-implied feature on top of the current subtarget.
  MCSubtargetInfo &STI = copySTI();
  for (auto Feature : Features)
    STI.ApplyFeatureFlag(Feature);
  setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

  getTargetStreamer().emitFPU(ID);
  return false;
}
11899
11900/// parseDirectiveFnStart
11901/// ::= .fnstart
11902bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
11903 if (parseEOL())
11904 return true;
11905
11906 if (UC.hasFnStart()) {
11907 Error(L, ".fnstart starts before the end of previous one");
11908 UC.emitFnStartLocNotes();
11909 return true;
11910 }
11911
11912 // Reset the unwind directives parser state
11913 UC.reset();
11914
11915 getTargetStreamer().emitFnStart();
11916
11917 UC.recordFnStart(L);
11918 return false;
11919}
11920
11921/// parseDirectiveFnEnd
11922/// ::= .fnend
11923bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
11924 if (parseEOL())
11925 return true;
11926 // Check the ordering of unwind directives
11927 if (!UC.hasFnStart())
11928 return Error(L, ".fnstart must precede .fnend directive");
11929
11930 // Reset the unwind directives parser state
11931 getTargetStreamer().emitFnEnd();
11932
11933 UC.reset();
11934 return false;
11935}
11936
11937/// parseDirectiveCantUnwind
11938/// ::= .cantunwind
11939bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
11940 if (parseEOL())
11941 return true;
11942
11943 UC.recordCantUnwind(L);
11944 // Check the ordering of unwind directives
11945 if (check(!UC.hasFnStart(), L, ".fnstart must precede .cantunwind directive"))
11946 return true;
11947
11948 if (UC.hasHandlerData()) {
11949 Error(L, ".cantunwind can't be used with .handlerdata directive");
11950 UC.emitHandlerDataLocNotes();
11951 return true;
11952 }
11953 if (UC.hasPersonality()) {
11954 Error(L, ".cantunwind can't be used with .personality directive");
11955 UC.emitPersonalityLocNotes();
11956 return true;
11957 }
11958
11959 getTargetStreamer().emitCantUnwind();
11960 return false;
11961}
11962
11963/// parseDirectivePersonality
11964/// ::= .personality name
11965bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
11966 MCAsmParser &Parser = getParser();
11967 bool HasExistingPersonality = UC.hasPersonality();
11968
11969 // Parse the name of the personality routine
11970 if (Parser.getTok().isNot(AsmToken::Identifier))
11971 return Error(L, "unexpected input in .personality directive.");
11972 StringRef Name(Parser.getTok().getIdentifier());
11973 Parser.Lex();
11974
11975 if (parseEOL())
11976 return true;
11977
11978 UC.recordPersonality(L);
11979
11980 // Check the ordering of unwind directives
11981 if (!UC.hasFnStart())
11982 return Error(L, ".fnstart must precede .personality directive");
11983 if (UC.cantUnwind()) {
11984 Error(L, ".personality can't be used with .cantunwind directive");
11985 UC.emitCantUnwindLocNotes();
11986 return true;
11987 }
11988 if (UC.hasHandlerData()) {
11989 Error(L, ".personality must precede .handlerdata directive");
11990 UC.emitHandlerDataLocNotes();
11991 return true;
11992 }
11993 if (HasExistingPersonality) {
11994 Error(L, "multiple personality directives");
11995 UC.emitPersonalityLocNotes();
11996 return true;
11997 }
11998
11999 MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name);
12000 getTargetStreamer().emitPersonality(PR);
12001 return false;
12002}
12003
12004/// parseDirectiveHandlerData
12005/// ::= .handlerdata
12006bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
12007 if (parseEOL())
12008 return true;
12009
12010 UC.recordHandlerData(L);
12011 // Check the ordering of unwind directives
12012 if (!UC.hasFnStart())
12013 return Error(L, ".fnstart must precede .personality directive");
12014 if (UC.cantUnwind()) {
12015 Error(L, ".handlerdata can't be used with .cantunwind directive");
12016 UC.emitCantUnwindLocNotes();
12017 return true;
12018 }
12019
12020 getTargetStreamer().emitHandlerData();
12021 return false;
12022}
12023
12024/// parseDirectiveSetFP
12025/// ::= .setfp fpreg, spreg [, offset]
bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Check the ordering of unwind directives
  if (check(!UC.hasFnStart(), L, ".fnstart must precede .setfp directive") ||
      check(UC.hasHandlerData(), L,
            ".setfp must precede .handlerdata directive"))
    return true;

  // Parse fpreg
  SMLoc FPRegLoc = Parser.getTok().getLoc();
  int FPReg = tryParseRegister();

  if (check(FPReg == -1, FPRegLoc, "frame pointer register expected") ||
      Parser.parseComma())
    return true;

  // Parse spreg. Note that UC.getFPReg() still holds the *previous* frame
  // pointer at this point — the new one is only recorded below, after this
  // validation.
  SMLoc SPRegLoc = Parser.getTok().getLoc();
  int SPReg = tryParseRegister();
  if (check(SPReg == -1, SPRegLoc, "stack pointer register expected") ||
      check(SPReg != ARM::SP && SPReg != UC.getFPReg(), SPRegLoc,
            "register should be either $sp or the latest fp register"))
    return true;

  // Update the frame pointer register
  UC.saveFPReg(FPReg);

  // Parse the optional offset, which must be a '#'/'$'-prefixed constant.
  int64_t Offset = 0;
  if (Parser.parseOptionalToken(AsmToken::Comma)) {
    if (Parser.getTok().isNot(AsmToken::Hash) &&
        Parser.getTok().isNot(AsmToken::Dollar))
      return Error(Parser.getTok().getLoc(), "'#' expected");
    Parser.Lex(); // skip hash token.

    const MCExpr *OffsetExpr;
    SMLoc ExLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    if (getParser().parseExpression(OffsetExpr, EndLoc))
      return Error(ExLoc, "malformed setfp offset");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
    if (check(!CE, ExLoc, "setfp offset must be an immediate"))
      return true;
    Offset = CE->getValue();
  }

  if (Parser.parseEOL())
    return true;

  getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
                                static_cast<unsigned>(SPReg), Offset);
  return false;
}
12079
12080/// parseDirective
12081/// ::= .pad offset
12082bool ARMAsmParser::parseDirectivePad(SMLoc L) {
12083 MCAsmParser &Parser = getParser();
12084 // Check the ordering of unwind directives
12085 if (!UC.hasFnStart())
12086 return Error(L, ".fnstart must precede .pad directive");
12087 if (UC.hasHandlerData())
12088 return Error(L, ".pad must precede .handlerdata directive");
12089
12090 // Parse the offset
12091 if (Parser.getTok().isNot(AsmToken::Hash) &&
12092 Parser.getTok().isNot(AsmToken::Dollar))
12093 return Error(Parser.getTok().getLoc(), "'#' expected");
12094 Parser.Lex(); // skip hash token.
12095
12096 const MCExpr *OffsetExpr;
12097 SMLoc ExLoc = Parser.getTok().getLoc();
12098 SMLoc EndLoc;
12099 if (getParser().parseExpression(OffsetExpr, EndLoc))
12100 return Error(ExLoc, "malformed pad offset");
12101 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
12102 if (!CE)
12103 return Error(ExLoc, "pad offset must be an immediate");
12104
12105 if (parseEOL())
12106 return true;
12107
12108 getTargetStreamer().emitPad(CE->getValue());
12109 return false;
12110}
12111
12112/// parseDirectiveRegSave
12113/// ::= .save { registers }
12114/// ::= .vsave { registers }
/// NOTE(review): the declaration of 'Operands' appears to be elided in this
/// excerpt (after the RAII comment) — verify against the full source.
bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
  // Check the ordering of unwind directives
  if (!UC.hasFnStart())
    return Error(L, ".fnstart must precede .save or .vsave directives");
  if (UC.hasHandlerData())
    return Error(L, ".save or .vsave must precede .handlerdata directive");

  // RAII object to make sure parsed operands are deleted.

  // Parse the register list
  if (parseRegisterList(Operands, true, true) || parseEOL())
    return true;
  ARMOperand &Op = (ARMOperand &)*Operands[0];
  // .save takes a GPR list, .vsave takes a DPR list.
  if (!IsVector && !Op.isRegList())
    return Error(L, ".save expects GPR registers");
  if (IsVector && !Op.isDPRRegList())
    return Error(L, ".vsave expects DPR registers");

  getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
  return false;
}
12137
12138/// parseDirectiveInst
12139/// ::= .inst opcode [, ...]
12140/// ::= .inst.n opcode [, ...]
12141/// ::= .inst.w opcode [, ...]
bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
  // Encoding width in bytes: 4 by default; 2 for .inst.n in Thumb mode;
  // 0 means "guess per value" (Thumb with no suffix).
  int Width = 4;

  if (isThumb()) {
    switch (Suffix) {
    case 'n':
      Width = 2;
      break;
    case 'w':
      break;
    default:
      Width = 0;
      break;
    }
  } else {
    // ARM instructions are always 4 bytes, so suffixes make no sense.
    if (Suffix)
      return Error(Loc, "width suffixes are invalid in ARM mode");
  }

  // Parse one constant expression and emit it as a raw encoding.
  auto parseOne = [&]() -> bool {
    const MCExpr *Expr;
    if (getParser().parseExpression(Expr))
      return true;
    const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
    if (!Value) {
      return Error(Loc, "expected constant expression");
    }

    char CurSuffix = Suffix;
    switch (Width) {
    case 2:
      if (Value->getValue() > 0xffff)
        return Error(Loc, "inst.n operand is too big, use inst.w instead");
      break;
    case 4:
      if (Value->getValue() > 0xffffffff)
        return Error(Loc, StringRef(Suffix ? "inst.w" : "inst") +
                              " operand is too big");
      break;
    case 0:
      // Thumb mode, no width indicated. Guess from the opcode, if possible.
      if (Value->getValue() < 0xe800)
        CurSuffix = 'n';
      else if (Value->getValue() >= 0xe8000000)
        CurSuffix = 'w';
      else
        return Error(Loc, "cannot determine Thumb instruction size, "
                          "use inst.n/inst.w instead");
      break;
    default:
      llvm_unreachable("only supported widths are 2 and 4");
    }

    getTargetStreamer().emitInst(Value->getValue(), CurSuffix);
    // Raw encodings still advance any open IT/VPT block.
    forwardITPosition();
    forwardVPTPosition();
    return false;
  };

  // At least one operand is required.
  if (parseOptionalToken(AsmToken::EndOfStatement))
    return Error(Loc, "expected expression following directive");
  if (parseMany(parseOne))
    return true;
  return false;
}
12207
12208/// parseDirectiveLtorg
12209/// ::= .ltorg | .pool
bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
  if (parseEOL())
    return true;
  // Dump the accumulated constant pool at this point in the output.
  getTargetStreamer().emitCurrentConstantPool();
  return false;
}
12216
12217bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
12218 const MCSection *Section = getStreamer().getCurrentSectionOnly();
12219
12220 if (parseEOL())
12221 return true;
12222
12223 if (!Section) {
12224 getStreamer().initSections(false, getSTI());
12225 Section = getStreamer().getCurrentSectionOnly();
12226 }
12227
12228 assert(Section && "must have section to emit alignment");
12229 if (Section->useCodeAlign())
12230 getStreamer().emitCodeAlignment(Align(2), &getSTI());
12231 else
12232 getStreamer().emitValueToAlignment(Align(2));
12233
12234 return false;
12235}
12236
12237/// parseDirectivePersonalityIndex
12238/// ::= .personalityindex index
bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Capture whether a personality was already recorded before this one.
  bool HasExistingPersonality = UC.hasPersonality();

  const MCExpr *IndexExpression;
  SMLoc IndexLoc = Parser.getTok().getLoc();
  if (Parser.parseExpression(IndexExpression) || parseEOL()) {
    return true;
  }

  UC.recordPersonalityIndex(L);

  // Check the ordering of unwind directives.
  if (!UC.hasFnStart()) {
    return Error(L, ".fnstart must precede .personalityindex directive");
  }
  if (UC.cantUnwind()) {
    Error(L, ".personalityindex cannot be used with .cantunwind");
    UC.emitCantUnwindLocNotes();
    return true;
  }
  if (UC.hasHandlerData()) {
    Error(L, ".personalityindex must precede .handlerdata directive");
    UC.emitHandlerDataLocNotes();
    return true;
  }
  if (HasExistingPersonality) {
    Error(L, "multiple personality directives");
    UC.emitPersonalityLocNotes();
    return true;
  }

  // The index must be a constant within the EHABI personality-routine range.
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
  if (!CE)
    return Error(IndexLoc, "index must be a constant number");
  if (CE->getValue() < 0 || CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX)
    return Error(IndexLoc,
                 "personality routine index should be in range [0-3]");

  getTargetStreamer().emitPersonalityIndex(CE->getValue());
  return false;
}
12280
12281/// parseDirectiveUnwindRaw
12282/// ::= .unwind_raw offset, opcode [, opcode...]
/// NOTE(review): the declaration of 'Opcodes' appears to be elided in this
/// excerpt — verify against the full source.
bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
  MCAsmParser &Parser = getParser();
  int64_t StackOffset;
  const MCExpr *OffsetExpr;
  SMLoc OffsetLoc = getLexer().getLoc();

  // .unwind_raw is only valid inside a .fnstart region, and its first operand
  // (the stack offset) must be a constant.
  if (!UC.hasFnStart())
    return Error(L, ".fnstart must precede .unwind_raw directives");
  if (getParser().parseExpression(OffsetExpr))
    return Error(OffsetLoc, "expected expression");

  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
  if (!CE)
    return Error(OffsetLoc, "offset must be a constant");

  StackOffset = CE->getValue();

  if (Parser.parseComma())
    return true;

  // Parse one raw unwind opcode byte (each must fit in 8 bits).
  auto parseOne = [&]() -> bool {
    const MCExpr *OE = nullptr;
    SMLoc OpcodeLoc = getLexer().getLoc();
    if (check(getLexer().is(AsmToken::EndOfStatement) ||
                  Parser.parseExpression(OE),
              OpcodeLoc, "expected opcode expression"))
      return true;
    const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
    if (!OC)
      return Error(OpcodeLoc, "opcode value must be a constant");
    const int64_t Opcode = OC->getValue();
    if (Opcode & ~0xff)
      return Error(OpcodeLoc, "invalid opcode");
    Opcodes.push_back(uint8_t(Opcode));
    return false;
  };

  // Must have at least 1 element
  SMLoc OpcodeLoc = getLexer().getLoc();
  if (parseOptionalToken(AsmToken::EndOfStatement))
    return Error(OpcodeLoc, "expected opcode expression");
  if (parseMany(parseOne))
    return true;

  getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
  return false;
}
12332
12333/// parseDirectiveTLSDescSeq
12334/// ::= .tlsdescseq tls-variable
12335bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
12336 MCAsmParser &Parser = getParser();
12337
12338 if (getLexer().isNot(AsmToken::Identifier))
12339 return TokError("expected variable after '.tlsdescseq' directive");
12340
12341 const MCSymbolRefExpr *SRE =
12344 Lex();
12345
12346 if (parseEOL())
12347 return true;
12348
12349 getTargetStreamer().annotateTLSDescriptorSequence(SRE);
12350 return false;
12351}
12352
12353/// parseDirectiveMovSP
12354/// ::= .movsp reg [, #offset]
12355bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
12356 MCAsmParser &Parser = getParser();
12357 if (!UC.hasFnStart())
12358 return Error(L, ".fnstart must precede .movsp directives");
12359 if (UC.getFPReg() != ARM::SP)
12360 return Error(L, "unexpected .movsp directive");
12361
12362 SMLoc SPRegLoc = Parser.getTok().getLoc();
12363 int SPReg = tryParseRegister();
12364 if (SPReg == -1)
12365 return Error(SPRegLoc, "register expected");
12366 if (SPReg == ARM::SP || SPReg == ARM::PC)
12367 return Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");
12368
12369 int64_t Offset = 0;
12370 if (Parser.parseOptionalToken(AsmToken::Comma)) {
12371 if (Parser.parseToken(AsmToken::Hash, "expected #constant"))
12372 return true;
12373
12374 const MCExpr *OffsetExpr;
12375 SMLoc OffsetLoc = Parser.getTok().getLoc();
12376
12377 if (Parser.parseExpression(OffsetExpr))
12378 return Error(OffsetLoc, "malformed offset expression");
12379
12380 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
12381 if (!CE)
12382 return Error(OffsetLoc, "offset must be an immediate constant");
12383
12384 Offset = CE->getValue();
12385 }
12386
12387 if (parseEOL())
12388 return true;
12389
12390 getTargetStreamer().emitMovSP(SPReg, Offset);
12391 UC.saveFPReg(SPReg);
12392
12393 return false;
12394}
12395
12396/// parseDirectiveObjectArch
12397/// ::= .object_arch name
12398bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
12399 MCAsmParser &Parser = getParser();
12400 if (getLexer().isNot(AsmToken::Identifier))
12401 return Error(getLexer().getLoc(), "unexpected token");
12402
12403 StringRef Arch = Parser.getTok().getString();
12404 SMLoc ArchLoc = Parser.getTok().getLoc();
12405 Lex();
12406
12408
12409 if (ID == ARM::ArchKind::INVALID)
12410 return Error(ArchLoc, "unknown architecture '" + Arch + "'");
12411 if (parseToken(AsmToken::EndOfStatement))
12412 return true;
12413
12414 getTargetStreamer().emitObjectArch(ID);
12415 return false;
12416}
12417
12418/// parseDirectiveAlign
12419/// ::= .align
12420bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
12421 // NOTE: if this is not the end of the statement, fall back to the target
12422 // agnostic handling for this directive which will correctly handle this.
12423 if (parseOptionalToken(AsmToken::EndOfStatement)) {
12424 // '.align' is target specifically handled to mean 2**2 byte alignment.
12425 const MCSection *Section = getStreamer().getCurrentSectionOnly();
12426 assert(Section && "must have section to emit alignment");
12427 if (Section->useCodeAlign())
12428 getStreamer().emitCodeAlignment(Align(4), &getSTI(), 0);
12429 else
12430 getStreamer().emitValueToAlignment(Align(4), 0, 1, 0);
12431 return false;
12432 }
12433 return true;
12434}
12435
12436/// parseDirectiveThumbSet
12437/// ::= .thumb_set name, value
12438bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
12439 MCAsmParser &Parser = getParser();
12440
12442 if (check(Parser.parseIdentifier(Name),
12443 "expected identifier after '.thumb_set'") ||
12444 Parser.parseComma())
12445 return true;
12446
12447 MCSymbol *Sym;
12448 const MCExpr *Value;
12449 if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
12450 Parser, Sym, Value))
12451 return true;
12452
12453 getTargetStreamer().emitThumbSet(Sym, Value);
12454 return false;
12455}
12456
12457/// parseDirectiveSEHAllocStack
12458/// ::= .seh_stackalloc
12459/// ::= .seh_stackalloc_w
12460bool ARMAsmParser::parseDirectiveSEHAllocStack(SMLoc L, bool Wide) {
12461 int64_t Size;
12462 if (parseImmExpr(Size))
12463 return true;
12464 getTargetStreamer().emitARMWinCFIAllocStack(Size, Wide);
12465 return false;
12466}
12467
12468/// parseDirectiveSEHSaveRegs
12469/// ::= .seh_save_regs
12470/// ::= .seh_save_regs_w
12471bool ARMAsmParser::parseDirectiveSEHSaveRegs(SMLoc L, bool Wide) {
12473
12474 if (parseRegisterList(Operands) || parseEOL())
12475 return true;
12476 ARMOperand &Op = (ARMOperand &)*Operands[0];
12477 if (!Op.isRegList())
12478 return Error(L, ".seh_save_regs{_w} expects GPR registers");
12479 const SmallVectorImpl<unsigned> &RegList = Op.getRegList();
12480 uint32_t Mask = 0;
12481 for (size_t i = 0; i < RegList.size(); ++i) {
12482 unsigned Reg = MRI->getEncodingValue(RegList[i]);
12483 if (Reg == 15) // pc -> lr
12484 Reg = 14;
12485 if (Reg == 13)
12486 return Error(L, ".seh_save_regs{_w} can't include SP");
12487 assert(Reg < 16U && "Register out of range");
12488 unsigned Bit = (1u << Reg);
12489 Mask |= Bit;
12490 }
12491 if (!Wide && (Mask & 0x1f00) != 0)
12492 return Error(L,
12493 ".seh_save_regs cannot save R8-R12, needs .seh_save_regs_w");
12494 getTargetStreamer().emitARMWinCFISaveRegMask(Mask, Wide);
12495 return false;
12496}
12497
12498/// parseDirectiveSEHSaveSP
12499/// ::= .seh_save_sp
12500bool ARMAsmParser::parseDirectiveSEHSaveSP(SMLoc L) {
12501 int Reg = tryParseRegister();
12502 if (Reg == -1 || !MRI->getRegClass(ARM::GPRRegClassID).contains(Reg))
12503 return Error(L, "expected GPR");
12504 unsigned Index = MRI->getEncodingValue(Reg);
12505 if (Index > 14 || Index == 13)
12506 return Error(L, "invalid register for .seh_save_sp");
12507 getTargetStreamer().emitARMWinCFISaveSP(Index);
12508 return false;
12509}
12510
12511/// parseDirectiveSEHSaveFRegs
12512/// ::= .seh_save_fregs
12513bool ARMAsmParser::parseDirectiveSEHSaveFRegs(SMLoc L) {
12515
12516 if (parseRegisterList(Operands) || parseEOL())
12517 return true;
12518 ARMOperand &Op = (ARMOperand &)*Operands[0];
12519 if (!Op.isDPRRegList())
12520 return Error(L, ".seh_save_fregs expects DPR registers");
12521 const SmallVectorImpl<unsigned> &RegList = Op.getRegList();
12522 uint32_t Mask = 0;
12523 for (size_t i = 0; i < RegList.size(); ++i) {
12524 unsigned Reg = MRI->getEncodingValue(RegList[i]);
12525 assert(Reg < 32U && "Register out of range");
12526 unsigned Bit = (1u << Reg);
12527 Mask |= Bit;
12528 }
12529
12530 if (Mask == 0)
12531 return Error(L, ".seh_save_fregs missing registers");
12532
12533 unsigned First = 0;
12534 while ((Mask & 1) == 0) {
12535 First++;
12536 Mask >>= 1;
12537 }
12538 if (((Mask + 1) & Mask) != 0)
12539 return Error(L,
12540 ".seh_save_fregs must take a contiguous range of registers");
12541 unsigned Last = First;
12542 while ((Mask & 2) != 0) {
12543 Last++;
12544 Mask >>= 1;
12545 }
12546 if (First < 16 && Last >= 16)
12547 return Error(L, ".seh_save_fregs must be all d0-d15 or d16-d31");
12548 getTargetStreamer().emitARMWinCFISaveFRegs(First, Last);
12549 return false;
12550}
12551
12552/// parseDirectiveSEHSaveLR
12553/// ::= .seh_save_lr
12554bool ARMAsmParser::parseDirectiveSEHSaveLR(SMLoc L) {
12555 int64_t Offset;
12556 if (parseImmExpr(Offset))
12557 return true;
12558 getTargetStreamer().emitARMWinCFISaveLR(Offset);
12559 return false;
12560}
12561
/// parseDirectiveSEHPrologEnd
/// ::= .seh_endprologue
/// ::= .seh_endprologue_fragment
bool ARMAsmParser::parseDirectiveSEHPrologEnd(SMLoc L, bool Fragment) {
  // No operands to parse; Fragment distinguishes .seh_endprologue_fragment
  // from .seh_endprologue when emitting the Windows CFI marker.
  getTargetStreamer().emitARMWinCFIPrologEnd(Fragment);
  return false;
}
12569
/// parseDirectiveSEHNop
/// ::= .seh_nop
/// ::= .seh_nop_w
bool ARMAsmParser::parseDirectiveSEHNop(SMLoc L, bool Wide) {
  // No operands to parse; Wide distinguishes .seh_nop_w from .seh_nop when
  // emitting the Windows CFI marker.
  getTargetStreamer().emitARMWinCFINop(Wide);
  return false;
}
12577
12578/// parseDirectiveSEHEpilogStart
12579/// ::= .seh_startepilogue
12580/// ::= .seh_startepilogue_cond
12581bool ARMAsmParser::parseDirectiveSEHEpilogStart(SMLoc L, bool Condition) {
12582 unsigned CC = ARMCC::AL;
12583 if (Condition) {
12584 MCAsmParser &Parser = getParser();
12585 SMLoc S = Parser.getTok().getLoc();
12586 const AsmToken &Tok = Parser.getTok();
12587 if (!Tok.is(AsmToken::Identifier))
12588 return Error(S, ".seh_startepilogue_cond missing condition");
12590 if (CC == ~0U)
12591 return Error(S, "invalid condition");
12592 Parser.Lex(); // Eat the token.
12593 }
12594
12595 getTargetStreamer().emitARMWinCFIEpilogStart(CC);
12596 return false;
12597}
12598
/// parseDirectiveSEHEpilogEnd
/// ::= .seh_endepilogue
bool ARMAsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
  // No operands to parse; simply emit the end-of-epilogue CFI marker.
  getTargetStreamer().emitARMWinCFIEpilogEnd();
  return false;
}
12605
12606/// parseDirectiveSEHCustom
12607/// ::= .seh_custom
12608bool ARMAsmParser::parseDirectiveSEHCustom(SMLoc L) {
12609 unsigned Opcode = 0;
12610 do {
12611 int64_t Byte;
12612 if (parseImmExpr(Byte))
12613 return true;
12614 if (Byte > 0xff || Byte < 0)
12615 return Error(L, "Invalid byte value in .seh_custom");
12616 if (Opcode > 0x00ffffff)
12617 return Error(L, "Too many bytes in .seh_custom");
12618 // Store the bytes as one big endian number in Opcode. In a multi byte
12619 // opcode sequence, the first byte can't be zero.
12620 Opcode = (Opcode << 8) | Byte;
12621 } while (parseOptionalToken(AsmToken::Comma));
12622 getTargetStreamer().emitARMWinCFICustom(Opcode);
12623 return false;
12624}
12625
12626/// Force static initialization.
12632}
12633
12634#define GET_REGISTER_MATCHER
12635#define GET_SUBTARGET_FEATURE_NAME
12636#define GET_MATCHER_IMPLEMENTATION
12637#define GET_MNEMONIC_SPELL_CHECKER
12638#include "ARMGenAsmMatcher.inc"
12639
12640// Some diagnostics need to vary with subtarget features, so they are handled
12641// here. For example, the DPR class has either 16 or 32 registers, depending
12642// on the FPU available.
12643const char *
12644ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
12645 switch (MatchError) {
12646 // rGPR contains sp starting with ARMv8.
12647 case Match_rGPR:
12648 return hasV8Ops() ? "operand must be a register in range [r0, r14]"
12649 : "operand must be a register in range [r0, r12] or r14";
12650 // DPR contains 16 registers for some FPUs, and 32 for others.
12651 case Match_DPR:
12652 return hasD32() ? "operand must be a register in range [d0, d31]"
12653 : "operand must be a register in range [d0, d15]";
12654 case Match_DPR_RegList:
12655 return hasD32() ? "operand must be a list of registers in range [d0, d31]"
12656 : "operand must be a list of registers in range [d0, d15]";
12657
12658 // For all other diags, use the static string from tablegen.
12659 default:
12660 return getMatchKindDiag(MatchError);
12661 }
12662}
12663
12664// Process the list of near-misses, throwing away ones we don't want to report
12665// to the user, and converting the rest to a source location and string that
12666// should be reported.
12667void
12668ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
12669 SmallVectorImpl<NearMissMessage> &NearMissesOut,
12670 SMLoc IDLoc, OperandVector &Operands) {
12671 // TODO: If operand didn't match, sub in a dummy one and run target
12672 // predicate, so that we can avoid reporting near-misses that are invalid?
12673 // TODO: Many operand types dont have SuperClasses set, so we report
12674 // redundant ones.
12675 // TODO: Some operands are superclasses of registers (e.g.
12676 // MCK_RegShiftedImm), we don't have any way to represent that currently.
12677 // TODO: This is not all ARM-specific, can some of it be factored out?
12678
12679 // Record some information about near-misses that we have already seen, so
12680 // that we can avoid reporting redundant ones. For example, if there are
12681 // variants of an instruction that take 8- and 16-bit immediates, we want
12682 // to only report the widest one.
12683 std::multimap<unsigned, unsigned> OperandMissesSeen;
12684 SmallSet<FeatureBitset, 4> FeatureMissesSeen;
12685 bool ReportedTooFewOperands = false;
12686
12687 unsigned MnemonicOpsEndInd = getMnemonicOpsEndInd(Operands);
12688
12689 // Process the near-misses in reverse order, so that we see more general ones
12690 // first, and so can avoid emitting more specific ones.
12691 for (NearMissInfo &I : reverse(NearMissesIn)) {
12692 switch (I.getKind()) {
12694 SMLoc OperandLoc =
12695 ((ARMOperand &)*Operands[I.getOperandIndex()]).getStartLoc();
12696 const char *OperandDiag =
12697 getCustomOperandDiag((ARMMatchResultTy)I.getOperandError());
12698
12699 // If we have already emitted a message for a superclass, don't also report
12700 // the sub-class. We consider all operand classes that we don't have a
12701 // specialised diagnostic for to be equal for the propose of this check,
12702 // so that we don't report the generic error multiple times on the same
12703 // operand.
12704 unsigned DupCheckMatchClass = OperandDiag ? I.getOperandClass() : ~0U;
12705 auto PrevReports = OperandMissesSeen.equal_range(I.getOperandIndex());
12706 if (std::any_of(PrevReports.first, PrevReports.second,
12707 [DupCheckMatchClass](
12708 const std::pair<unsigned, unsigned> Pair) {
12709 if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
12710 return Pair.second == DupCheckMatchClass;
12711 else
12712 return isSubclass((MatchClassKind)DupCheckMatchClass,
12713 (MatchClassKind)Pair.second);
12714 }))
12715 break;
12716 OperandMissesSeen.insert(
12717 std::make_pair(I.getOperandIndex(), DupCheckMatchClass));
12718
12719 NearMissMessage Message;
12720 Message.Loc = OperandLoc;
12721 if (OperandDiag) {
12722 Message.Message = OperandDiag;
12723 } else if (I.getOperandClass() == InvalidMatchClass) {
12724 Message.Message = "too many operands for instruction";
12725 } else {
12726 Message.Message = "invalid operand for instruction";
12727 LLVM_DEBUG(
12728 dbgs() << "Missing diagnostic string for operand class "
12729 << getMatchClassName((MatchClassKind)I.getOperandClass())
12730 << I.getOperandClass() << ", error " << I.getOperandError()
12731 << ", opcode " << MII.getName(I.getOpcode()) << "\n");
12732 }
12733 NearMissesOut.emplace_back(Message);
12734 break;
12735 }
12737 const FeatureBitset &MissingFeatures = I.getFeatures();
12738 // Don't report the same set of features twice.
12739 if (FeatureMissesSeen.count(MissingFeatures))
12740 break;
12741 FeatureMissesSeen.insert(MissingFeatures);
12742
12743 // Special case: don't report a feature set which includes arm-mode for
12744 // targets that don't have ARM mode.
12745 if (MissingFeatures.test(Feature_IsARMBit) && !hasARM())
12746 break;
12747 // Don't report any near-misses that both require switching instruction
12748 // set, and adding other subtarget features.
12749 if (isThumb() && MissingFeatures.test(Feature_IsARMBit) &&
12750 MissingFeatures.count() > 1)
12751 break;
12752 if (!isThumb() && MissingFeatures.test(Feature_IsThumbBit) &&
12753 MissingFeatures.count() > 1)
12754 break;
12755 if (!isThumb() && MissingFeatures.test(Feature_IsThumb2Bit) &&
12756 (MissingFeatures & ~FeatureBitset({Feature_IsThumb2Bit,
12757 Feature_IsThumbBit})).any())
12758 break;
12759 if (isMClass() && MissingFeatures.test(Feature_HasNEONBit))
12760 break;
12761
12762 NearMissMessage Message;
12763 Message.Loc = IDLoc;
12764 raw_svector_ostream OS(Message.Message);
12765
12766 OS << "instruction requires:";
12767 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i)
12768 if (MissingFeatures.test(i))
12769 OS << ' ' << getSubtargetFeatureName(i);
12770
12771 NearMissesOut.emplace_back(Message);
12772
12773 break;
12774 }
12776 NearMissMessage Message;
12777 Message.Loc = IDLoc;
12778 switch (I.getPredicateError()) {
12779 case Match_RequiresNotITBlock:
12780 Message.Message = "flag setting instruction only valid outside IT block";
12781 break;
12782 case Match_RequiresITBlock:
12783 Message.Message = "instruction only valid inside IT block";
12784 break;
12785 case Match_RequiresV6:
12786 Message.Message = "instruction variant requires ARMv6 or later";
12787 break;
12788 case Match_RequiresThumb2:
12789 Message.Message = "instruction variant requires Thumb2";
12790 break;
12791 case Match_RequiresV8:
12792 Message.Message = "instruction variant requires ARMv8 or later";
12793 break;
12794 case Match_RequiresFlagSetting:
12795 Message.Message = "no flag-preserving variant of this instruction available";
12796 break;
12797 case Match_InvalidTiedOperand: {
12798 ARMOperand &Op = static_cast<ARMOperand &>(*Operands[0]);
12799 if (Op.isToken() && Op.getToken() == "mul") {
12800 Message.Message = "destination register must match a source register";
12801 Message.Loc = Operands[MnemonicOpsEndInd]->getStartLoc();
12802 } else {
12803 llvm_unreachable("Match_InvalidTiedOperand only used for tMUL.");
12804 }
12805 break;
12806 }
12807 case Match_InvalidOperand:
12808 Message.Message = "invalid operand for instruction";
12809 break;
12810 default:
12811 llvm_unreachable("Unhandled target predicate error");
12812 break;
12813 }
12814 NearMissesOut.emplace_back(Message);
12815 break;
12816 }
12818 if (!ReportedTooFewOperands) {
12819 SMLoc EndLoc = ((ARMOperand &)*Operands.back()).getEndLoc();
12820 NearMissesOut.emplace_back(NearMissMessage{
12821 EndLoc, StringRef("too few operands for instruction")});
12822 ReportedTooFewOperands = true;
12823 }
12824 break;
12825 }
12827 // This should never leave the matcher.
12828 llvm_unreachable("not a near-miss");
12829 break;
12830 }
12831 }
12832}
12833
12834void ARMAsmParser::ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses,
12835 SMLoc IDLoc, OperandVector &Operands) {
12837 FilterNearMisses(NearMisses, Messages, IDLoc, Operands);
12838
12839 if (Messages.size() == 0) {
12840 // No near-misses were found, so the best we can do is "invalid
12841 // instruction".
12842 Error(IDLoc, "invalid instruction");
12843 } else if (Messages.size() == 1) {
12844 // One near miss was found, report it as the sole error.
12845 Error(Messages[0].Loc, Messages[0].Message);
12846 } else {
12847 // More than one near miss, so report a generic "invalid instruction"
12848 // error, followed by notes for each of the near-misses.
12849 Error(IDLoc, "invalid instruction, any one of the following would fix this:");
12850 for (auto &M : Messages) {
12851 Note(M.Loc, M.Message);
12852 }
12853 }
12854}
12855
12856bool ARMAsmParser::enableArchExtFeature(StringRef Name, SMLoc &ExtLoc) {
12857 // FIXME: This structure should be moved inside ARMTargetParser
12858 // when we start to table-generate them, and we can use the ARM
12859 // flags below, that were generated by table-gen.
12860 static const struct {
12861 const uint64_t Kind;
12862 const FeatureBitset ArchCheck;
12863 const FeatureBitset Features;
12864 } Extensions[] = {
12865 {ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC}},
12866 {ARM::AEK_AES,
12867 {Feature_HasV8Bit},
12868 {ARM::FeatureAES, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12870 {Feature_HasV8Bit},
12871 {ARM::FeatureSHA2, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12873 {Feature_HasV8Bit},
12874 {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12876 {Feature_HasV8_1MMainlineBit},
12877 {ARM::HasMVEFloatOps}},
12878 {ARM::AEK_FP,
12879 {Feature_HasV8Bit},
12880 {ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12882 {Feature_HasV7Bit, Feature_IsNotMClassBit},
12883 {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM}},
12884 {ARM::AEK_MP,
12885 {Feature_HasV7Bit, Feature_IsNotMClassBit},
12886 {ARM::FeatureMP}},
12888 {Feature_HasV8Bit},
12889 {ARM::FeatureNEON, ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12890 {ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone}},
12891 // FIXME: Only available in A-class, isel not predicated
12892 {ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization}},
12894 {Feature_HasV8_2aBit},
12895 {ARM::FeatureFPARMv8, ARM::FeatureFullFP16}},
12896 {ARM::AEK_RAS, {Feature_HasV8Bit}, {ARM::FeatureRAS}},
12897 {ARM::AEK_LOB, {Feature_HasV8_1MMainlineBit}, {ARM::FeatureLOB}},
12898 {ARM::AEK_PACBTI, {Feature_HasV8_1MMainlineBit}, {ARM::FeaturePACBTI}},
12899 // FIXME: Unsupported extensions.
12900 {ARM::AEK_OS, {}, {}},
12901 {ARM::AEK_IWMMXT, {}, {}},
12902 {ARM::AEK_IWMMXT2, {}, {}},
12903 {ARM::AEK_MAVERICK, {}, {}},
12904 {ARM::AEK_XSCALE, {}, {}},
12905 };
12906 bool EnableFeature = !Name.consume_front_insensitive("no");
12908 if (FeatureKind == ARM::AEK_INVALID)
12909 return Error(ExtLoc, "unknown architectural extension: " + Name);
12910
12911 for (const auto &Extension : Extensions) {
12912 if (Extension.Kind != FeatureKind)
12913 continue;
12914
12915 if (Extension.Features.none())
12916 return Error(ExtLoc, "unsupported architectural extension: " + Name);
12917
12918 if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck)
12919 return Error(ExtLoc, "architectural extension '" + Name +
12920 "' is not "
12921 "allowed for the current base architecture");
12922
12923 MCSubtargetInfo &STI = copySTI();
12924 if (EnableFeature) {
12926 } else {
12928 }
12929 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
12930 setAvailableFeatures(Features);
12931 return true;
12932 }
12933 return false;
12934}
12935
12936/// parseDirectiveArchExtension
12937/// ::= .arch_extension [no]feature
12938bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
12939
12940 MCAsmParser &Parser = getParser();
12941
12942 if (getLexer().isNot(AsmToken::Identifier))
12943 return Error(getLexer().getLoc(), "expected architecture extension name");
12944
12945 StringRef Name = Parser.getTok().getString();
12946 SMLoc ExtLoc = Parser.getTok().getLoc();
12947 Lex();
12948
12949 if (parseEOL())
12950 return true;
12951
12952 if (Name == "nocrypto") {
12953 enableArchExtFeature("nosha2", ExtLoc);
12954 enableArchExtFeature("noaes", ExtLoc);
12955 }
12956
12957 if (enableArchExtFeature(Name, ExtLoc))
12958 return false;
12959
12960 return Error(ExtLoc, "unknown architectural extension: " + Name);
12961}
12962
12963// Define this matcher function after the auto-generated include so we
12964// have the match class enum definitions.
12965unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
12966 unsigned Kind) {
12967 ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
12968 // If the kind is a token for a literal immediate, check if our asm
12969 // operand matches. This is for InstAliases which have a fixed-value
12970 // immediate in the syntax.
12971 switch (Kind) {
12972 default: break;
12973 case MCK__HASH_0:
12974 if (Op.isImm())
12975 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
12976 if (CE->getValue() == 0)
12977 return Match_Success;
12978 break;
12979 case MCK__HASH_8:
12980 if (Op.isImm())
12981 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
12982 if (CE->getValue() == 8)
12983 return Match_Success;
12984 break;
12985 case MCK__HASH_16:
12986 if (Op.isImm())
12987 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
12988 if (CE->getValue() == 16)
12989 return Match_Success;
12990 break;
12991 case MCK_ModImm:
12992 if (Op.isImm()) {
12993 const MCExpr *SOExpr = Op.getImm();
12994 int64_t Value;
12995 if (!SOExpr->evaluateAsAbsolute(Value))
12996 return Match_Success;
12997 assert((Value >= std::numeric_limits<int32_t>::min() &&
12998 Value <= std::numeric_limits<uint32_t>::max()) &&
12999 "expression value must be representable in 32 bits");
13000 }
13001 break;
13002 case MCK_rGPR:
13003 if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP)
13004 return Match_Success;
13005 return Match_rGPR;
13006 // Note: This mutates the operand which could cause issues for future
13007 // matches if this one fails later.
13008 // It would be better to do this in addVecList but as this doesn't have access
13009 // to MRI this isn't possible.
13010 // If trying to match a VecListDPair with a Q register, convert Q to list.
13011 case MCK_VecListDPair:
13012 if (Op.isQReg() && !hasMVE()) {
13013 auto DPair = getDRegFromQReg(Op.getReg());
13014 DPair = MRI->getMatchingSuperReg(
13015 DPair, ARM::dsub_0, &ARMMCRegisterClasses[ARM::DPairRegClassID]);
13016 Op.setVecListDPair(DPair);
13017 return Match_Success;
13018 }
13019 return Match_InvalidOperand;
13020 // Note: This mutates the operand (see above).
13021 // If trying to match a VecListDPair with a D register, convert D singleton
13022 // list.
13023 case MCK_VecListOneD:
13024 if (Op.isDReg() && !hasMVE()) {
13025 Op.setVecListOneD(Op.getReg());
13026 return Match_Success;
13027 }
13028 return Match_InvalidOperand;
13029 }
13030 return Match_InvalidOperand;
13031}
13032
13033bool ARMAsmParser::isMnemonicVPTPredicable(StringRef Mnemonic,
13034 StringRef ExtraToken) {
13035 if (!hasMVE())
13036 return false;
13037
13038 if (MS.isVPTPredicableCDEInstr(Mnemonic) ||
13039 (Mnemonic.starts_with("vldrh") && Mnemonic != "vldrhi") ||
13040 (Mnemonic.starts_with("vmov") &&
13041 !(ExtraToken == ".f16" || ExtraToken == ".32" || ExtraToken == ".16" ||
13042 ExtraToken == ".8")) ||
13043 (Mnemonic.starts_with("vrint") && Mnemonic != "vrintr") ||
13044 (Mnemonic.starts_with("vstrh") && Mnemonic != "vstrhi"))
13045 return true;
13046
13047 const char *predicable_prefixes[] = {
13048 "vabav", "vabd", "vabs", "vadc", "vadd",
13049 "vaddlv", "vaddv", "vand", "vbic", "vbrsr",
13050 "vcadd", "vcls", "vclz", "vcmla", "vcmp",
13051 "vcmul", "vctp", "vcvt", "vddup", "vdup",
13052 "vdwdup", "veor", "vfma", "vfmas", "vfms",
13053 "vhadd", "vhcadd", "vhsub", "vidup", "viwdup",
13054 "vldrb", "vldrd", "vldrw", "vmax", "vmaxa",
13055 "vmaxav", "vmaxnm", "vmaxnma", "vmaxnmav", "vmaxnmv",
13056 "vmaxv", "vmin", "vminav", "vminnm", "vminnmav",
13057 "vminnmv", "vminv", "vmla", "vmladav", "vmlaldav",
13058 "vmlalv", "vmlas", "vmlav", "vmlsdav", "vmlsldav",
13059 "vmovlb", "vmovlt", "vmovnb", "vmovnt", "vmul",
13060 "vmvn", "vneg", "vorn", "vorr", "vpnot",
13061 "vpsel", "vqabs", "vqadd", "vqdmladh", "vqdmlah",
13062 "vqdmlash", "vqdmlsdh", "vqdmulh", "vqdmull", "vqmovn",
13063 "vqmovun", "vqneg", "vqrdmladh", "vqrdmlah", "vqrdmlash",
13064 "vqrdmlsdh", "vqrdmulh", "vqrshl", "vqrshrn", "vqrshrun",
13065 "vqshl", "vqshrn", "vqshrun", "vqsub", "vrev16",
13066 "vrev32", "vrev64", "vrhadd", "vrmlaldavh", "vrmlalvh",
13067 "vrmlsldavh", "vrmulh", "vrshl", "vrshr", "vrshrn",
13068 "vsbc", "vshl", "vshlc", "vshll", "vshr",
13069 "vshrn", "vsli", "vsri", "vstrb", "vstrd",
13070 "vstrw", "vsub"};
13071
13072 return std::any_of(
13073 std::begin(predicable_prefixes), std::end(predicable_prefixes),
13074 [&Mnemonic](const char *prefix) { return Mnemonic.starts_with(prefix); });
13075}
13076
// Default condition-code operand: AL (always), with no source location.
std::unique_ptr<ARMOperand> ARMAsmParser::defaultCondCodeOp() {
  return ARMOperand::CreateCondCode(ARMCC::AL, SMLoc());
}
13080
// Default CC-out operand: register 0 (no flag-setting), with no location.
std::unique_ptr<ARMOperand> ARMAsmParser::defaultCCOutOp() {
  return ARMOperand::CreateCCOut(0, SMLoc());
}
13084
// Default VPT predication operand: ARMVCC::None, with no source location.
std::unique_ptr<ARMOperand> ARMAsmParser::defaultVPTPredOp() {
  return ARMOperand::CreateVPTPred(ARMVCC::None, SMLoc());
}
unsigned const MachineRegisterInfo * MRI
This file defines the StringMap class.
static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static unsigned getNextRegister(unsigned Reg)
static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS, unsigned VariantID=0)
static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing)
static bool instIsBreakpoint(const MCInst &Inst)
unsigned findCCOutInd(const OperandVector &Operands, unsigned MnemonicOpsEndInd)
static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo, unsigned Reg, unsigned HiReg, bool &containsReg)
static bool isDataTypeToken(StringRef Tok)
}
static MCRegister MatchRegisterName(StringRef Name)
static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing)
unsigned getRegListInd(const OperandVector &Operands, unsigned MnemonicOpsEndInd)
static const char * getSubtargetFeatureName(uint64_t Val)
static bool isVectorPredicable(const MCInstrDesc &MCID)
static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp)
MatchCoprocessorOperandName - Try to parse a coprocessor related instruction with a symbolic operand...
static void applyMnemonicAliases(StringRef &Mnemonic, const FeatureBitset &Features, unsigned VariantID)
void removeCCOut(OperandVector &Operands, unsigned &MnemonicOpsEndInd)
static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT)
static bool insertNoDuplicates(SmallVectorImpl< std::pair< unsigned, unsigned > > &Regs, unsigned Enc, unsigned Reg)
static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID)
static bool isThumbI8Relocation(MCParsedAsmOperand &MCOp)
bool operandsContainWide(OperandVector &Operands, unsigned MnemonicOpsEndInd)
static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg)
void removeCondCode(OperandVector &Operands, unsigned &MnemonicOpsEndInd)
LLVM_EXTERNAL_VISIBILITY void LLVMInitializeARMAsmParser()
Force static initialization.
static unsigned getMnemonicOpsEndInd(const OperandVector &Operands)
static bool isARMMCExpr(MCParsedAsmOperand &MCOp)
unsigned findCondCodeInd(const OperandVector &Operands, unsigned MnemonicOpsEndInd)
void removeVPTCondCode(OperandVector &Operands, unsigned &MnemonicOpsEndInd)
static bool isThumb(const MCSubtargetInfo &STI)
static uint64_t scale(uint64_t Num, uint32_t N, uint32_t D)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static Register getFPReg(const CSKYSubtarget &STI)
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Definition: CommandLine.h:693
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
Definition: Compiler.h:301
#define LLVM_EXTERNAL_VISIBILITY
Definition: Compiler.h:135
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
std::string Name
uint64_t Size
Symbol * Sym
Definition: ELF_riscv.cpp:479
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
#define check(cond)
static cl::opt< bool > AddBuildAttributes("hexagon-add-build-attributes")
#define op(i)
#define RegName(no)
static LVOptions Options
Definition: LVOptions.cpp:25
#define I(x, y, z)
Definition: MD5.cpp:58
mir Rename Register Operands
static MSP430CC::CondCodes getCondCode(unsigned Cond)
unsigned Reg
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
uint64_t High
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
#define P(N)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI Pre allocate WWM Registers
static cl::opt< std::set< SPIRV::Extension::Extension >, false, SPIRVExtensionsParser > Extensions("spirv-ext", cl::desc("Specify list of enabled SPIR-V extensions"))
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
This file implements the SmallBitVector class.
This file defines the SmallSet class.
This file defines the SmallVector class.
StringSet - A set-like wrapper for the StringMap.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition: Value.cpp:469
APInt bitcastToAPInt() const
Definition: APFloat.h:1210
Class for arbitrary precision integers.
Definition: APInt.h:76
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1491
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=ARM::NoRegAltName)
VariantKind getKind() const
getKind - Get the kind of this expression.
Definition: ARMMCExpr.h:76
static const ARMMCExpr * create(VariantKind Kind, const MCExpr *Expr, MCContext &Ctx)
Definition: ARMMCExpr.cpp:17
Target independent representation for an assembler token.
Definition: MCAsmMacro.h:21
SMLoc getLoc() const
Definition: MCAsmLexer.cpp:26
int64_t getIntVal() const
Definition: MCAsmMacro.h:115
bool isNot(TokenKind K) const
Definition: MCAsmMacro.h:83
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on string...
Definition: MCAsmMacro.h:110
StringRef getStringContents() const
Get the contents of a string token (without quotes).
Definition: MCAsmMacro.h:90
bool is(TokenKind K) const
Definition: MCAsmMacro.h:82
SMLoc getEndLoc() const
Definition: MCAsmLexer.cpp:30
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string.
Definition: MCAsmMacro.h:99
This class represents an Operation in the Expression.
Implements a dense probed hash-table based set.
Definition: DenseSet.h:271
Base class for user error types.
Definition: Error.h:352
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
Container class for subtarget features.
constexpr bool test(unsigned I) const
size_t count() const
constexpr size_t size() const
Generic assembler lexer interface, for use by target specific assembly lexers.
Definition: MCAsmLexer.h:37
const AsmToken peekTok(bool ShouldSkipSpace=true)
Look ahead at the next token to be lexed.
Definition: MCAsmLexer.h:111
virtual void Initialize(MCAsmParser &Parser)
Initialize the extension for parsing using the given Parser.
Generic assembler parser interface, for use by target specific assembly parsers.
Definition: MCAsmParser.h:123
bool parseToken(AsmToken::TokenKind T, const Twine &Msg="unexpected token")
Definition: MCAsmParser.cpp:63
virtual bool parseEscapedString(std::string &Data)=0
Parse the current token as a string which may include escaped characters and return the string conten...
virtual MCStreamer & getStreamer()=0
Return the output streamer for the assembler.
virtual void Note(SMLoc L, const Twine &Msg, SMRange Range=std::nullopt)=0
Emit a note at the location L, with the message Msg.
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
const AsmToken & getTok() const
Get the current AsmToken from the stream.
Definition: MCAsmParser.cpp:40
virtual bool parseIdentifier(StringRef &Res)=0
Parse an identifier or string (as a quoted identifier) and set Res to the identifier contents.
bool parseOptionalToken(AsmToken::TokenKind T)
Attempt to parse and consume token, returning true on success.
Definition: MCAsmParser.cpp:80
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
virtual bool Warning(SMLoc L, const Twine &Msg, SMRange Range=std::nullopt)=0
Emit a warning at the location L, with the message Msg.
bool Error(SMLoc L, const Twine &Msg, SMRange Range=std::nullopt)
Return an error at the location L, with the message Msg.
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
Definition: MCExpr.h:536
int64_t getValue() const
Definition: MCExpr.h:173
static const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Definition: MCExpr.cpp:194
Context object for machine code objects.
Definition: MCContext.h:76
const MCRegisterInfo * getRegisterInfo() const
Definition: MCContext.h:448
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:35
@ Constant
Constant expressions.
Definition: MCExpr.h:39
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
void dump_pretty(raw_ostream &OS, const MCInstPrinter *Printer=nullptr, StringRef Separator=" ", const MCRegisterInfo *RegInfo=nullptr) const
Dump the MCInst as prettily as possible using the additional MC structures, if given.
Definition: MCInst.cpp:81
unsigned getNumOperands() const
Definition: MCInst.h:208
void setLoc(SMLoc loc)
Definition: MCInst.h:203
unsigned getOpcode() const
Definition: MCInst.h:198
iterator insert(iterator I, const MCOperand &Op)
Definition: MCInst.h:224
void addOperand(const MCOperand Op)
Definition: MCInst.h:210
iterator begin()
Definition: MCInst.h:219
void setOpcode(unsigned Op)
Definition: MCInst.h:197
const MCOperand & getOperand(unsigned i) const
Definition: MCInst.h:206
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:239
bool isIndirectBranch() const
Return true if this is an indirect branch, such as a branch through a register.
Definition: MCInstrDesc.h:311
int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
Definition: MCInstrDesc.h:609
bool hasDefOfPhysReg(const MCInst &MI, unsigned Reg, const MCRegisterInfo &RI) const
Return true if this instruction defines the specified physical register, either explicitly or implici...
Definition: MCInstrDesc.cpp:40
bool hasOptionalDef() const
Set if this instruction has an optional definition, e.g.
Definition: MCInstrDesc.h:265
unsigned short NumOperands
Definition: MCInstrDesc.h:206
bool isBranch() const
Returns true if this is a conditional, unconditional, or indirect branch.
Definition: MCInstrDesc.h:307
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
Definition: MCInstrDesc.h:219
bool isPredicable() const
Return true if this instruction has a predicate operand that controls execution.
Definition: MCInstrDesc.h:338
bool isCall() const
Return true if the instruction is a call.
Definition: MCInstrDesc.h:288
bool isTerminator() const
Returns true if this instruction part of the terminator for a basic block.
Definition: MCInstrDesc.h:301
bool isReturn() const
Return true if the instruction is a return.
Definition: MCInstrDesc.h:276
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:26
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition: MCInstrDesc.h:85
Instances of this class represent operands of the MCInst class.
Definition: MCInst.h:36
static MCOperand createReg(unsigned Reg)
Definition: MCInst.h:134
static MCOperand createExpr(const MCExpr *Val)
Definition: MCInst.h:162
int64_t getImm() const
Definition: MCInst.h:80
static MCOperand createImm(int64_t Val)
Definition: MCInst.h:141
bool isImm() const
Definition: MCInst.h:62
unsigned getReg() const
Returns the register number.
Definition: MCInst.h:69
bool isReg() const
Definition: MCInst.h:61
const MCExpr * getExpr() const
Definition: MCInst.h:114
bool isExpr() const
Definition: MCInst.h:65
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand.
virtual SMLoc getStartLoc() const =0
getStartLoc - Get the location of the first token of this operand.
virtual bool isReg() const =0
isReg - Is this a register operand?
virtual bool isMem() const =0
isMem - Is this a memory operand?
virtual MCRegister getReg() const =0
virtual void print(raw_ostream &OS) const =0
print - Print a debug representation of the operand to the given stream.
virtual bool isToken() const =0
isToken - Is this a token operand?
virtual bool isImm() const =0
isImm - Is this an immediate operand?
virtual SMLoc getEndLoc() const =0
getEndLoc - Get the location of the last token of this operand.
MCRegisterClass - Base class of TargetRegisterClass.
unsigned getID() const
getID() - Return the register class ID number.
unsigned getNumRegs() const
getNumRegs - Return the number of registers in this class.
unsigned getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
Instances of this class represent a uniqued identifier for a section in the current translation unit.
Definition: MCSection.h:39
Streaming machine code generation interface.
Definition: MCStreamer.h:212
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
virtual void emitLabel(MCSymbol *Symbol, SMLoc Loc=SMLoc())
Emit a label for Symbol into the current section.
Definition: MCStreamer.cpp:424
MCTargetStreamer * getTargetStreamer()
Definition: MCStreamer.h:304
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const FeatureBitset & getFeatureBits() const
FeatureBitset ApplyFeatureFlag(StringRef FS)
Apply a feature flag and return the re-computed feature bits, including all feature bits implied by t...
FeatureBitset SetFeatureBitsTransitively(const FeatureBitset &FB)
Set/clear additional feature bits, including all other bits they imply.
void setDefaultFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS)
Set the features to the default for the given CPU and TuneCPU, with an appended feature string.
FeatureBitset ToggleFeature(uint64_t FB)
Toggle a feature and return the re-computed feature bits.
FeatureBitset ClearFeatureBitsTransitively(const FeatureBitset &FB)
Represent a reference to a symbol from inside an expression.
Definition: MCExpr.h:192
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
Definition: MCExpr.h:397
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
Definition: MCSymbol.h:40
MCTargetAsmParser - Generic interface to target specific assembly parsers.
virtual void onLabelParsed(MCSymbol *Symbol)
MCSubtargetInfo & copySTI()
Create a copy of STI and return a non-const reference to it.
virtual bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc)=0
virtual bool ParseDirective(AsmToken DirectiveID)
ParseDirective - Parse a target specific assembler directive This method is deprecated,...
virtual unsigned checkEarlyTargetMatchPredicate(MCInst &Inst, const OperandVector &Operands)
Validate the instruction match against any complex target predicates before rendering any operands to...
virtual ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc)=0
tryParseRegister - parse one register if possible
virtual void flushPendingInstructions(MCStreamer &Out)
Ensure that all previously parsed instructions have been emitted to the output streamer,...
void setAvailableFeatures(const FeatureBitset &Value)
virtual MCSymbolRefExpr::VariantKind getVariantKindForName(StringRef Name) const
const MCSubtargetInfo & getSTI() const
virtual void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc)
virtual unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, unsigned Kind)
Allow a target to add special case operand matching for things that tblgen doesn't/can't handle effec...
virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, OperandVector &Operands)=0
ParseInstruction - Parse one assembly instruction.
virtual unsigned checkTargetMatchPredicate(MCInst &Inst)
checkTargetMatchPredicate - Validate the instruction match against any complex target predicates not ...
virtual bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, OperandVector &Operands, MCStreamer &Out, uint64_t &ErrorInfo, bool MatchingInlineAsm)=0
MatchAndEmitInstruction - Recognize a series of operands of a parsed instruction as an actual MCInst ...
Target specific streamer interface.
Definition: MCStreamer.h:93
Ternary parse status returned by various parse* methods.
constexpr bool isFailure() const
static constexpr StatusTy Failure
constexpr bool isSuccess() const
static constexpr StatusTy Success
static constexpr StatusTy NoMatch
Represents a location in source code.
Definition: SMLoc.h:23
static SMLoc getFromPointer(const char *Ptr)
Definition: SMLoc.h:36
constexpr const char * getPointer() const
Definition: SMLoc.h:34
Represents a range in source code.
Definition: SMLoc.h:48
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
typename SuperClass::const_iterator const_iterator
Definition: SmallVector.h:591
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StackOffset holds a fixed and a scalable offset in bytes.
Definition: TypeSize.h:33
StringMap - This is an unconventional map that is specialized for handling keys that are "strings",...
Definition: StringMap.h:128
iterator end()
Definition: StringMap.h:221
iterator find(StringRef Key)
Definition: StringMap.h:234
size_type count(StringRef Key) const
count - Return 1 if the element is in the map, 0 otherwise.
Definition: StringMap.h:277
void erase(iterator I)
Definition: StringMap.h:415
bool insert(MapEntryTy *KeyValue)
insert - Insert the specified key/value pair into the map.
Definition: StringMap.h:307
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Definition: StringRef.h:567
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:257
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
Definition: StringRef.h:680
constexpr size_t size() const
size - Get the string size.
Definition: StringRef.h:137
StringRef trim(char Char) const
Return string with consecutive Char characters starting from the left and right removed.
Definition: StringRef.h:811
std::string lower() const
Definition: StringRef.cpp:111
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition: StringRef.h:271
static constexpr size_t npos
Definition: StringRef.h:52
bool equals_insensitive(StringRef RHS) const
Check for string equality, ignoring case.
Definition: StringRef.h:170
StringSet - A wrapper for StringMap that provides set-like functionality.
Definition: StringSet.h:23
std::pair< typename Base::iterator, bool > insert(StringRef key)
Definition: StringSet.h:38
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
LLVM Value Representation.
Definition: Value.h:74
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition: DenseSet.h:185
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
A raw_ostream that writes to an SmallVector or SmallString.
Definition: raw_ostream.h:690
This class provides various memory handling functions that manipulate MemoryBlock instances.
Definition: Memory.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const TagNameMap & getARMAttributeTags()
static CondCodes getOppositeCondition(CondCodes CC)
Definition: ARMBaseInfo.h:48
@ ThumbArithFlagSetting
Definition: ARMBaseInfo.h:414
unsigned getSORegOffset(unsigned Op)
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
unsigned encodeNEONi16splat(unsigned Value)
float getFPImmFloat(unsigned Imm)
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
unsigned getAM2Opc(AddrOpc Opc, unsigned Imm12, ShiftOpc SO, unsigned IdxMode=0)
unsigned getAM5Opc(AddrOpc Opc, unsigned char Offset)
getAM5Opc - This function encodes the addrmode5 opc field.
ShiftOpc getSORegShOp(unsigned Op)
bool isNEONi16splat(unsigned Value)
Checks if Value is a correct immediate for instructions like VBIC/VORR.
unsigned getAM5FP16Opc(AddrOpc Opc, unsigned char Offset)
getAM5FP16Opc - This function encodes the addrmode5fp16 opc field.
unsigned getAM3Opc(AddrOpc Opc, unsigned char Offset, unsigned IdxMode=0)
getAM3Opc - This function encodes the addrmode3 opc field.
bool isNEONi32splat(unsigned Value)
Checks if Value is a correct immediate for instructions like VBIC/VORR.
unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
unsigned encodeNEONi32splat(unsigned Value)
Encode NEON 32 bits Splat immediate for instructions like VBIC/VORR.
const StringRef getShiftOpcStr(ShiftOpc Op)
static const char * IFlagsToString(unsigned val)
Definition: ARMBaseInfo.h:37
bool getFPUFeatures(FPUKind FPUKind, std::vector< StringRef > &Features)
StringRef getArchName(ArchKind AK)
uint64_t parseArchExt(StringRef ArchExt)
ArchKind parseArch(StringRef Arch)
bool isVpred(OperandType op)
FPUKind parseFPU(StringRef FPU)
bool isCDECoproc(size_t Coproc, const MCSubtargetInfo &STI)
@ D16
Only 16 D registers.
constexpr bool any(E Val)
Definition: BitmaskEnum.h:141
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
Definition: BitmaskEnum.h:121
std::optional< unsigned > attrTypeFromString(StringRef tag, TagNameMap tagNameMap)
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Definition: MCInstrDesc.h:148
bool parseAssignmentExpression(StringRef Name, bool allow_redef, MCAsmParser &Parser, MCSymbol *&Symbol, const MCExpr *&Value)
Parse a value expression and return whether it can be assigned to a symbol with the given name.
Definition: AsmParser.cpp:6404
@ CE
Windows NT (Windows on ARM)
Reg
All possible values of the reg field in the ModR/M byte.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
Definition: CommandLine.h:718
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
NodeAddr< FuncNode * > Func
Definition: RDFGraph.h:393
Format
The format used for serializing/deserializing remarks.
Definition: RemarkFormat.h:25
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
static const char * ARMVPTPredToString(ARMVCC::VPTCodes CC)
Definition: ARMBaseInfo.h:130
@ Offset
Definition: DWP.cpp:456
@ Length
Definition: DWP.cpp:456
int popcount(T Value) noexcept
Count the number of set bits in a value.
Definition: bit.h:385
Target & getTheThumbBETarget()
static unsigned ARMCondCodeFromString(StringRef CC)
Definition: ARMBaseInfo.h:167
const ARMInstrTable ARMDescs
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: bit.h:215
static bool isARMLowRegister(unsigned Reg)
isARMLowRegister - Returns true if the register is a low register (r0-r7).
Definition: ARMBaseInfo.h:160
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:428
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool is_sorted(R &&Range, Compare C)
Wrapper function around std::is_sorted to check if elements in a range R are sorted with respect to a...
Definition: STLExtras.h:1911
bool IsCPSRDead< MCInst >(const MCInst *Instr)
static bool isValidCoprocessorNumber(unsigned Num, const FeatureBitset &featureBits)
isValidCoprocessorNumber - decide whether an explicit coprocessor number is legal in generic instruct...
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ MCAF_Code16
.code16 (X86) / .code 16 (ARM)
Definition: MCDirectives.h:56
@ MCAF_Code32
.code32 (X86) / .code 32 (ARM)
Definition: MCDirectives.h:57
DWARFExpression::Operation Op
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1758
static unsigned ARMVectorCondCodeFromString(StringRef CC)
Definition: ARMBaseInfo.h:139
static const char * ARMCondCodeToString(ARMCC::CondCodes CC)
Definition: ARMBaseInfo.h:146
Target & getTheARMLETarget()
Target & getTheARMBETarget()
Target & getTheThumbLETarget()
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
const FeatureBitset Features
MCInstrDesc Insts[4445]
MCOperandInfo OperandInfo[3026]
MCPhysReg ImplicitOps[130]
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Holds functions to get, set or test bitfields.
Definition: Bitfields.h:212
RegisterMCAsmParser - Helper template for registering a target specific assembly parser,...