71#define DEBUG_TYPE "asm-parser"
// Policy for accepting conditional instructions outside an explicit IT block,
// selected by the "arm-implicit-it" command-line option below.
enum class ImplicitItModeTy {
  Always,    // accept in both ISAs, emit implicit ITs in Thumb
  Never,     // warn in ARM, reject in Thumb
  ARMOnly,   // accept in ARM, reject in Thumb
  ThumbOnly, // warn in ARM, emit implicit ITs in Thumb
};
81 "arm-implicit-it",
cl::init(ImplicitItModeTy::ARMOnly),
82 cl::desc(
"Allow conditional instructions outside of an IT block"),
84 "Accept in both ISAs, emit implicit ITs in Thumb"),
86 "Warn in ARM, reject in Thumb"),
88 "Accept in ARM, reject in Thumb"),
89 clEnumValN(ImplicitItModeTy::ThumbOnly,
"thumb",
90 "Warn in ARM, emit implicit ITs in Thumb")));
// How a vector register operand refers to its lanes.
enum VectorLaneTy {
  NoLanes,     // no lane specification on the operand
  AllLanes,    // operand names all lanes
  IndexedLane  // operand names one explicitly indexed lane
};
// Extract the single IT-block mask bit that controls the instruction at the
// given Position. The bit for position P sits (5 - P) places up from the LSB
// of the mask, so shift it down and isolate it.
static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
  const unsigned Shift = 5 - Position;
  return (Mask >> Shift) & 1u;
}
113 Locs PersonalityLocs;
114 Locs PersonalityIndexLocs;
115 Locs HandlerDataLocs;
121 bool hasFnStart()
const {
return !FnStartLocs.empty(); }
122 bool cantUnwind()
const {
return !CantUnwindLocs.empty(); }
123 bool hasHandlerData()
const {
return !HandlerDataLocs.empty(); }
125 bool hasPersonality()
const {
126 return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
129 void recordFnStart(
SMLoc L) { FnStartLocs.push_back(L); }
130 void recordCantUnwind(
SMLoc L) { CantUnwindLocs.push_back(L); }
131 void recordPersonality(
SMLoc L) { PersonalityLocs.push_back(L); }
132 void recordHandlerData(
SMLoc L) { HandlerDataLocs.push_back(L); }
133 void recordPersonalityIndex(
SMLoc L) { PersonalityIndexLocs.push_back(L); }
135 void saveFPReg(
MCRegister Reg) { FPReg = Reg; }
138 void emitFnStartLocNotes()
const {
139 for (
const SMLoc &Loc : FnStartLocs)
140 Parser.
Note(Loc,
".fnstart was specified here");
143 void emitCantUnwindLocNotes()
const {
144 for (
const SMLoc &Loc : CantUnwindLocs)
145 Parser.
Note(Loc,
".cantunwind was specified here");
148 void emitHandlerDataLocNotes()
const {
149 for (
const SMLoc &Loc : HandlerDataLocs)
150 Parser.
Note(Loc,
".handlerdata was specified here");
153 void emitPersonalityLocNotes()
const {
155 PE = PersonalityLocs.end(),
156 PII = PersonalityIndexLocs.begin(),
157 PIE = PersonalityIndexLocs.end();
158 PI != PE || PII != PIE;) {
159 if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
160 Parser.
Note(*PI++,
".personality was specified here");
161 else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
162 Parser.
Note(*PII++,
".personalityindex was specified here");
165 "at the same location");
170 FnStartLocs = Locs();
171 CantUnwindLocs = Locs();
172 PersonalityLocs = Locs();
173 HandlerDataLocs = Locs();
174 PersonalityIndexLocs = Locs();
180class ARMMnemonicSets {
191 return CDE.
count(Mnemonic);
196 bool isVPTPredicableCDEInstr(
StringRef Mnemonic) {
199 return CDEWithVPTSuffix.
count(Mnemonic);
204 bool isITPredicableCDEInstr(
StringRef Mnemonic) {
214 bool isCDEDualRegInstr(
StringRef Mnemonic) {
217 return Mnemonic ==
"cx1d" || Mnemonic ==
"cx1da" ||
218 Mnemonic ==
"cx2d" || Mnemonic ==
"cx2da" ||
219 Mnemonic ==
"cx3d" || Mnemonic ==
"cx3da";
224 for (
StringRef Mnemonic: {
"cx1",
"cx1a",
"cx1d",
"cx1da",
225 "cx2",
"cx2a",
"cx2d",
"cx2da",
226 "cx3",
"cx3a",
"cx3d",
"cx3da", })
227 CDE.insert(Mnemonic);
229 {
"vcx1",
"vcx1a",
"vcx2",
"vcx2a",
"vcx3",
"vcx3a"}) {
230 CDE.insert(Mnemonic);
231 CDEWithVPTSuffix.insert(Mnemonic);
232 CDEWithVPTSuffix.insert(std::string(Mnemonic) +
"t");
233 CDEWithVPTSuffix.insert(std::string(Mnemonic) +
"e");
244 "do not have a target streamer");
252 bool NextSymbolIsThumb;
254 bool useImplicitITThumb()
const {
255 return ImplicitItMode == ImplicitItModeTy::Always ||
256 ImplicitItMode == ImplicitItModeTy::ThumbOnly;
259 bool useImplicitITARM()
const {
260 return ImplicitItMode == ImplicitItModeTy::Always ||
261 ImplicitItMode == ImplicitItModeTy::ARMOnly;
276 unsigned CurPosition;
292 if (!inImplicitITBlock()) {
306 for (
const MCInst &Inst : PendingConditionalInsts) {
309 PendingConditionalInsts.clear();
313 ITState.CurPosition = ~0U;
316 bool inITBlock() {
return ITState.CurPosition != ~0U; }
317 bool inExplicitITBlock() {
return inITBlock() && ITState.IsExplicit; }
318 bool inImplicitITBlock() {
return inITBlock() && !ITState.IsExplicit; }
320 bool lastInITBlock() {
324 void forwardITPosition() {
325 if (!inITBlock())
return;
330 if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
331 ITState.CurPosition = ~0U;
335 void rewindImplicitITPosition() {
336 assert(inImplicitITBlock());
337 assert(ITState.CurPosition > 1);
338 ITState.CurPosition--;
340 unsigned NewMask = 0;
341 NewMask |= ITState.Mask & (0xC << TZ);
342 NewMask |= 0x2 << TZ;
343 ITState.Mask = NewMask;
348 void discardImplicitITBlock() {
349 assert(inImplicitITBlock());
350 assert(ITState.CurPosition == 1);
351 ITState.CurPosition = ~0U;
356 unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
362 void invertCurrentITCondition() {
363 if (ITState.CurPosition == 1) {
366 ITState.Mask ^= 1 << (5 - ITState.CurPosition);
371 bool isITBlockFull() {
372 return inITBlock() && (ITState.Mask & 1);
378 assert(inImplicitITBlock());
383 unsigned NewMask = 0;
385 NewMask |= ITState.Mask & (0xE << TZ);
387 NewMask |= (
Cond != ITState.Cond) << TZ;
389 NewMask |= 1 << (TZ - 1);
390 ITState.Mask = NewMask;
394 void startImplicitITBlock() {
398 ITState.CurPosition = 1;
399 ITState.IsExplicit =
false;
410 ITState.CurPosition = 0;
411 ITState.IsExplicit =
true;
416 unsigned CurPosition;
418 bool inVPTBlock() {
return VPTState.CurPosition != ~0U; }
419 void forwardVPTPosition() {
420 if (!inVPTBlock())
return;
422 if (++VPTState.CurPosition == 5 - TZ)
423 VPTState.CurPosition = ~0U;
439 unsigned MnemonicOpsEndInd,
unsigned ListIndex,
440 bool IsARPop =
false);
442 unsigned MnemonicOpsEndInd,
unsigned ListIndex);
447 std::optional<ARM_AM::ShiftOpc> tryParseShiftToken();
448 bool parseRegisterList(
OperandVector &,
bool EnforceOrder =
true,
449 bool AllowRAAC =
false,
bool IsLazyLoadStore =
false,
450 bool IsVSCCLRM =
false);
453 bool parseImmExpr(int64_t &Out);
456 unsigned &ShiftAmount);
457 bool parseLiteralValues(
unsigned Size,
SMLoc L);
458 bool parseDirectiveThumb(
SMLoc L);
459 bool parseDirectiveARM(
SMLoc L);
460 bool parseDirectiveThumbFunc(
SMLoc L);
461 bool parseDirectiveCode(
SMLoc L);
462 bool parseDirectiveSyntax(
SMLoc L);
464 bool parseDirectiveUnreq(
SMLoc L);
465 bool parseDirectiveArch(
SMLoc L);
466 bool parseDirectiveEabiAttr(
SMLoc L);
467 bool parseDirectiveCPU(
SMLoc L);
468 bool parseDirectiveFPU(
SMLoc L);
469 bool parseDirectiveFnStart(
SMLoc L);
470 bool parseDirectiveFnEnd(
SMLoc L);
471 bool parseDirectiveCantUnwind(
SMLoc L);
472 bool parseDirectivePersonality(
SMLoc L);
473 bool parseDirectiveHandlerData(
SMLoc L);
474 bool parseDirectiveSetFP(
SMLoc L);
475 bool parseDirectivePad(
SMLoc L);
476 bool parseDirectiveRegSave(
SMLoc L,
bool IsVector);
477 bool parseDirectiveInst(
SMLoc L,
char Suffix =
'\0');
478 bool parseDirectiveLtorg(
SMLoc L);
479 bool parseDirectiveEven(
SMLoc L);
480 bool parseDirectivePersonalityIndex(
SMLoc L);
481 bool parseDirectiveUnwindRaw(
SMLoc L);
482 bool parseDirectiveTLSDescSeq(
SMLoc L);
483 bool parseDirectiveMovSP(
SMLoc L);
484 bool parseDirectiveObjectArch(
SMLoc L);
485 bool parseDirectiveArchExtension(
SMLoc L);
486 bool parseDirectiveAlign(
SMLoc L);
487 bool parseDirectiveThumbSet(
SMLoc L);
489 bool parseDirectiveSEHAllocStack(
SMLoc L,
bool Wide);
490 bool parseDirectiveSEHSaveRegs(
SMLoc L,
bool Wide);
491 bool parseDirectiveSEHSaveSP(
SMLoc L);
492 bool parseDirectiveSEHSaveFRegs(
SMLoc L);
493 bool parseDirectiveSEHSaveLR(
SMLoc L);
494 bool parseDirectiveSEHPrologEnd(
SMLoc L,
bool Fragment);
495 bool parseDirectiveSEHNop(
SMLoc L,
bool Wide);
496 bool parseDirectiveSEHEpilogStart(
SMLoc L,
bool Condition);
497 bool parseDirectiveSEHEpilogEnd(
SMLoc L);
498 bool parseDirectiveSEHCustom(
SMLoc L);
500 std::unique_ptr<ARMOperand> defaultCondCodeOp();
501 std::unique_ptr<ARMOperand> defaultCCOutOp();
502 std::unique_ptr<ARMOperand> defaultVPTPredOp();
508 bool &CarrySetting,
unsigned &ProcessorIMod,
511 StringRef FullInst,
bool &CanAcceptCarrySet,
512 bool &CanAcceptPredicationCode,
513 bool &CanAcceptVPTPredicationCode);
516 void tryConvertingToTwoOperandForm(
StringRef Mnemonic,
519 unsigned MnemonicOpsEndInd);
522 unsigned MnemonicOpsEndInd);
529 bool isThumbOne()
const {
533 bool isThumbTwo()
const {
537 bool hasThumb()
const {
541 bool hasThumb2()
const {
545 bool hasV6Ops()
const {
549 bool hasV6T2Ops()
const {
553 bool hasV6MOps()
const {
557 bool hasV7Ops()
const {
561 bool hasV8Ops()
const {
565 bool hasV8MBaseline()
const {
569 bool hasV8MMainline()
const {
572 bool hasV8_1MMainline()
const {
575 bool hasMVEFloat()
const {
578 bool hasCDE()
const {
581 bool has8MSecExt()
const {
585 bool hasARM()
const {
589 bool hasDSP()
const {
593 bool hasD32()
const {
597 bool hasV8_1aOps()
const {
601 bool hasRAS()
const {
607 auto FB = ComputeAvailableFeatures(STI.
ToggleFeature(ARM::ModeThumb));
611 void FixModeAfterArchChange(
bool WasThumb,
SMLoc Loc);
613 bool isMClass()
const {
620#define GET_ASSEMBLER_HEADER
621#include "ARMGenAsmMatcher.inc"
652 ParseStatus parseVectorLane(VectorLaneTy &LaneKind,
unsigned &Index,
661 unsigned MnemonicOpsEndInd);
664 bool shouldOmitVectorPredicateOperand(
StringRef Mnemonic,
666 unsigned MnemonicOpsEndInd);
667 bool isITBlockTerminator(
MCInst &Inst)
const;
670 unsigned MnemonicOpsEndInd);
672 bool ARMMode,
bool Writeback,
673 unsigned MnemonicOpsEndInd);
676 enum ARMMatchResultTy {
678 Match_RequiresNotITBlock,
680 Match_RequiresThumb2,
682 Match_RequiresFlagSetting,
683#define GET_OPERAND_DIAGNOSTIC_TYPES
684#include "ARMGenAsmMatcher.inc"
701 getTargetStreamer().emitTargetAttributes(STI);
704 ITState.CurPosition = ~0
U;
706 VPTState.CurPosition = ~0
U;
708 NextSymbolIsThumb =
false;
714 SMLoc &EndLoc)
override;
720 unsigned Kind)
override;
729 bool MatchingInlineAsm)
override;
732 bool MatchingInlineAsm,
bool &EmitInITBlock,
735 struct NearMissMessage {
740 const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
755 const MCInstrDesc &getInstrDesc(
unsigned int Opcode)
const {
756 return MII.get(Opcode);
763 return MRI->getSubReg(QReg, ARM::dsub_0);
782 k_InstSyncBarrierOpt,
783 k_TraceSyncBarrierOpt,
792 k_RegisterListWithAPSR,
795 k_FPSRegisterListWithVPR,
796 k_FPDRegisterListWithVPR,
798 k_VectorListAllLanes,
805 k_ConstantPoolImmediate,
806 k_BitfieldDescriptor,
810 SMLoc StartLoc, EndLoc, AlignmentLoc;
813 ARMAsmParser *Parser;
827 struct CoprocOptionOp {
869 struct VectorListOp {
876 struct VectorIndexOp {
895 unsigned isNegative : 1;
898 struct PostIdxRegOp {
905 struct ShifterImmOp {
910 struct RegShiftedRegOp {
917 struct RegShiftedImmOp {
941 struct CoprocOptionOp CoprocOption;
942 struct MBOptOp MBOpt;
943 struct ISBOptOp ISBOpt;
944 struct TSBOptOp TSBOpt;
945 struct ITMaskOp ITMask;
947 struct MMaskOp MMask;
948 struct BankedRegOp BankedReg;
951 struct VectorListOp VectorList;
952 struct VectorIndexOp VectorIndex;
955 struct PostIdxRegOp PostIdxReg;
956 struct ShifterImmOp ShifterImm;
957 struct RegShiftedRegOp RegShiftedReg;
958 struct RegShiftedImmOp RegShiftedImm;
959 struct RotImmOp RotImm;
960 struct ModImmOp ModImm;
965 ARMOperand(KindTy K, ARMAsmParser &Parser) :
Kind(
K), Parser(&Parser) {}
978 SMLoc getAlignmentLoc()
const {
979 assert(Kind == k_Memory &&
"Invalid access!");
984 assert(Kind == k_CondCode &&
"Invalid access!");
989 assert(isVPTPred() &&
"Invalid access!");
993 unsigned getCoproc()
const {
994 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) &&
"Invalid access!");
999 assert(Kind == k_Token &&
"Invalid access!");
1004 assert((Kind == k_Register || Kind == k_CCOut) &&
"Invalid access!");
1009 assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
1010 Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
1011 Kind == k_FPSRegisterListWithVPR ||
1012 Kind == k_FPDRegisterListWithVPR) &&
1017 const MCExpr *getImm()
const {
1022 const MCExpr *getConstantPoolImm()
const {
1023 assert(isConstantPoolImm() &&
"Invalid access!");
1027 unsigned getVectorIndex()
const {
1028 assert(Kind == k_VectorIndex &&
"Invalid access!");
1029 return VectorIndex.Val;
1033 assert(Kind == k_MemBarrierOpt &&
"Invalid access!");
1038 assert(Kind == k_InstSyncBarrierOpt &&
"Invalid access!");
1043 assert(Kind == k_TraceSyncBarrierOpt &&
"Invalid access!");
1048 assert(Kind == k_ProcIFlags &&
"Invalid access!");
1052 unsigned getMSRMask()
const {
1053 assert(Kind == k_MSRMask &&
"Invalid access!");
1057 unsigned getBankedReg()
const {
1058 assert(Kind == k_BankedReg &&
"Invalid access!");
1059 return BankedReg.Val;
1062 bool isCoprocNum()
const {
return Kind == k_CoprocNum; }
1063 bool isCoprocReg()
const {
return Kind == k_CoprocReg; }
1064 bool isCoprocOption()
const {
return Kind == k_CoprocOption; }
1065 bool isCondCode()
const {
return Kind == k_CondCode; }
1066 bool isVPTPred()
const {
return Kind == k_VPTPred; }
1067 bool isCCOut()
const {
return Kind == k_CCOut; }
1068 bool isITMask()
const {
return Kind == k_ITCondMask; }
1069 bool isITCondCode()
const {
return Kind == k_CondCode; }
1070 bool isImm()
const override {
1071 return Kind == k_Immediate;
1074 bool isARMBranchTarget()
const {
1075 if (!
isImm())
return false;
1077 if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1078 return CE->getValue() % 4 == 0;
1083 bool isThumbBranchTarget()
const {
1084 if (!
isImm())
return false;
1086 if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1087 return CE->getValue() % 2 == 0;
1093 template<
unsigned w
idth,
unsigned scale>
1094 bool isUnsignedOffset()
const {
1095 if (!
isImm())
return false;
1096 if (isa<MCSymbolRefExpr>(
Imm.Val))
return true;
1098 int64_t Val =
CE->getValue();
1100 int64_t
Max =
Align * ((1LL << width) - 1);
1101 return ((Val %
Align) == 0) && (Val >= 0) && (Val <= Max);
1108 template<
unsigned w
idth,
unsigned scale>
1109 bool isSignedOffset()
const {
1110 if (!
isImm())
return false;
1111 if (isa<MCSymbolRefExpr>(
Imm.Val))
return true;
1113 int64_t Val =
CE->getValue();
1115 int64_t
Max =
Align * ((1LL << (width-1)) - 1);
1116 int64_t Min = -
Align * (1LL << (width-1));
1117 return ((Val %
Align) == 0) && (Val >= Min) && (Val <= Max);
1124 bool isLEOffset()
const {
1125 if (!
isImm())
return false;
1126 if (isa<MCSymbolRefExpr>(
Imm.Val))
return true;
1128 int64_t Val =
CE->getValue();
1129 return Val < 0 && Val >= -4094 && (Val & 1) == 0;
1138 bool isThumbMemPC()
const {
1141 if (isa<MCSymbolRefExpr>(
Imm.Val))
return true;
1143 if (!CE)
return false;
1144 Val =
CE->getValue();
1146 else if (isGPRMem()) {
1147 if(!
Memory.OffsetImm ||
Memory.OffsetRegNum)
return false;
1148 if(
Memory.BaseRegNum != ARM::PC)
return false;
1149 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
1150 Val =
CE->getValue();
1155 return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
1158 bool isFPImm()
const {
1159 if (!
isImm())
return false;
1161 if (!CE || !isUInt<32>(
CE->getValue()))
1167 template<
int64_t N,
int64_t M>
1168 bool isImmediate()
const {
1169 if (!
isImm())
return false;
1171 if (!CE)
return false;
1172 int64_t
Value =
CE->getValue();
1176 template<
int64_t N,
int64_t M>
1177 bool isImmediateS4()
const {
1178 if (!
isImm())
return false;
1180 if (!CE)
return false;
1181 int64_t
Value =
CE->getValue();
1184 template<
int64_t N,
int64_t M>
1185 bool isImmediateS2()
const {
1186 if (!
isImm())
return false;
1188 if (!CE)
return false;
1189 int64_t
Value =
CE->getValue();
1192 bool isFBits16()
const {
1193 return isImmediate<0, 17>();
1195 bool isFBits32()
const {
1196 return isImmediate<1, 33>();
1198 bool isImm8s4()
const {
1199 return isImmediateS4<-1020, 1020>();
1201 bool isImm7s4()
const {
1202 return isImmediateS4<-508, 508>();
1204 bool isImm7Shift0()
const {
1205 return isImmediate<-127, 127>();
1207 bool isImm7Shift1()
const {
1208 return isImmediateS2<-255, 255>();
1210 bool isImm7Shift2()
const {
1211 return isImmediateS4<-511, 511>();
1213 bool isImm7()
const {
1214 return isImmediate<-127, 127>();
1216 bool isImm0_1020s4()
const {
1217 return isImmediateS4<0, 1020>();
1219 bool isImm0_508s4()
const {
1220 return isImmediateS4<0, 508>();
1222 bool isImm0_508s4Neg()
const {
1223 if (!
isImm())
return false;
1225 if (!CE)
return false;
1226 int64_t
Value = -
CE->getValue();
1231 bool isImm0_4095Neg()
const {
1232 if (!
isImm())
return false;
1234 if (!CE)
return false;
1239 if ((
CE->getValue() >> 32) > 0)
return false;
1244 bool isImm0_7()
const {
1245 return isImmediate<0, 7>();
1248 bool isImm1_16()
const {
1249 return isImmediate<1, 16>();
1252 bool isImm1_32()
const {
1253 return isImmediate<1, 32>();
1256 bool isImm8_255()
const {
1257 return isImmediate<8, 255>();
1260 bool isImm0_255Expr()
const {
1268 int64_t
Value =
CE->getValue();
1269 return isUInt<8>(
Value);
1272 bool isImm256_65535Expr()
const {
1273 if (!
isImm())
return false;
1277 if (!CE)
return true;
1278 int64_t
Value =
CE->getValue();
1282 bool isImm0_65535Expr()
const {
1283 if (!
isImm())
return false;
1287 if (!CE)
return true;
1288 int64_t
Value =
CE->getValue();
1292 bool isImm24bit()
const {
1293 return isImmediate<0, 0xffffff + 1>();
1296 bool isImmThumbSR()
const {
1297 return isImmediate<1, 33>();
1300 bool isPKHLSLImm()
const {
1301 return isImmediate<0, 32>();
1304 bool isPKHASRImm()
const {
1305 return isImmediate<0, 33>();
1308 bool isAdrLabel()
const {
1311 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1315 if (!
isImm())
return false;
1317 if (!CE)
return false;
1318 int64_t
Value =
CE->getValue();
1323 bool isT2SOImm()
const {
1326 if (
isImm() && !isa<MCConstantExpr>(getImm())) {
1329 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
1333 if (!
isImm())
return false;
1335 if (!CE)
return false;
1336 int64_t
Value =
CE->getValue();
1340 bool isT2SOImmNot()
const {
1341 if (!
isImm())
return false;
1343 if (!CE)
return false;
1344 int64_t
Value =
CE->getValue();
1349 bool isT2SOImmNeg()
const {
1350 if (!
isImm())
return false;
1352 if (!CE)
return false;
1353 int64_t
Value =
CE->getValue();
1359 bool isSetEndImm()
const {
1360 if (!
isImm())
return false;
1362 if (!CE)
return false;
1363 int64_t
Value =
CE->getValue();
1367 bool isReg()
const override {
return Kind == k_Register; }
1368 bool isRegList()
const {
return Kind == k_RegisterList; }
1369 bool isRegListWithAPSR()
const {
1370 return Kind == k_RegisterListWithAPSR ||
Kind == k_RegisterList;
1372 bool isDReg()
const {
1374 ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
Reg.RegNum);
1376 bool isQReg()
const {
1378 ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
Reg.RegNum);
1380 bool isDPRRegList()
const {
return Kind == k_DPRRegisterList; }
1381 bool isSPRRegList()
const {
return Kind == k_SPRRegisterList; }
1382 bool isFPSRegListWithVPR()
const {
return Kind == k_FPSRegisterListWithVPR; }
1383 bool isFPDRegListWithVPR()
const {
return Kind == k_FPDRegisterListWithVPR; }
1384 bool isToken()
const override {
return Kind == k_Token; }
1385 bool isMemBarrierOpt()
const {
return Kind == k_MemBarrierOpt; }
1386 bool isInstSyncBarrierOpt()
const {
return Kind == k_InstSyncBarrierOpt; }
1387 bool isTraceSyncBarrierOpt()
const {
return Kind == k_TraceSyncBarrierOpt; }
1388 bool isMem()
const override {
1389 return isGPRMem() || isMVEMem();
1391 bool isMVEMem()
const {
1392 if (Kind != k_Memory)
1395 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
Memory.BaseRegNum) &&
1396 !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
Memory.BaseRegNum))
1398 if (
Memory.OffsetRegNum &&
1399 !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1404 bool isGPRMem()
const {
1405 if (Kind != k_Memory)
1408 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
Memory.BaseRegNum))
1410 if (
Memory.OffsetRegNum &&
1411 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
Memory.OffsetRegNum))
1415 bool isShifterImm()
const {
return Kind == k_ShifterImmediate; }
1416 bool isRegShiftedReg()
const {
1417 return Kind == k_ShiftedRegister &&
1418 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1419 RegShiftedReg.SrcReg) &&
1420 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1421 RegShiftedReg.ShiftReg);
1423 bool isRegShiftedImm()
const {
1424 return Kind == k_ShiftedImmediate &&
1425 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1426 RegShiftedImm.SrcReg);
1428 bool isRotImm()
const {
return Kind == k_RotateImmediate; }
1430 template<
unsigned Min,
unsigned Max>
1431 bool isPowerTwoInRange()
const {
1432 if (!
isImm())
return false;
1434 if (!CE)
return false;
1435 int64_t
Value =
CE->getValue();
1439 bool isModImm()
const {
return Kind == k_ModifiedImmediate; }
1441 bool isModImmNot()
const {
1442 if (!
isImm())
return false;
1444 if (!CE)
return false;
1445 int64_t
Value =
CE->getValue();
1449 bool isModImmNeg()
const {
1450 if (!
isImm())
return false;
1452 if (!CE)
return false;
1453 int64_t
Value =
CE->getValue();
1458 bool isThumbModImmNeg1_7()
const {
1459 if (!
isImm())
return false;
1461 if (!CE)
return false;
1462 int32_t
Value = -(int32_t)
CE->getValue();
1466 bool isThumbModImmNeg8_255()
const {
1467 if (!
isImm())
return false;
1469 if (!CE)
return false;
1470 int32_t
Value = -(int32_t)
CE->getValue();
1474 bool isConstantPoolImm()
const {
return Kind == k_ConstantPoolImmediate; }
1475 bool isBitfield()
const {
return Kind == k_BitfieldDescriptor; }
1476 bool isPostIdxRegShifted()
const {
1477 return Kind == k_PostIndexRegister &&
1478 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
1480 bool isPostIdxReg()
const {
1483 bool isMemNoOffset(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1487 return !
Memory.OffsetRegNum &&
Memory.OffsetImm ==
nullptr &&
1488 (alignOK ||
Memory.Alignment == Alignment);
1490 bool isMemNoOffsetT2(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1494 if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].
contains(
1499 return !
Memory.OffsetRegNum &&
Memory.OffsetImm ==
nullptr &&
1500 (alignOK ||
Memory.Alignment == Alignment);
1502 bool isMemNoOffsetT2NoSp(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1506 if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].
contains(
1511 return !
Memory.OffsetRegNum &&
Memory.OffsetImm ==
nullptr &&
1512 (alignOK ||
Memory.Alignment == Alignment);
1514 bool isMemNoOffsetT(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1518 if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].
contains(
1523 return !
Memory.OffsetRegNum &&
Memory.OffsetImm ==
nullptr &&
1524 (alignOK ||
Memory.Alignment == Alignment);
1526 bool isMemPCRelImm12()
const {
1527 if (!isGPRMem() ||
Memory.OffsetRegNum ||
Memory.Alignment != 0)
1530 if (
Memory.BaseRegNum != ARM::PC)
1533 if (!
Memory.OffsetImm)
return true;
1534 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1535 int64_t Val =
CE->getValue();
1536 return (Val > -4096 && Val < 4096) ||
1537 (Val == std::numeric_limits<int32_t>::min());
1542 bool isAlignedMemory()
const {
1543 return isMemNoOffset(
true);
1546 bool isAlignedMemoryNone()
const {
1547 return isMemNoOffset(
false, 0);
1550 bool isDupAlignedMemoryNone()
const {
1551 return isMemNoOffset(
false, 0);
1554 bool isAlignedMemory16()
const {
1555 if (isMemNoOffset(
false, 2))
1557 return isMemNoOffset(
false, 0);
1560 bool isDupAlignedMemory16()
const {
1561 if (isMemNoOffset(
false, 2))
1563 return isMemNoOffset(
false, 0);
1566 bool isAlignedMemory32()
const {
1567 if (isMemNoOffset(
false, 4))
1569 return isMemNoOffset(
false, 0);
1572 bool isDupAlignedMemory32()
const {
1573 if (isMemNoOffset(
false, 4))
1575 return isMemNoOffset(
false, 0);
1578 bool isAlignedMemory64()
const {
1579 if (isMemNoOffset(
false, 8))
1581 return isMemNoOffset(
false, 0);
1584 bool isDupAlignedMemory64()
const {
1585 if (isMemNoOffset(
false, 8))
1587 return isMemNoOffset(
false, 0);
1590 bool isAlignedMemory64or128()
const {
1591 if (isMemNoOffset(
false, 8))
1593 if (isMemNoOffset(
false, 16))
1595 return isMemNoOffset(
false, 0);
1598 bool isDupAlignedMemory64or128()
const {
1599 if (isMemNoOffset(
false, 8))
1601 if (isMemNoOffset(
false, 16))
1603 return isMemNoOffset(
false, 0);
1606 bool isAlignedMemory64or128or256()
const {
1607 if (isMemNoOffset(
false, 8))
1609 if (isMemNoOffset(
false, 16))
1611 if (isMemNoOffset(
false, 32))
1613 return isMemNoOffset(
false, 0);
1616 bool isAddrMode2()
const {
1617 if (!isGPRMem() ||
Memory.Alignment != 0)
return false;
1619 if (
Memory.OffsetRegNum)
return true;
1621 if (!
Memory.OffsetImm)
return true;
1622 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1623 int64_t Val =
CE->getValue();
1624 return Val > -4096 && Val < 4096;
1629 bool isAM2OffsetImm()
const {
1630 if (!
isImm())
return false;
1633 if (!CE)
return false;
1634 int64_t Val =
CE->getValue();
1635 return (Val == std::numeric_limits<int32_t>::min()) ||
1636 (Val > -4096 && Val < 4096);
1639 bool isAddrMode3()
const {
1643 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1645 if (!isGPRMem() ||
Memory.Alignment != 0)
return false;
1649 if (
Memory.OffsetRegNum)
return true;
1651 if (!
Memory.OffsetImm)
return true;
1652 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1653 int64_t Val =
CE->getValue();
1656 return (Val > -256 && Val < 256) ||
1657 Val == std::numeric_limits<int32_t>::min();
1662 bool isAM3Offset()
const {
1669 if (!CE)
return false;
1670 int64_t Val =
CE->getValue();
1672 return (Val > -256 && Val < 256) ||
1673 Val == std::numeric_limits<int32_t>::min();
1676 bool isAddrMode5()
const {
1680 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1682 if (!isGPRMem() ||
Memory.Alignment != 0)
return false;
1684 if (
Memory.OffsetRegNum)
return false;
1686 if (!
Memory.OffsetImm)
return true;
1687 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1688 int64_t Val =
CE->getValue();
1689 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1690 Val == std::numeric_limits<int32_t>::min();
1695 bool isAddrMode5FP16()
const {
1699 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1701 if (!isGPRMem() ||
Memory.Alignment != 0)
return false;
1703 if (
Memory.OffsetRegNum)
return false;
1705 if (!
Memory.OffsetImm)
return true;
1706 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1707 int64_t Val =
CE->getValue();
1708 return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1709 Val == std::numeric_limits<int32_t>::min();
1714 bool isMemTBB()
const {
1715 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.isNegative ||
1721 bool isMemTBH()
const {
1722 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.isNegative ||
1729 bool isMemRegOffset()
const {
1730 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.Alignment != 0)
1735 bool isT2MemRegOffset()
const {
1736 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.isNegative ||
1747 bool isMemThumbRR()
const {
1750 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.isNegative ||
1757 bool isMemThumbRIs4()
const {
1758 if (!isGPRMem() ||
Memory.OffsetRegNum ||
1762 if (!
Memory.OffsetImm)
return true;
1763 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1764 int64_t Val =
CE->getValue();
1765 return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1770 bool isMemThumbRIs2()
const {
1771 if (!isGPRMem() ||
Memory.OffsetRegNum ||
1775 if (!
Memory.OffsetImm)
return true;
1776 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1777 int64_t Val =
CE->getValue();
1778 return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1783 bool isMemThumbRIs1()
const {
1784 if (!isGPRMem() ||
Memory.OffsetRegNum ||
1788 if (!
Memory.OffsetImm)
return true;
1789 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1790 int64_t Val =
CE->getValue();
1791 return Val >= 0 && Val <= 31;
1796 bool isMemThumbSPI()
const {
1797 if (!isGPRMem() ||
Memory.OffsetRegNum ||
Memory.BaseRegNum != ARM::SP ||
1801 if (!
Memory.OffsetImm)
return true;
1802 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1803 int64_t Val =
CE->getValue();
1804 return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1809 bool isMemImm8s4Offset()
const {
1813 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1815 if (!isGPRMem() ||
Memory.OffsetRegNum ||
Memory.Alignment != 0)
1818 if (!
Memory.OffsetImm)
return true;
1819 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1820 int64_t Val =
CE->getValue();
1822 return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1823 Val == std::numeric_limits<int32_t>::min();
1828 bool isMemImm7s4Offset()
const {
1832 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1834 if (!isGPRMem() ||
Memory.OffsetRegNum ||
Memory.Alignment != 0 ||
1835 !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1839 if (!
Memory.OffsetImm)
return true;
1840 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1841 int64_t Val =
CE->getValue();
1843 return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
1848 bool isMemImm0_1020s4Offset()
const {
1849 if (!isGPRMem() ||
Memory.OffsetRegNum ||
Memory.Alignment != 0)
1852 if (!
Memory.OffsetImm)
return true;
1853 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1854 int64_t Val =
CE->getValue();
1855 return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1860 bool isMemImm8Offset()
const {
1861 if (!isGPRMem() ||
Memory.OffsetRegNum ||
Memory.Alignment != 0)
1864 if (
Memory.BaseRegNum == ARM::PC)
return false;
1866 if (!
Memory.OffsetImm)
return true;
1867 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1868 int64_t Val =
CE->getValue();
1869 return (Val == std::numeric_limits<int32_t>::min()) ||
1870 (Val > -256 && Val < 256);
1875 template<
unsigned Bits,
unsigned RegClassID>
1876 bool isMemImm7ShiftedOffset()
const {
1877 if (!isGPRMem() ||
Memory.OffsetRegNum ||
Memory.Alignment != 0 ||
1878 !ARMMCRegisterClasses[RegClassID].contains(
Memory.BaseRegNum))
1884 if (!
Memory.OffsetImm)
return true;
1885 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1886 int64_t Val =
CE->getValue();
1890 if (Val == INT32_MIN)
1893 unsigned Divisor = 1U <<
Bits;
1896 if (Val % Divisor != 0)
1901 return (Val >= -127 && Val <= 127);
1906 template <
int shift>
bool isMemRegRQOffset()
const {
1907 if (!isMVEMem() ||
Memory.OffsetImm !=
nullptr ||
Memory.Alignment != 0)
1910 if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].
contains(
1913 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
1927 template <
int shift>
bool isMemRegQOffset()
const {
1928 if (!isMVEMem() ||
Memory.OffsetRegNum ||
Memory.Alignment != 0)
1931 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
1937 static_assert(shift < 56,
1938 "Such that we dont shift by a value higher than 62");
1939 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1940 int64_t Val =
CE->getValue();
1943 if ((Val & ((1U << shift) - 1)) != 0)
1949 int64_t
Range = (1U << (7 + shift)) - 1;
1950 return (Val == INT32_MIN) || (Val > -
Range && Val <
Range);
1955 bool isMemPosImm8Offset()
const {
1956 if (!isGPRMem() ||
Memory.OffsetRegNum ||
Memory.Alignment != 0)
1959 if (!
Memory.OffsetImm)
return true;
1960 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1961 int64_t Val =
CE->getValue();
1962 return Val >= 0 && Val < 256;
1967 bool isMemNegImm8Offset()
const {
1968 if (!isGPRMem() ||
Memory.OffsetRegNum ||
Memory.Alignment != 0)
1971 if (
Memory.BaseRegNum == ARM::PC)
return false;
1973 if (!
Memory.OffsetImm)
return false;
1974 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1975 int64_t Val =
CE->getValue();
1976 return (Val == std::numeric_limits<int32_t>::min()) ||
1977 (Val > -256 && Val < 0);
1982 bool isMemUImm12Offset()
const {
1983 if (!isGPRMem() ||
Memory.OffsetRegNum ||
Memory.Alignment != 0)
1986 if (!
Memory.OffsetImm)
return true;
1987 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1988 int64_t Val =
CE->getValue();
1989 return (Val >= 0 && Val < 4096);
1994 bool isMemImm12Offset()
const {
1999 if (
isImm() && !isa<MCConstantExpr>(getImm()))
2002 if (!isGPRMem() ||
Memory.OffsetRegNum ||
Memory.Alignment != 0)
2005 if (!
Memory.OffsetImm)
return true;
2006 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
2007 int64_t Val =
CE->getValue();
2008 return (Val > -4096 && Val < 4096) ||
2009 (Val == std::numeric_limits<int32_t>::min());
2016 bool isConstPoolAsmImm()
const {
2019 return (isConstantPoolImm());
2022 bool isPostIdxImm8()
const {
2023 if (!
isImm())
return false;
2025 if (!CE)
return false;
2026 int64_t Val =
CE->getValue();
2027 return (Val > -256 && Val < 256) ||
2028 (Val == std::numeric_limits<int32_t>::min());
2031 bool isPostIdxImm8s4()
const {
2032 if (!
isImm())
return false;
2034 if (!CE)
return false;
2035 int64_t Val =
CE->getValue();
2036 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
2037 (Val == std::numeric_limits<int32_t>::min());
2040 bool isMSRMask()
const {
return Kind == k_MSRMask; }
2041 bool isBankedReg()
const {
return Kind == k_BankedReg; }
2042 bool isProcIFlags()
const {
return Kind == k_ProcIFlags; }
2045 bool isAnyVectorList()
const {
2046 return Kind == k_VectorList ||
Kind == k_VectorListAllLanes ||
2047 Kind == k_VectorListIndexed;
2050 bool isVectorList()
const {
return Kind == k_VectorList; }
2052 bool isSingleSpacedVectorList()
const {
2053 return Kind == k_VectorList && !VectorList.isDoubleSpaced;
2056 bool isDoubleSpacedVectorList()
const {
2057 return Kind == k_VectorList && VectorList.isDoubleSpaced;
2060 bool isVecListOneD()
const {
2062 if (isDReg() && !Parser->hasMVE())
2064 if (!isSingleSpacedVectorList())
return false;
2065 return VectorList.Count == 1;
2068 bool isVecListTwoMQ()
const {
2069 return isSingleSpacedVectorList() && VectorList.Count == 2 &&
2070 ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2074 bool isVecListDPair()
const {
2077 if (isQReg() && !Parser->hasMVE())
2079 if (!isSingleSpacedVectorList())
return false;
2080 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2084 bool isVecListThreeD()
const {
2085 if (!isSingleSpacedVectorList())
return false;
2086 return VectorList.Count == 3;
2089 bool isVecListFourD()
const {
2090 if (!isSingleSpacedVectorList())
return false;
2091 return VectorList.Count == 4;
2094 bool isVecListDPairSpaced()
const {
2095 if (Kind != k_VectorList)
return false;
2096 if (isSingleSpacedVectorList())
return false;
2097 return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
2101 bool isVecListThreeQ()
const {
2102 if (!isDoubleSpacedVectorList())
return false;
2103 return VectorList.Count == 3;
2106 bool isVecListFourQ()
const {
2107 if (!isDoubleSpacedVectorList())
return false;
2108 return VectorList.Count == 4;
2111 bool isVecListFourMQ()
const {
2112 return isSingleSpacedVectorList() && VectorList.Count == 4 &&
2113 ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2117 bool isSingleSpacedVectorAllLanes()
const {
2118 return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
2121 bool isDoubleSpacedVectorAllLanes()
const {
2122 return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
2125 bool isVecListOneDAllLanes()
const {
2126 if (!isSingleSpacedVectorAllLanes())
return false;
2127 return VectorList.Count == 1;
2130 bool isVecListDPairAllLanes()
const {
2131 if (!isSingleSpacedVectorAllLanes())
return false;
2132 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2136 bool isVecListDPairSpacedAllLanes()
const {
2137 if (!isDoubleSpacedVectorAllLanes())
return false;
2138 return VectorList.Count == 2;
2141 bool isVecListThreeDAllLanes()
const {
2142 if (!isSingleSpacedVectorAllLanes())
return false;
2143 return VectorList.Count == 3;
2146 bool isVecListThreeQAllLanes()
const {
2147 if (!isDoubleSpacedVectorAllLanes())
return false;
2148 return VectorList.Count == 3;
2151 bool isVecListFourDAllLanes()
const {
2152 if (!isSingleSpacedVectorAllLanes())
return false;
2153 return VectorList.Count == 4;
2156 bool isVecListFourQAllLanes()
const {
2157 if (!isDoubleSpacedVectorAllLanes())
return false;
2158 return VectorList.Count == 4;
2161 bool isSingleSpacedVectorIndexed()
const {
2162 return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
2165 bool isDoubleSpacedVectorIndexed()
const {
2166 return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
2169 bool isVecListOneDByteIndexed()
const {
2170 if (!isSingleSpacedVectorIndexed())
return false;
2171 return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
2174 bool isVecListOneDHWordIndexed()
const {
2175 if (!isSingleSpacedVectorIndexed())
return false;
2176 return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
2179 bool isVecListOneDWordIndexed()
const {
2180 if (!isSingleSpacedVectorIndexed())
return false;
2181 return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
2184 bool isVecListTwoDByteIndexed()
const {
2185 if (!isSingleSpacedVectorIndexed())
return false;
2186 return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
2189 bool isVecListTwoDHWordIndexed()
const {
2190 if (!isSingleSpacedVectorIndexed())
return false;
2191 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2194 bool isVecListTwoQWordIndexed()
const {
2195 if (!isDoubleSpacedVectorIndexed())
return false;
2196 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2199 bool isVecListTwoQHWordIndexed()
const {
2200 if (!isDoubleSpacedVectorIndexed())
return false;
2201 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2204 bool isVecListTwoDWordIndexed()
const {
2205 if (!isSingleSpacedVectorIndexed())
return false;
2206 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2209 bool isVecListThreeDByteIndexed()
const {
2210 if (!isSingleSpacedVectorIndexed())
return false;
2211 return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
2214 bool isVecListThreeDHWordIndexed()
const {
2215 if (!isSingleSpacedVectorIndexed())
return false;
2216 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2219 bool isVecListThreeQWordIndexed()
const {
2220 if (!isDoubleSpacedVectorIndexed())
return false;
2221 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2224 bool isVecListThreeQHWordIndexed()
const {
2225 if (!isDoubleSpacedVectorIndexed())
return false;
2226 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2229 bool isVecListThreeDWordIndexed()
const {
2230 if (!isSingleSpacedVectorIndexed())
return false;
2231 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2234 bool isVecListFourDByteIndexed()
const {
2235 if (!isSingleSpacedVectorIndexed())
return false;
2236 return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
2239 bool isVecListFourDHWordIndexed()
const {
2240 if (!isSingleSpacedVectorIndexed())
return false;
2241 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2244 bool isVecListFourQWordIndexed()
const {
2245 if (!isDoubleSpacedVectorIndexed())
return false;
2246 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2249 bool isVecListFourQHWordIndexed()
const {
2250 if (!isDoubleSpacedVectorIndexed())
return false;
2251 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2254 bool isVecListFourDWordIndexed()
const {
2255 if (!isSingleSpacedVectorIndexed())
return false;
2256 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2259 bool isVectorIndex()
const {
return Kind == k_VectorIndex; }
2261 template <
unsigned NumLanes>
2262 bool isVectorIndexInRange()
const {
2263 if (Kind != k_VectorIndex)
return false;
2264 return VectorIndex.Val < NumLanes;
2267 bool isVectorIndex8()
const {
return isVectorIndexInRange<8>(); }
2268 bool isVectorIndex16()
const {
return isVectorIndexInRange<4>(); }
2269 bool isVectorIndex32()
const {
return isVectorIndexInRange<2>(); }
2270 bool isVectorIndex64()
const {
return isVectorIndexInRange<1>(); }
2272 template<
int PermittedValue,
int OtherPermittedValue>
2273 bool isMVEPairVectorIndex()
const {
2274 if (Kind != k_VectorIndex)
return false;
2275 return VectorIndex.Val == PermittedValue ||
2276 VectorIndex.Val == OtherPermittedValue;
2279 bool isNEONi8splat()
const {
2280 if (!
isImm())
return false;
2283 if (!CE)
return false;
2284 int64_t
Value =
CE->getValue();
2291 if (isNEONByteReplicate(2))
2297 if (!CE)
return false;
2298 unsigned Value =
CE->getValue();
2302 bool isNEONi16splatNot()
const {
2307 if (!CE)
return false;
2308 unsigned Value =
CE->getValue();
2313 if (isNEONByteReplicate(4))
2319 if (!CE)
return false;
2320 unsigned Value =
CE->getValue();
2324 bool isNEONi32splatNot()
const {
2329 if (!CE)
return false;
2330 unsigned Value =
CE->getValue();
2334 static bool isValidNEONi32vmovImm(int64_t
Value) {
2337 return ((
Value & 0xffffffffffffff00) == 0) ||
2338 ((
Value & 0xffffffffffff00ff) == 0) ||
2339 ((
Value & 0xffffffffff00ffff) == 0) ||
2340 ((
Value & 0xffffffff00ffffff) == 0) ||
2341 ((
Value & 0xffffffffffff00ff) == 0xff) ||
2342 ((
Value & 0xffffffffff00ffff) == 0xffff);
2345 bool isNEONReplicate(
unsigned Width,
unsigned NumElems,
bool Inv)
const {
2346 assert((Width == 8 || Width == 16 || Width == 32) &&
2347 "Invalid element width");
2348 assert(NumElems * Width <= 64 &&
"Invalid result width");
2356 int64_t
Value =
CE->getValue();
2364 if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
2366 if (Width == 32 && !isValidNEONi32vmovImm(Elem))
2369 for (
unsigned i = 1; i < NumElems; ++i) {
2371 if ((
Value & Mask) != Elem)
2377 bool isNEONByteReplicate(
unsigned NumBytes)
const {
2378 return isNEONReplicate(8, NumBytes,
false);
2381 static void checkNeonReplicateArgs(
unsigned FromW,
unsigned ToW) {
2382 assert((FromW == 8 || FromW == 16 || FromW == 32) &&
2383 "Invalid source width");
2384 assert((ToW == 16 || ToW == 32 || ToW == 64) &&
2385 "Invalid destination width");
2386 assert(FromW < ToW &&
"ToW is not less than FromW");
2389 template<
unsigned FromW,
unsigned ToW>
2390 bool isNEONmovReplicate()
const {
2391 checkNeonReplicateArgs(FromW, ToW);
2392 if (ToW == 64 && isNEONi64splat())
2394 return isNEONReplicate(FromW, ToW / FromW,
false);
2397 template<
unsigned FromW,
unsigned ToW>
2398 bool isNEONinvReplicate()
const {
2399 checkNeonReplicateArgs(FromW, ToW);
2400 return isNEONReplicate(FromW, ToW / FromW,
true);
2403 bool isNEONi32vmov()
const {
2404 if (isNEONByteReplicate(4))
2412 return isValidNEONi32vmovImm(
CE->getValue());
2415 bool isNEONi32vmovNeg()
const {
2416 if (!
isImm())
return false;
2419 if (!CE)
return false;
2420 return isValidNEONi32vmovImm(~
CE->getValue());
2423 bool isNEONi64splat()
const {
2424 if (!
isImm())
return false;
2427 if (!CE)
return false;
2430 for (
unsigned i = 0; i < 8; ++i, Value >>= 8)
2431 if ((
Value & 0xff) != 0 && (
Value & 0xff) != 0xff)
return false;
2435 template<
int64_t Angle,
int64_t Remainder>
2436 bool isComplexRotation()
const {
2437 if (!
isImm())
return false;
2440 if (!CE)
return false;
2443 return (
Value % Angle == Remainder &&
Value <= 270);
2446 bool isMVELongShift()
const {
2447 if (!
isImm())
return false;
2450 if (!CE)
return false;
2455 bool isMveSaturateOp()
const {
2456 if (!
isImm())
return false;
2458 if (!CE)
return false;
2463 bool isITCondCodeNoAL()
const {
2464 if (!isITCondCode())
return false;
2469 bool isITCondCodeRestrictedI()
const {
2470 if (!isITCondCode())
2476 bool isITCondCodeRestrictedS()
const {
2477 if (!isITCondCode())
2484 bool isITCondCodeRestrictedU()
const {
2485 if (!isITCondCode())
2491 bool isITCondCodeRestrictedFP()
const {
2492 if (!isITCondCode())
2499 void setVecListDPair(
unsigned int DPair) {
2500 Kind = k_VectorList;
2501 VectorList.RegNum = DPair;
2502 VectorList.Count = 2;
2503 VectorList.isDoubleSpaced =
false;
2506 void setVecListOneD(
unsigned int DReg) {
2507 Kind = k_VectorList;
2508 VectorList.RegNum =
DReg;
2509 VectorList.Count = 1;
2510 VectorList.isDoubleSpaced =
false;
2517 else if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2523 void addARMBranchTargetOperands(
MCInst &Inst,
unsigned N)
const {
2524 assert(
N == 1 &&
"Invalid number of operands!");
2525 addExpr(Inst, getImm());
2528 void addThumbBranchTargetOperands(
MCInst &Inst,
unsigned N)
const {
2529 assert(
N == 1 &&
"Invalid number of operands!");
2530 addExpr(Inst, getImm());
2533 void addCondCodeOperands(
MCInst &Inst,
unsigned N)
const {
2534 assert(
N == 2 &&
"Invalid number of operands!");
2540 void addVPTPredNOperands(
MCInst &Inst,
unsigned N)
const {
2541 assert(
N == 3 &&
"Invalid number of operands!");
2543 unsigned RegNum = getVPTPred() ==
ARMVCC::None ? ARM::NoRegister : ARM::P0;
2548 void addVPTPredROperands(
MCInst &Inst,
unsigned N)
const {
2549 assert(
N == 4 &&
"Invalid number of operands!");
2550 addVPTPredNOperands(Inst,
N-1);
2553 RegNum = ARM::NoRegister;
2556 auto &MCID = Parser->getInstrDesc(Inst.
getOpcode());
2557 int TiedOp = MCID.getOperandConstraint(NextOpIndex,
MCOI::TIED_TO);
2559 "Inactive register in vpred_r is not tied to an output!");
2565 void addCoprocNumOperands(
MCInst &Inst,
unsigned N)
const {
2566 assert(
N == 1 &&
"Invalid number of operands!");
2570 void addCoprocRegOperands(
MCInst &Inst,
unsigned N)
const {
2571 assert(
N == 1 &&
"Invalid number of operands!");
2575 void addCoprocOptionOperands(
MCInst &Inst,
unsigned N)
const {
2576 assert(
N == 1 &&
"Invalid number of operands!");
2580 void addITMaskOperands(
MCInst &Inst,
unsigned N)
const {
2581 assert(
N == 1 &&
"Invalid number of operands!");
2585 void addITCondCodeOperands(
MCInst &Inst,
unsigned N)
const {
2586 assert(
N == 1 &&
"Invalid number of operands!");
2590 void addITCondCodeInvOperands(
MCInst &Inst,
unsigned N)
const {
2591 assert(
N == 1 &&
"Invalid number of operands!");
2595 void addCCOutOperands(
MCInst &Inst,
unsigned N)
const {
2596 assert(
N == 1 &&
"Invalid number of operands!");
2600 void addRegOperands(
MCInst &Inst,
unsigned N)
const {
2601 assert(
N == 1 &&
"Invalid number of operands!");
2605 void addRegShiftedRegOperands(
MCInst &Inst,
unsigned N)
const {
2606 assert(
N == 3 &&
"Invalid number of operands!");
2607 assert(isRegShiftedReg() &&
2608 "addRegShiftedRegOperands() on non-RegShiftedReg!");
2615 void addRegShiftedImmOperands(
MCInst &Inst,
unsigned N)
const {
2616 assert(
N == 2 &&
"Invalid number of operands!");
2617 assert(isRegShiftedImm() &&
2618 "addRegShiftedImmOperands() on non-RegShiftedImm!");
2621 unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2626 void addShifterImmOperands(
MCInst &Inst,
unsigned N)
const {
2627 assert(
N == 1 &&
"Invalid number of operands!");
2632 void addRegListOperands(
MCInst &Inst,
unsigned N)
const {
2633 assert(
N == 1 &&
"Invalid number of operands!");
2639 void addRegListWithAPSROperands(
MCInst &Inst,
unsigned N)
const {
2640 assert(
N == 1 &&
"Invalid number of operands!");
2646 void addDPRRegListOperands(
MCInst &Inst,
unsigned N)
const {
2647 addRegListOperands(Inst,
N);
2650 void addSPRRegListOperands(
MCInst &Inst,
unsigned N)
const {
2651 addRegListOperands(Inst,
N);
2654 void addFPSRegListWithVPROperands(
MCInst &Inst,
unsigned N)
const {
2655 addRegListOperands(Inst,
N);
2658 void addFPDRegListWithVPROperands(
MCInst &Inst,
unsigned N)
const {
2659 addRegListOperands(Inst,
N);
2662 void addRotImmOperands(
MCInst &Inst,
unsigned N)
const {
2663 assert(
N == 1 &&
"Invalid number of operands!");
2668 void addModImmOperands(
MCInst &Inst,
unsigned N)
const {
2669 assert(
N == 1 &&
"Invalid number of operands!");
2673 return addImmOperands(Inst,
N);
2678 void addModImmNotOperands(
MCInst &Inst,
unsigned N)
const {
2679 assert(
N == 1 &&
"Invalid number of operands!");
2685 void addModImmNegOperands(
MCInst &Inst,
unsigned N)
const {
2686 assert(
N == 1 &&
"Invalid number of operands!");
2692 void addThumbModImmNeg8_255Operands(
MCInst &Inst,
unsigned N)
const {
2693 assert(
N == 1 &&
"Invalid number of operands!");
2699 void addThumbModImmNeg1_7Operands(
MCInst &Inst,
unsigned N)
const {
2700 assert(
N == 1 &&
"Invalid number of operands!");
2706 void addBitfieldOperands(
MCInst &Inst,
unsigned N)
const {
2707 assert(
N == 1 &&
"Invalid number of operands!");
2713 (32 - (lsb + width)));
2717 void addImmOperands(
MCInst &Inst,
unsigned N)
const {
2718 assert(
N == 1 &&
"Invalid number of operands!");
2719 addExpr(Inst, getImm());
2722 void addFBits16Operands(
MCInst &Inst,
unsigned N)
const {
2723 assert(
N == 1 &&
"Invalid number of operands!");
2728 void addFBits32Operands(
MCInst &Inst,
unsigned N)
const {
2729 assert(
N == 1 &&
"Invalid number of operands!");
2734 void addFPImmOperands(
MCInst &Inst,
unsigned N)
const {
2735 assert(
N == 1 &&
"Invalid number of operands!");
2741 void addImm8s4Operands(
MCInst &Inst,
unsigned N)
const {
2742 assert(
N == 1 &&
"Invalid number of operands!");
2749 void addImm7s4Operands(
MCInst &Inst,
unsigned N)
const {
2750 assert(
N == 1 &&
"Invalid number of operands!");
2757 void addImm7Shift0Operands(
MCInst &Inst,
unsigned N)
const {
2758 assert(
N == 1 &&
"Invalid number of operands!");
2763 void addImm7Shift1Operands(
MCInst &Inst,
unsigned N)
const {
2764 assert(
N == 1 &&
"Invalid number of operands!");
2769 void addImm7Shift2Operands(
MCInst &Inst,
unsigned N)
const {
2770 assert(
N == 1 &&
"Invalid number of operands!");
2775 void addImm7Operands(
MCInst &Inst,
unsigned N)
const {
2776 assert(
N == 1 &&
"Invalid number of operands!");
2781 void addImm0_1020s4Operands(
MCInst &Inst,
unsigned N)
const {
2782 assert(
N == 1 &&
"Invalid number of operands!");
2789 void addImm0_508s4NegOperands(
MCInst &Inst,
unsigned N)
const {
2790 assert(
N == 1 &&
"Invalid number of operands!");
2797 void addImm0_508s4Operands(
MCInst &Inst,
unsigned N)
const {
2798 assert(
N == 1 &&
"Invalid number of operands!");
2805 void addImm1_16Operands(
MCInst &Inst,
unsigned N)
const {
2806 assert(
N == 1 &&
"Invalid number of operands!");
2813 void addImm1_32Operands(
MCInst &Inst,
unsigned N)
const {
2814 assert(
N == 1 &&
"Invalid number of operands!");
2821 void addImmThumbSROperands(
MCInst &Inst,
unsigned N)
const {
2822 assert(
N == 1 &&
"Invalid number of operands!");
2826 unsigned Imm =
CE->getValue();
2830 void addPKHASRImmOperands(
MCInst &Inst,
unsigned N)
const {
2831 assert(
N == 1 &&
"Invalid number of operands!");
2835 int Val =
CE->getValue();
2839 void addT2SOImmNotOperands(
MCInst &Inst,
unsigned N)
const {
2840 assert(
N == 1 &&
"Invalid number of operands!");
2847 void addT2SOImmNegOperands(
MCInst &Inst,
unsigned N)
const {
2848 assert(
N == 1 &&
"Invalid number of operands!");
2855 void addImm0_4095NegOperands(
MCInst &Inst,
unsigned N)
const {
2856 assert(
N == 1 &&
"Invalid number of operands!");
2863 void addUnsignedOffset_b8s2Operands(
MCInst &Inst,
unsigned N)
const {
2864 if(
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2872 void addThumbMemPCOperands(
MCInst &Inst,
unsigned N)
const {
2873 assert(
N == 1 &&
"Invalid number of operands!");
2885 assert(isGPRMem() &&
"Unknown value type!");
2886 assert(isa<MCConstantExpr>(
Memory.OffsetImm) &&
"Unknown value type!");
2887 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
2893 void addMemBarrierOptOperands(
MCInst &Inst,
unsigned N)
const {
2894 assert(
N == 1 &&
"Invalid number of operands!");
2898 void addInstSyncBarrierOptOperands(
MCInst &Inst,
unsigned N)
const {
2899 assert(
N == 1 &&
"Invalid number of operands!");
2903 void addTraceSyncBarrierOptOperands(
MCInst &Inst,
unsigned N)
const {
2904 assert(
N == 1 &&
"Invalid number of operands!");
2908 void addMemNoOffsetOperands(
MCInst &Inst,
unsigned N)
const {
2909 assert(
N == 1 &&
"Invalid number of operands!");
2913 void addMemNoOffsetT2Operands(
MCInst &Inst,
unsigned N)
const {
2914 assert(
N == 1 &&
"Invalid number of operands!");
2918 void addMemNoOffsetT2NoSpOperands(
MCInst &Inst,
unsigned N)
const {
2919 assert(
N == 1 &&
"Invalid number of operands!");
2923 void addMemNoOffsetTOperands(
MCInst &Inst,
unsigned N)
const {
2924 assert(
N == 1 &&
"Invalid number of operands!");
2928 void addMemPCRelImm12Operands(
MCInst &Inst,
unsigned N)
const {
2929 assert(
N == 1 &&
"Invalid number of operands!");
2930 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
2936 void addAdrLabelOperands(
MCInst &Inst,
unsigned N)
const {
2937 assert(
N == 1 &&
"Invalid number of operands!");
2942 if (!isa<MCConstantExpr>(getImm())) {
2948 int Val =
CE->getValue();
2952 void addAlignedMemoryOperands(
MCInst &Inst,
unsigned N)
const {
2953 assert(
N == 2 &&
"Invalid number of operands!");
2958 void addDupAlignedMemoryNoneOperands(
MCInst &Inst,
unsigned N)
const {
2959 addAlignedMemoryOperands(Inst,
N);
2962 void addAlignedMemoryNoneOperands(
MCInst &Inst,
unsigned N)
const {
2963 addAlignedMemoryOperands(Inst,
N);
2966 void addAlignedMemory16Operands(
MCInst &Inst,
unsigned N)
const {
2967 addAlignedMemoryOperands(Inst,
N);
2970 void addDupAlignedMemory16Operands(
MCInst &Inst,
unsigned N)
const {
2971 addAlignedMemoryOperands(Inst,
N);
2974 void addAlignedMemory32Operands(
MCInst &Inst,
unsigned N)
const {
2975 addAlignedMemoryOperands(Inst,
N);
2978 void addDupAlignedMemory32Operands(
MCInst &Inst,
unsigned N)
const {
2979 addAlignedMemoryOperands(Inst,
N);
2982 void addAlignedMemory64Operands(
MCInst &Inst,
unsigned N)
const {
2983 addAlignedMemoryOperands(Inst,
N);
2986 void addDupAlignedMemory64Operands(
MCInst &Inst,
unsigned N)
const {
2987 addAlignedMemoryOperands(Inst,
N);
2990 void addAlignedMemory64or128Operands(
MCInst &Inst,
unsigned N)
const {
2991 addAlignedMemoryOperands(Inst,
N);
2994 void addDupAlignedMemory64or128Operands(
MCInst &Inst,
unsigned N)
const {
2995 addAlignedMemoryOperands(Inst,
N);
2998 void addAlignedMemory64or128or256Operands(
MCInst &Inst,
unsigned N)
const {
2999 addAlignedMemoryOperands(Inst,
N);
3002 void addAddrMode2Operands(
MCInst &Inst,
unsigned N)
const {
3003 assert(
N == 3 &&
"Invalid number of operands!");
3006 if (!
Memory.OffsetRegNum) {
3009 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
3010 int32_t Val =
CE->getValue();
3013 if (Val == std::numeric_limits<int32_t>::min())
3031 void addAM2OffsetImmOperands(
MCInst &Inst,
unsigned N)
const {
3032 assert(
N == 2 &&
"Invalid number of operands!");
3034 assert(CE &&
"non-constant AM2OffsetImm operand!");
3035 int32_t Val =
CE->getValue();
3038 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3039 if (Val < 0) Val = -Val;
3045 void addAddrMode3Operands(
MCInst &Inst,
unsigned N)
const {
3046 assert(
N == 3 &&
"Invalid number of operands!");
3059 if (!
Memory.OffsetRegNum) {
3062 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
3063 int32_t Val =
CE->getValue();
3066 if (Val == std::numeric_limits<int32_t>::min())
3083 void addAM3OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3084 assert(
N == 2 &&
"Invalid number of operands!");
3085 if (Kind == k_PostIndexRegister) {
3095 int32_t Val =
CE->getValue();
3098 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3099 if (Val < 0) Val = -Val;
3105 void addAddrMode5Operands(
MCInst &Inst,
unsigned N)
const {
3106 assert(
N == 2 &&
"Invalid number of operands!");
3119 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
3121 int32_t Val =
CE->getValue() / 4;
3124 if (Val == std::numeric_limits<int32_t>::min())
3134 void addAddrMode5FP16Operands(
MCInst &Inst,
unsigned N)
const {
3135 assert(
N == 2 &&
"Invalid number of operands!");
3149 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
3150 int32_t Val =
CE->getValue() / 2;
3153 if (Val == std::numeric_limits<int32_t>::min())
3163 void addMemImm8s4OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3164 assert(
N == 2 &&
"Invalid number of operands!");
3175 addExpr(Inst,
Memory.OffsetImm);
3178 void addMemImm7s4OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3179 assert(
N == 2 &&
"Invalid number of operands!");
3190 addExpr(Inst,
Memory.OffsetImm);
3193 void addMemImm0_1020s4OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3194 assert(
N == 2 &&
"Invalid number of operands!");
3198 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
3205 void addMemImmOffsetOperands(
MCInst &Inst,
unsigned N)
const {
3206 assert(
N == 2 &&
"Invalid number of operands!");
3208 addExpr(Inst,
Memory.OffsetImm);
3211 void addMemRegRQOffsetOperands(
MCInst &Inst,
unsigned N)
const {
3212 assert(
N == 2 &&
"Invalid number of operands!");
3217 void addMemUImm12OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3218 assert(
N == 2 &&
"Invalid number of operands!");
3221 addExpr(Inst, getImm());
3228 addExpr(Inst,
Memory.OffsetImm);
3231 void addMemImm12OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3232 assert(
N == 2 &&
"Invalid number of operands!");
3235 addExpr(Inst, getImm());
3242 addExpr(Inst,
Memory.OffsetImm);
3245 void addConstPoolAsmImmOperands(
MCInst &Inst,
unsigned N)
const {
3246 assert(
N == 1 &&
"Invalid number of operands!");
3249 addExpr(Inst, getConstantPoolImm());
3252 void addMemTBBOperands(
MCInst &Inst,
unsigned N)
const {
3253 assert(
N == 2 &&
"Invalid number of operands!");
3258 void addMemTBHOperands(
MCInst &Inst,
unsigned N)
const {
3259 assert(
N == 2 &&
"Invalid number of operands!");
3264 void addMemRegOffsetOperands(
MCInst &Inst,
unsigned N)
const {
3265 assert(
N == 3 &&
"Invalid number of operands!");
3274 void addT2MemRegOffsetOperands(
MCInst &Inst,
unsigned N)
const {
3275 assert(
N == 3 &&
"Invalid number of operands!");
3281 void addMemThumbRROperands(
MCInst &Inst,
unsigned N)
const {
3282 assert(
N == 2 &&
"Invalid number of operands!");
3287 void addMemThumbRIs4Operands(
MCInst &Inst,
unsigned N)
const {
3288 assert(
N == 2 &&
"Invalid number of operands!");
3292 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
3299 void addMemThumbRIs2Operands(
MCInst &Inst,
unsigned N)
const {
3300 assert(
N == 2 &&
"Invalid number of operands!");
3304 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
3310 void addMemThumbRIs1Operands(
MCInst &Inst,
unsigned N)
const {
3311 assert(
N == 2 &&
"Invalid number of operands!");
3313 addExpr(Inst,
Memory.OffsetImm);
3316 void addMemThumbSPIOperands(
MCInst &Inst,
unsigned N)
const {
3317 assert(
N == 2 &&
"Invalid number of operands!");
3321 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
3328 void addPostIdxImm8Operands(
MCInst &Inst,
unsigned N)
const {
3329 assert(
N == 1 &&
"Invalid number of operands!");
3331 assert(CE &&
"non-constant post-idx-imm8 operand!");
3332 int Imm =
CE->getValue();
3333 bool isAdd =
Imm >= 0;
3334 if (Imm == std::numeric_limits<int32_t>::min())
Imm = 0;
3339 void addPostIdxImm8s4Operands(
MCInst &Inst,
unsigned N)
const {
3340 assert(
N == 1 &&
"Invalid number of operands!");
3342 assert(CE &&
"non-constant post-idx-imm8s4 operand!");
3343 int Imm =
CE->getValue();
3344 bool isAdd =
Imm >= 0;
3345 if (Imm == std::numeric_limits<int32_t>::min())
Imm = 0;
3351 void addPostIdxRegOperands(
MCInst &Inst,
unsigned N)
const {
3352 assert(
N == 2 &&
"Invalid number of operands!");
3357 void addPostIdxRegShiftedOperands(
MCInst &Inst,
unsigned N)
const {
3358 assert(
N == 2 &&
"Invalid number of operands!");
3364 PostIdxReg.ShiftTy);
3368 void addPowerTwoOperands(
MCInst &Inst,
unsigned N)
const {
3369 assert(
N == 1 &&
"Invalid number of operands!");
3374 void addMSRMaskOperands(
MCInst &Inst,
unsigned N)
const {
3375 assert(
N == 1 &&
"Invalid number of operands!");
3379 void addBankedRegOperands(
MCInst &Inst,
unsigned N)
const {
3380 assert(
N == 1 &&
"Invalid number of operands!");
3384 void addProcIFlagsOperands(
MCInst &Inst,
unsigned N)
const {
3385 assert(
N == 1 &&
"Invalid number of operands!");
3389 void addVecListOperands(
MCInst &Inst,
unsigned N)
const {
3390 assert(
N == 1 &&
"Invalid number of operands!");
3392 if (isAnyVectorList())
3394 else if (isDReg() && !Parser->hasMVE()) {
3396 }
else if (isQReg() && !Parser->hasMVE()) {
3398 DPair = Parser->getMRI()->getMatchingSuperReg(
3399 DPair, ARM::dsub_0, &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3404 "attempted to add a vector list register with wrong type!");
3408 void addMVEVecListOperands(
MCInst &Inst,
unsigned N)
const {
3409 assert(
N == 1 &&
"Invalid number of operands!");
3425 const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
3427 (VectorList.Count == 2) ? &ARMMCRegisterClasses[ARM::MQQPRRegClassID]
3428 : &ARMMCRegisterClasses[ARM::MQQQQPRRegClassID];
3431 for (
I = 0;
I <
E;
I++)
3434 assert(
I <
E &&
"Invalid vector list start register!");
3439 void addVecListIndexedOperands(
MCInst &Inst,
unsigned N)
const {
3440 assert(
N == 2 &&
"Invalid number of operands!");
3445 void addVectorIndex8Operands(
MCInst &Inst,
unsigned N)
const {
3446 assert(
N == 1 &&
"Invalid number of operands!");
3450 void addVectorIndex16Operands(
MCInst &Inst,
unsigned N)
const {
3451 assert(
N == 1 &&
"Invalid number of operands!");
3455 void addVectorIndex32Operands(
MCInst &Inst,
unsigned N)
const {
3456 assert(
N == 1 &&
"Invalid number of operands!");
3460 void addVectorIndex64Operands(
MCInst &Inst,
unsigned N)
const {
3461 assert(
N == 1 &&
"Invalid number of operands!");
3465 void addMVEVectorIndexOperands(
MCInst &Inst,
unsigned N)
const {
3466 assert(
N == 1 &&
"Invalid number of operands!");
3470 void addMVEPairVectorIndexOperands(
MCInst &Inst,
unsigned N)
const {
3471 assert(
N == 1 &&
"Invalid number of operands!");
3475 void addNEONi8splatOperands(
MCInst &Inst,
unsigned N)
const {
3476 assert(
N == 1 &&
"Invalid number of operands!");
3483 void addNEONi16splatOperands(
MCInst &Inst,
unsigned N)
const {
3484 assert(
N == 1 &&
"Invalid number of operands!");
3487 unsigned Value =
CE->getValue();
3492 void addNEONi16splatNotOperands(
MCInst &Inst,
unsigned N)
const {
3493 assert(
N == 1 &&
"Invalid number of operands!");
3496 unsigned Value =
CE->getValue();
3501 void addNEONi32splatOperands(
MCInst &Inst,
unsigned N)
const {
3502 assert(
N == 1 &&
"Invalid number of operands!");
3505 unsigned Value =
CE->getValue();
3510 void addNEONi32splatNotOperands(
MCInst &Inst,
unsigned N)
const {
3511 assert(
N == 1 &&
"Invalid number of operands!");
3514 unsigned Value =
CE->getValue();
3519 void addNEONi8ReplicateOperands(
MCInst &Inst,
bool Inv)
const {
3524 "All instructions that wants to replicate non-zero byte "
3525 "always must be replaced with VMOVv8i8 or VMOVv16i8.");
3526 unsigned Value =
CE->getValue();
3529 unsigned B =
Value & 0xff;
3534 void addNEONinvi8ReplicateOperands(
MCInst &Inst,
unsigned N)
const {
3535 assert(
N == 1 &&
"Invalid number of operands!");
3536 addNEONi8ReplicateOperands(Inst,
true);
3539 static unsigned encodeNeonVMOVImmediate(
unsigned Value) {
3542 else if (
Value > 0xffff &&
Value <= 0xffffff)
3544 else if (
Value > 0xffffff)
3549 void addNEONi32vmovOperands(
MCInst &Inst,
unsigned N)
const {
3550 assert(
N == 1 &&
"Invalid number of operands!");
3553 unsigned Value = encodeNeonVMOVImmediate(
CE->getValue());
3557 void addNEONvmovi8ReplicateOperands(
MCInst &Inst,
unsigned N)
const {
3558 assert(
N == 1 &&
"Invalid number of operands!");
3559 addNEONi8ReplicateOperands(Inst,
false);
3562 void addNEONvmovi16ReplicateOperands(
MCInst &Inst,
unsigned N)
const {
3563 assert(
N == 1 &&
"Invalid number of operands!");
3569 "All instructions that want to replicate non-zero half-word "
3570 "always must be replaced with V{MOV,MVN}v{4,8}i16.");
3572 unsigned Elem =
Value & 0xffff;
3574 Elem = (Elem >> 8) | 0x200;
3578 void addNEONi32vmovNegOperands(
MCInst &Inst,
unsigned N)
const {
3579 assert(
N == 1 &&
"Invalid number of operands!");
3582 unsigned Value = encodeNeonVMOVImmediate(~
CE->getValue());
3586 void addNEONvmovi32ReplicateOperands(
MCInst &Inst,
unsigned N)
const {
3587 assert(
N == 1 &&
"Invalid number of operands!");
3593 "All instructions that want to replicate non-zero word "
3594 "always must be replaced with V{MOV,MVN}v{2,4}i32.");
3596 unsigned Elem = encodeNeonVMOVImmediate(
Value & 0xffffffff);
3600 void addNEONi64splatOperands(
MCInst &Inst,
unsigned N)
const {
3601 assert(
N == 1 &&
"Invalid number of operands!");
3606 for (
unsigned i = 0; i < 8; ++i, Value >>= 8) {
3612 void addComplexRotationEvenOperands(
MCInst &Inst,
unsigned N)
const {
3613 assert(
N == 1 &&
"Invalid number of operands!");
3618 void addComplexRotationOddOperands(
MCInst &Inst,
unsigned N)
const {
3619 assert(
N == 1 &&
"Invalid number of operands!");
3624 void addMveSaturateOperands(
MCInst &Inst,
unsigned N)
const {
3625 assert(
N == 1 &&
"Invalid number of operands!");
3627 unsigned Imm =
CE->getValue();
3628 assert((Imm == 48 || Imm == 64) &&
"Invalid saturate operand");
3634 static std::unique_ptr<ARMOperand> CreateITMask(
unsigned Mask,
SMLoc S,
3635 ARMAsmParser &Parser) {
3636 auto Op = std::make_unique<ARMOperand>(k_ITCondMask, Parser);
3643 static std::unique_ptr<ARMOperand>
3645 auto Op = std::make_unique<ARMOperand>(k_CondCode, Parser);
3653 ARMAsmParser &Parser) {
3654 auto Op = std::make_unique<ARMOperand>(k_VPTPred, Parser);
3661 static std::unique_ptr<ARMOperand> CreateCoprocNum(
unsigned CopVal,
SMLoc S,
3662 ARMAsmParser &Parser) {
3663 auto Op = std::make_unique<ARMOperand>(k_CoprocNum, Parser);
3664 Op->Cop.Val = CopVal;
3670 static std::unique_ptr<ARMOperand> CreateCoprocReg(
unsigned CopVal,
SMLoc S,
3671 ARMAsmParser &Parser) {
3672 auto Op = std::make_unique<ARMOperand>(k_CoprocReg, Parser);
3673 Op->Cop.Val = CopVal;
3679 static std::unique_ptr<ARMOperand>
3680 CreateCoprocOption(
unsigned Val,
SMLoc S,
SMLoc E, ARMAsmParser &Parser) {
3681 auto Op = std::make_unique<ARMOperand>(k_CoprocOption, Parser);
3689 ARMAsmParser &Parser) {
3690 auto Op = std::make_unique<ARMOperand>(k_CCOut, Parser);
3691 Op->Reg.RegNum =
Reg;
3697 static std::unique_ptr<ARMOperand> CreateToken(
StringRef Str,
SMLoc S,
3698 ARMAsmParser &Parser) {
3699 auto Op = std::make_unique<ARMOperand>(k_Token, Parser);
3700 Op->Tok.Data = Str.data();
3701 Op->Tok.Length = Str.size();
3708 ARMAsmParser &Parser) {
3709 auto Op = std::make_unique<ARMOperand>(k_Register, Parser);
3710 Op->Reg.RegNum =
Reg;
3716 static std::unique_ptr<ARMOperand>
3719 SMLoc E, ARMAsmParser &Parser) {
3720 auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister, Parser);
3721 Op->RegShiftedReg.ShiftTy = ShTy;
3722 Op->RegShiftedReg.SrcReg = SrcReg;
3723 Op->RegShiftedReg.ShiftReg = ShiftReg;
3724 Op->RegShiftedReg.ShiftImm = ShiftImm;
3730 static std::unique_ptr<ARMOperand>
3733 ARMAsmParser &Parser) {
3734 auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate, Parser);
3735 Op->RegShiftedImm.ShiftTy = ShTy;
3736 Op->RegShiftedImm.SrcReg = SrcReg;
3737 Op->RegShiftedImm.ShiftImm = ShiftImm;
3743 static std::unique_ptr<ARMOperand> CreateShifterImm(
bool isASR,
unsigned Imm,
3745 ARMAsmParser &Parser) {
3746 auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate, Parser);
3747 Op->ShifterImm.isASR = isASR;
3748 Op->ShifterImm.Imm =
Imm;
3754 static std::unique_ptr<ARMOperand>
3755 CreateRotImm(
unsigned Imm,
SMLoc S,
SMLoc E, ARMAsmParser &Parser) {
3756 auto Op = std::make_unique<ARMOperand>(k_RotateImmediate, Parser);
3757 Op->RotImm.Imm =
Imm;
3763 static std::unique_ptr<ARMOperand> CreateModImm(
unsigned Bits,
unsigned Rot,
3765 ARMAsmParser &Parser) {
3766 auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate, Parser);
3768 Op->ModImm.Rot = Rot;
3774 static std::unique_ptr<ARMOperand>
3776 ARMAsmParser &Parser) {
3777 auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate, Parser);
3784 static std::unique_ptr<ARMOperand> CreateBitfield(
unsigned LSB,
3785 unsigned Width,
SMLoc S,
3787 ARMAsmParser &Parser) {
3788 auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor, Parser);
3789 Op->Bitfield.LSB = LSB;
3790 Op->Bitfield.Width = Width;
3796 static std::unique_ptr<ARMOperand>
3798 SMLoc StartLoc,
SMLoc EndLoc, ARMAsmParser &Parser) {
3799 assert(Regs.size() > 0 &&
"RegList contains no registers?");
3800 KindTy
Kind = k_RegisterList;
3802 if (ARMMCRegisterClasses[ARM::DPRRegClassID].
contains(
3803 Regs.front().second)) {
3804 if (Regs.back().second == ARM::VPR)
3805 Kind = k_FPDRegisterListWithVPR;
3807 Kind = k_DPRRegisterList;
3808 }
else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
contains(
3809 Regs.front().second)) {
3810 if (Regs.back().second == ARM::VPR)
3811 Kind = k_FPSRegisterListWithVPR;
3813 Kind = k_SPRRegisterList;
3814 }
else if (Regs.front().second == ARM::VPR) {
3815 assert(Regs.size() == 1 &&
3816 "Register list starting with VPR expected to only contain VPR");
3817 Kind = k_FPSRegisterListWithVPR;
3820 if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
3821 Kind = k_RegisterListWithAPSR;
3825 auto Op = std::make_unique<ARMOperand>(Kind, Parser);
3826 for (
const auto &
P : Regs)
3827 Op->Registers.push_back(
P.second);
3829 Op->StartLoc = StartLoc;
3830 Op->EndLoc = EndLoc;
3834 static std::unique_ptr<ARMOperand>
3836 SMLoc E, ARMAsmParser &Parser) {
3837 auto Op = std::make_unique<ARMOperand>(k_VectorList, Parser);
3838 Op->VectorList.RegNum =
Reg;
3839 Op->VectorList.Count = Count;
3840 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3846 static std::unique_ptr<ARMOperand>
3847 CreateVectorListAllLanes(
MCRegister Reg,
unsigned Count,
bool isDoubleSpaced,
3849 auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes, Parser);
3850 Op->VectorList.RegNum =
Reg;
3851 Op->VectorList.Count = Count;
3852 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3858 static std::unique_ptr<ARMOperand>
3861 ARMAsmParser &Parser) {
3862 auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed, Parser);
3863 Op->VectorList.RegNum =
Reg;
3864 Op->VectorList.Count = Count;
3865 Op->VectorList.LaneIndex =
Index;
3866 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3872 static std::unique_ptr<ARMOperand> CreateVectorIndex(
unsigned Idx,
SMLoc S,
3874 ARMAsmParser &Parser) {
3875 auto Op = std::make_unique<ARMOperand>(k_VectorIndex, Parser);
3876 Op->VectorIndex.Val =
Idx;
3882 static std::unique_ptr<ARMOperand> CreateImm(
const MCExpr *Val,
SMLoc S,
3883 SMLoc E, ARMAsmParser &Parser) {
3884 auto Op = std::make_unique<ARMOperand>(k_Immediate, Parser);
3891 static std::unique_ptr<ARMOperand>
3894 bool isNegative,
SMLoc S,
SMLoc E, ARMAsmParser &Parser,
3896 auto Op = std::make_unique<ARMOperand>(k_Memory, Parser);
3897 Op->Memory.BaseRegNum = BaseReg;
3898 Op->Memory.OffsetImm = OffsetImm;
3899 Op->Memory.OffsetRegNum = OffsetReg;
3900 Op->Memory.ShiftType = ShiftType;
3901 Op->Memory.ShiftImm = ShiftImm;
3902 Op->Memory.Alignment = Alignment;
3903 Op->Memory.isNegative = isNegative;
3906 Op->AlignmentLoc = AlignmentLoc;
3910 static std::unique_ptr<ARMOperand>
3912 unsigned ShiftImm,
SMLoc S,
SMLoc E, ARMAsmParser &Parser) {
3913 auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister, Parser);
3914 Op->PostIdxReg.RegNum =
Reg;
3915 Op->PostIdxReg.isAdd = isAdd;
3916 Op->PostIdxReg.ShiftTy = ShiftTy;
3917 Op->PostIdxReg.ShiftImm = ShiftImm;
3923 static std::unique_ptr<ARMOperand>
3925 auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt, Parser);
3926 Op->MBOpt.Val = Opt;
3932 static std::unique_ptr<ARMOperand>
3934 ARMAsmParser &Parser) {
3935 auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt, Parser);
3936 Op->ISBOpt.Val = Opt;
3942 static std::unique_ptr<ARMOperand>
3944 ARMAsmParser &Parser) {
3945 auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt, Parser);
3946 Op->TSBOpt.Val = Opt;
3952 static std::unique_ptr<ARMOperand>
3954 auto Op = std::make_unique<ARMOperand>(k_ProcIFlags, Parser);
3961 static std::unique_ptr<ARMOperand> CreateMSRMask(
unsigned MMask,
SMLoc S,
3962 ARMAsmParser &Parser) {
3963 auto Op = std::make_unique<ARMOperand>(k_MSRMask, Parser);
3964 Op->MMask.Val = MMask;
3970 static std::unique_ptr<ARMOperand> CreateBankedReg(
unsigned Reg,
SMLoc S,
3971 ARMAsmParser &Parser) {
3972 auto Op = std::make_unique<ARMOperand>(k_BankedReg, Parser);
3973 Op->BankedReg.Val =
Reg;
4000 case k_ITCondMask: {
4001 static const char *
const MaskStr[] = {
4002 "(invalid)",
"(tttt)",
"(ttt)",
"(ttte)",
4003 "(tt)",
"(ttet)",
"(tte)",
"(ttee)",
4004 "(t)",
"(tett)",
"(tet)",
"(tete)",
4005 "(te)",
"(teet)",
"(tee)",
"(teee)",
4007 assert((ITMask.Mask & 0xf) == ITMask.Mask);
4008 OS <<
"<it-mask " << MaskStr[ITMask.Mask] <<
">";
4012 OS <<
"<coprocessor number: " << getCoproc() <<
">";
4015 OS <<
"<coprocessor register: " << getCoproc() <<
">";
4017 case k_CoprocOption:
4018 OS <<
"<coprocessor option: " << CoprocOption.Val <<
">";
4021 OS <<
"<mask: " << getMSRMask() <<
">";
4024 OS <<
"<banked reg: " << getBankedReg() <<
">";
4029 case k_MemBarrierOpt:
4030 OS <<
"<ARM_MB::" << MemBOptToString(getMemBarrierOpt(),
false) <<
">";
4032 case k_InstSyncBarrierOpt:
4033 OS <<
"<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) <<
">";
4035 case k_TraceSyncBarrierOpt:
4036 OS <<
"<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) <<
">";
4043 OS <<
" offset-imm:" << *
Memory.OffsetImm;
4045 OS <<
" offset-reg:" << (
Memory.isNegative ?
"-" :
"")
4049 OS <<
" shift-imm:" <<
Memory.ShiftImm;
4052 OS <<
" alignment:" <<
Memory.Alignment;
4055 case k_PostIndexRegister:
4056 OS <<
"post-idx register " << (PostIdxReg.isAdd ?
"" :
"-")
4057 <<
RegName(PostIdxReg.RegNum);
4060 << PostIdxReg.ShiftImm;
4063 case k_ProcIFlags: {
4064 OS <<
"<ARM_PROC::";
4065 unsigned IFlags = getProcIFlags();
4066 for (
int i=2; i >= 0; --i)
4067 if (IFlags & (1 << i))
4075 case k_ShifterImmediate:
4076 OS <<
"<shift " << (ShifterImm.isASR ?
"asr" :
"lsl")
4077 <<
" #" << ShifterImm.Imm <<
">";
4079 case k_ShiftedRegister:
4080 OS <<
"<so_reg_reg " <<
RegName(RegShiftedReg.SrcReg) <<
" "
4082 <<
RegName(RegShiftedReg.ShiftReg) <<
">";
4084 case k_ShiftedImmediate:
4085 OS <<
"<so_reg_imm " <<
RegName(RegShiftedImm.SrcReg) <<
" "
4087 << RegShiftedImm.ShiftImm <<
">";
4089 case k_RotateImmediate:
4090 OS <<
"<ror " <<
" #" << (RotImm.Imm * 8) <<
">";
4092 case k_ModifiedImmediate:
4093 OS <<
"<mod_imm #" << ModImm.Bits <<
", #"
4094 << ModImm.Rot <<
")>";
4096 case k_ConstantPoolImmediate:
4097 OS <<
"<constant_pool_imm #" << *getConstantPoolImm();
4099 case k_BitfieldDescriptor:
4100 OS <<
"<bitfield " <<
"lsb: " <<
Bitfield.LSB
4101 <<
", width: " <<
Bitfield.Width <<
">";
4103 case k_RegisterList:
4104 case k_RegisterListWithAPSR:
4105 case k_DPRRegisterList:
4106 case k_SPRRegisterList:
4107 case k_FPSRegisterListWithVPR:
4108 case k_FPDRegisterListWithVPR: {
4109 OS <<
"<register_list ";
4112 for (
auto I = RegList.
begin(),
E = RegList.
end();
I !=
E;) {
4114 if (++
I <
E)
OS <<
", ";
4121 OS <<
"<vector_list " << VectorList.Count <<
" * "
4122 <<
RegName(VectorList.RegNum) <<
">";
4124 case k_VectorListAllLanes:
4125 OS <<
"<vector_list(all lanes) " << VectorList.Count <<
" * "
4126 <<
RegName(VectorList.RegNum) <<
">";
4128 case k_VectorListIndexed:
4129 OS <<
"<vector_list(lane " << VectorList.LaneIndex <<
") "
4130 << VectorList.Count <<
" * " <<
RegName(VectorList.RegNum) <<
">";
4133 OS <<
"'" << getToken() <<
"'";
4136 OS <<
"<vectorindex " << getVectorIndex() <<
">";
4150 ".8",
".16",
".32",
".64",
".i8",
".i16",
".i32",
".i64",
4151 ".u8",
".u16",
".u32",
".u64",
".s8",
".s16",
".s32",
".s64",
4152 ".p8",
".p16",
".f32",
".f64",
".f",
".d"};
4157 unsigned MnemonicOpsEndInd = 1;
4161 static_cast<ARMOperand &
>(*
Operands[0]).getToken() ==
"cps") {
4163 static_cast<ARMOperand &
>(*
Operands[1]).getImm()->getKind() ==
4165 (dyn_cast<MCConstantExpr>(
4166 static_cast<ARMOperand &
>(*
Operands[1]).getImm())
4168 dyn_cast<MCConstantExpr>(
4169 static_cast<ARMOperand &
>(*
Operands[1]).getImm())
4171 ++MnemonicOpsEndInd;
4175 bool RHSCondCode =
false;
4176 while (MnemonicOpsEndInd <
Operands.size()) {
4177 auto Op =
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]);
4179 if (
Op.isITMask()) {
4181 MnemonicOpsEndInd++;
4182 }
else if (
Op.isToken() &&
4186 Op.getToken() ==
".w" ||
Op.getToken() ==
".bf16" ||
4187 Op.getToken() ==
".p64" ||
Op.getToken() ==
".f16" ||
4193 MnemonicOpsEndInd++;
4196 else if (
Op.isCCOut() || (
Op.isCondCode() && !RHSCondCode) ||
4197 Op.isVPTPred() || (
Op.isToken() &&
Op.getToken() ==
".w"))
4198 MnemonicOpsEndInd++;
4202 return MnemonicOpsEndInd;
4207 const AsmToken &Tok = getParser().getTok();
4210 Reg = tryParseRegister();
4217 if (parseRegister(
Reg, StartLoc, EndLoc))
4225MCRegister ARMAsmParser::tryParseRegister(
bool AllowOutOfBoundReg) {
4235 .
Case(
"r13", ARM::SP)
4236 .
Case(
"r14", ARM::LR)
4237 .
Case(
"r15", ARM::PC)
4238 .
Case(
"ip", ARM::R12)
4240 .
Case(
"a1", ARM::R0)
4241 .
Case(
"a2", ARM::R1)
4242 .
Case(
"a3", ARM::R2)
4243 .
Case(
"a4", ARM::R3)
4244 .
Case(
"v1", ARM::R4)
4245 .
Case(
"v2", ARM::R5)
4246 .
Case(
"v3", ARM::R6)
4247 .
Case(
"v4", ARM::R7)
4248 .
Case(
"v5", ARM::R8)
4249 .
Case(
"v6", ARM::R9)
4250 .
Case(
"v7", ARM::R10)
4251 .
Case(
"v8", ARM::R11)
4252 .
Case(
"sb", ARM::R9)
4253 .
Case(
"sl", ARM::R10)
4254 .
Case(
"fp", ARM::R11)
4261 auto Entry = RegisterReqs.
find(lowerCase);
4263 if (Entry == RegisterReqs.
end())
4266 return Entry->getValue();
4270 if (!AllowOutOfBoundReg && !hasD32() &&
Reg >=
ARM::D16 &&
Reg <= ARM::D31)
4278std::optional<ARM_AM::ShiftOpc> ARMAsmParser::tryParseShiftToken() {
4282 return std::nullopt;
4304 auto ShiftTyOpt = tryParseShiftToken();
4305 if (ShiftTyOpt == std::nullopt)
4307 auto ShiftTy = ShiftTyOpt.value();
4314 std::unique_ptr<ARMOperand> PrevOp(
4315 (ARMOperand *)
Operands.pop_back_val().release());
4316 if (!PrevOp->isReg())
4317 return Error(PrevOp->getStartLoc(),
"shift must be of a register");
4334 const MCExpr *ShiftExpr =
nullptr;
4335 if (getParser().parseExpression(ShiftExpr, EndLoc)) {
4336 Error(ImmLoc,
"invalid immediate shift value");
4342 Error(ImmLoc,
"invalid immediate shift value");
4348 Imm =
CE->getValue();
4352 Error(ImmLoc,
"immediate shift value out of range");
4362 ShiftReg = tryParseRegister();
4364 Error(L,
"expected immediate or register in shift operand");
4369 "expected immediate or register in shift operand");
4375 Operands.push_back(ARMOperand::CreateShiftedRegister(
4376 ShiftTy, SrcReg, ShiftReg, Imm, S, EndLoc, *
this));
4378 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
4398 Operands.push_back(ARMOperand::CreateReg(
Reg, RegStartLoc, RegEndLoc, *
this));
4403 ExclaimTok.
getLoc(), *
this));
4416 if (getParser().parseExpression(ImmVal))
4420 return TokError(
"immediate value expected for vector index");
4429 getContext(), *
this));
4447 if (
Name.size() < 2 ||
Name[0] != CoprocOp)
4451 switch (
Name.size()) {
4474 case '0':
return 10;
4475 case '1':
return 11;
4476 case '2':
return 12;
4477 case '3':
return 13;
4478 case '4':
return 14;
4479 case '5':
return 15;
4519 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S, *
this));
4538 Operands.push_back(ARMOperand::CreateCoprocReg(
Reg, S, *
this));
4555 if (getParser().parseExpression(Expr))
4556 return Error(Loc,
"illegal expression");
4558 if (!CE ||
CE->getValue() < 0 ||
CE->getValue() > 255)
4560 "coprocessor option must be an immediate in range [0, 255]");
4561 int Val =
CE->getValue();
4569 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S,
E, *
this));
4580 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].
contains(
Reg))
4584 case ARM::R0:
return ARM::R1;
case ARM::R1:
return ARM::R2;
4585 case ARM::R2:
return ARM::R3;
case ARM::R3:
return ARM::R4;
4586 case ARM::R4:
return ARM::R5;
case ARM::R5:
return ARM::R6;
4587 case ARM::R6:
return ARM::R7;
case ARM::R7:
return ARM::R8;
4588 case ARM::R8:
return ARM::R9;
case ARM::R9:
return ARM::R10;
4589 case ARM::R10:
return ARM::R11;
case ARM::R11:
return ARM::R12;
4590 case ARM::R12:
return ARM::SP;
case ARM::SP:
return ARM::LR;
4591 case ARM::LR:
return ARM::PC;
case ARM::PC:
return ARM::R0;
4600 Regs.emplace_back(Enc,
Reg);
4601 for (
auto I = Regs.rbegin(), J =
I + 1,
E = Regs.rend(); J !=
E; ++
I, ++J) {
4602 if (J->first == Enc) {
4603 Regs.erase(J.base());
4615 bool AllowRAAC,
bool IsLazyLoadStore,
4619 return TokError(
"Token is not a Left Curly Brace");
4626 bool AllowOutOfBoundReg = IsLazyLoadStore || IsVSCCLRM;
4629 return Error(RegLoc,
"register expected");
4630 if (!AllowRAAC &&
Reg == ARM::RA_AUTH_CODE)
4631 return Error(RegLoc,
"pseudo-register not allowed");
4642 bool VSCCLRMAdjustEncoding =
false;
4645 if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4646 Reg = getDRegFromQReg(
Reg);
4647 EReg =
MRI->getEncodingValue(
Reg);
4652 if (
Reg == ARM::RA_AUTH_CODE ||
4653 ARMMCRegisterClasses[ARM::GPRRegClassID].
contains(
Reg))
4654 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
4655 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].
contains(
Reg))
4656 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
4657 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
contains(
Reg))
4658 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
4659 else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].
contains(
Reg))
4660 RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4661 else if (
Reg == ARM::VPR)
4662 RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4664 return Error(RegLoc,
"invalid register in register list");
4667 EReg =
MRI->getEncodingValue(
Reg);
4676 if (
Reg == ARM::RA_AUTH_CODE)
4677 return Error(RegLoc,
"pseudo-register not allowed");
4680 MCRegister EndReg = tryParseRegister(AllowOutOfBoundReg);
4682 return Error(AfterMinusLoc,
"register expected");
4683 if (EndReg == ARM::RA_AUTH_CODE)
4684 return Error(AfterMinusLoc,
"pseudo-register not allowed");
4686 if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(EndReg))
4687 EndReg = getDRegFromQReg(EndReg) + 1;
4694 return Error(AfterMinusLoc,
"invalid register in register list");
4696 if (
MRI->getEncodingValue(
Reg) >
MRI->getEncodingValue(EndReg))
4697 return Error(AfterMinusLoc,
"bad range in register list");
4700 while (
Reg != EndReg) {
4702 EReg =
MRI->getEncodingValue(
Reg);
4703 if (VSCCLRMAdjustEncoding)
4708 ") in register list");
4718 Reg = tryParseRegister(AllowOutOfBoundReg);
4720 return Error(RegLoc,
"register expected");
4721 if (!AllowRAAC &&
Reg == ARM::RA_AUTH_CODE)
4722 return Error(RegLoc,
"pseudo-register not allowed");
4724 bool isQReg =
false;
4725 if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4726 Reg = getDRegFromQReg(
Reg);
4730 RC->
getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
4731 ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
Reg)) {
4734 RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4736 if (
Reg == ARM::VPR &&
4737 (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
4738 RC == &ARMMCRegisterClasses[ARM::DPRRegClassID] ||
4739 RC == &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID])) {
4740 RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4741 EReg =
MRI->getEncodingValue(
Reg);
4744 ") in register list");
4750 if (IsVSCCLRM && OldReg == ARM::S31 &&
Reg ==
ARM::D16) {
4751 VSCCLRMAdjustEncoding =
true;
4752 RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4755 if ((
Reg == ARM::RA_AUTH_CODE &&
4756 RC != &ARMMCRegisterClasses[ARM::GPRRegClassID]) ||
4758 return Error(RegLoc,
"invalid register in register list");
4763 EReg =
MRI->getEncodingValue(
Reg);
4764 if (VSCCLRMAdjustEncoding)
4766 if (EnforceOrder && EReg < EOldReg) {
4767 if (ARMMCRegisterClasses[ARM::GPRRegClassID].
contains(
Reg))
4768 Warning(RegLoc,
"register list not in ascending order");
4769 else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].
contains(
Reg))
4770 return Error(RegLoc,
"register list not in ascending order");
4773 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
4774 RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
4775 EReg != EOldReg + 1)
4776 return Error(RegLoc,
"non-contiguous register range");
4780 ") in register list");
4784 EReg =
MRI->getEncodingValue(
Reg);
4800 ARMOperand::CreateToken(
"^", Parser.
getTok().
getLoc(), *
this));
4808ParseStatus ARMAsmParser::parseVectorLane(VectorLaneTy &LaneKind,
4816 LaneKind = AllLanes;
4829 if (getParser().parseExpression(LaneIndex))
4830 return Error(Loc,
"illegal expression");
4833 return Error(Loc,
"lane index must be empty or an integer");
4838 int64_t Val =
CE->getValue();
4841 if (Val < 0 || Val > 7)
4844 LaneKind = IndexedLane;
4854 VectorLaneTy LaneKind;
4867 if (ARMMCRegisterClasses[ARM::DPRRegClassID].
contains(
Reg)) {
4868 ParseStatus Res = parseVectorLane(LaneKind, LaneIndex,
E);
4873 Operands.push_back(ARMOperand::CreateReg(
Reg, S,
E, *
this));
4877 ARMOperand::CreateVectorListAllLanes(
Reg, 1,
false, S,
E, *
this));
4880 Operands.push_back(ARMOperand::CreateVectorListIndexed(
4881 Reg, 1, LaneIndex,
false, S,
E, *
this));
4886 if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4887 Reg = getDRegFromQReg(
Reg);
4888 ParseStatus Res = parseVectorLane(LaneKind, LaneIndex,
E);
4893 Operands.push_back(ARMOperand::CreateReg(
Reg, S,
E, *
this));
4896 Reg =
MRI->getMatchingSuperReg(
Reg, ARM::dsub_0,
4897 &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4899 ARMOperand::CreateVectorListAllLanes(
Reg, 2,
false, S,
E, *
this));
4902 Operands.push_back(ARMOperand::CreateVectorListIndexed(
4903 Reg, 2, LaneIndex,
false, S,
E, *
this));
4908 Operands.push_back(ARMOperand::CreateReg(
Reg, S,
E, *
this));
4920 return Error(RegLoc,
"register expected");
4925 if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
Reg))
4927 "vector register in range Q0-Q7 expected");
4930 else if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4931 FirstReg =
Reg = getDRegFromQReg(
Reg);
4939 if (!parseVectorLane(LaneKind, LaneIndex,
E).isSuccess())
4947 else if (Spacing == 2)
4949 "sequential registers in double spaced list");
4954 return Error(AfterMinusLoc,
"register expected");
4956 if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(EndReg))
4957 EndReg = getDRegFromQReg(EndReg) + 1;
4964 !ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(EndReg)) ||
4966 !ARMMCRegisterClasses[ARM::DPRRegClassID].
contains(EndReg)))
4967 return Error(AfterMinusLoc,
"invalid register in register list");
4970 return Error(AfterMinusLoc,
"bad range in register list");
4972 VectorLaneTy NextLaneKind;
4973 unsigned NextLaneIndex;
4974 if (!parseVectorLane(NextLaneKind, NextLaneIndex,
E).isSuccess())
4976 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4977 return Error(AfterMinusLoc,
"mismatched lane index in register list");
4980 Count += EndReg -
Reg;
4987 Reg = tryParseRegister();
4989 return Error(RegLoc,
"register expected");
4992 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
Reg))
4993 return Error(RegLoc,
"vector register in range Q0-Q7 expected");
5002 else if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
5005 else if (Spacing == 2)
5008 "invalid register in double-spaced list (must be 'D' register')");
5009 Reg = getDRegFromQReg(
Reg);
5010 if (
Reg != OldReg + 1)
5011 return Error(RegLoc,
"non-contiguous register range");
5015 VectorLaneTy NextLaneKind;
5016 unsigned NextLaneIndex;
5018 if (!parseVectorLane(NextLaneKind, NextLaneIndex,
E).isSuccess())
5020 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
5021 return Error(LaneLoc,
"mismatched lane index in register list");
5028 Spacing = 1 + (
Reg == OldReg + 2);
5031 if (
Reg != OldReg + Spacing)
5032 return Error(RegLoc,
"non-contiguous register range");
5035 VectorLaneTy NextLaneKind;
5036 unsigned NextLaneIndex;
5038 if (!parseVectorLane(NextLaneKind, NextLaneIndex,
E).isSuccess())
5040 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
5041 return Error(EndLoc,
"mismatched lane index in register list");
5054 if (Count == 2 && !hasMVE()) {
5056 &ARMMCRegisterClasses[ARM::DPairRegClassID] :
5057 &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
5058 FirstReg =
MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
5060 auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
5061 ARMOperand::CreateVectorListAllLanes);
5062 Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S,
E, *
this));
5066 Operands.push_back(ARMOperand::CreateVectorListIndexed(
5067 FirstReg, Count, LaneIndex, (Spacing == 2), S,
E, *
this));
5118 const MCExpr *MemBarrierID;
5119 if (getParser().parseExpression(MemBarrierID))
5120 return Error(Loc,
"illegal expression");
5124 return Error(Loc,
"constant expression expected");
5126 int Val =
CE->getValue();
5128 return Error(Loc,
"immediate value out of range");
5133 "expected an immediate or barrier type");
5155 ARMOperand::CreateTraceSyncBarrierOpt(
ARM_TSB::CSYNC, S, *
this));
5183 const MCExpr *ISBarrierID;
5184 if (getParser().parseExpression(ISBarrierID))
5185 return Error(Loc,
"illegal expression");
5189 return Error(Loc,
"constant expression expected");
5191 int Val =
CE->getValue();
5193 return Error(Loc,
"immediate value out of range");
5198 "expected an immediate or barrier type");
5200 Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
5217 if (IFlagsStr !=
"none") {
5218 for (
int i = 0, e = IFlagsStr.
size(); i != e; ++i) {
5227 if (Flag == ~0U || (IFlags & Flag))
5243 if (
static_cast<ARMOperand &
>(*
Operands.back()).isMSRMask() ||
5244 static_cast<ARMOperand &
>(*
Operands.back()).isBankedReg())
5252 if (Val > 255 || Val < 0) {
5255 unsigned SYSmvalue = Val & 0xFF;
5257 Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S, *
this));
5266 auto TheReg = ARMSysReg::lookupMClassSysRegByName(
Mask.lower());
5267 if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
5270 unsigned SYSmvalue = TheReg->Encoding & 0xFFF;
5273 Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S, *
this));
5278 size_t Start = 0, Next =
Mask.find(
'_');
5280 std::string SpecReg =
Mask.slice(Start, Next).lower();
5287 unsigned FlagsVal = 0;
5289 if (SpecReg ==
"apsr") {
5293 .
Case(
"nzcvqg", 0xc)
5296 if (FlagsVal == ~0U) {
5302 }
else if (SpecReg ==
"cpsr" || SpecReg ==
"spsr") {
5304 if (Flags ==
"all" || Flags ==
"")
5306 for (
int i = 0, e =
Flags.size(); i != e; ++i) {
5316 if (Flag == ~0U || (FlagsVal & Flag))
5332 if (SpecReg ==
"spsr")
5336 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S, *
this));
5344 if (
static_cast<ARMOperand &
>(*
Operands.back()).isBankedReg() ||
5345 static_cast<ARMOperand &
>(*
Operands.back()).isMSRMask())
5354 auto TheReg = ARMBankedReg::lookupBankedRegByName(
RegName.lower());
5357 unsigned Encoding = TheReg->Encoding;
5360 Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S, *
this));
5370 auto ShiftCodeOpt = tryParseShiftToken();
5372 if (!ShiftCodeOpt.has_value())
5374 auto ShiftCode = ShiftCodeOpt.value();
5378 if (ShiftCode !=
Op)
5390 const MCExpr *ShiftAmount;
5393 if (getParser().parseExpression(ShiftAmount, EndLoc))
5394 return Error(Loc,
"illegal expression");
5397 return Error(Loc,
"constant expression expected");
5398 int Val =
CE->getValue();
5399 if (Val < Low || Val >
High)
5400 return Error(Loc,
"immediate value out of range");
5402 Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc, *
this));
5412 return Error(S,
"'be' or 'le' operand expected");
5420 return Error(S,
"'be' or 'le' operand expected");
5421 Operands.push_back(ARMOperand::CreateImm(
5439 if (ShiftName ==
"lsl" || ShiftName ==
"LSL")
5441 else if (ShiftName ==
"asr" || ShiftName ==
"ASR")
5454 const MCExpr *ShiftAmount;
5456 if (getParser().parseExpression(ShiftAmount, EndLoc))
5457 return Error(ExLoc,
"malformed shift expression");
5460 return Error(ExLoc,
"shift amount must be an immediate");
5462 int64_t Val =
CE->getValue();
5465 if (Val < 1 || Val > 32)
5466 return Error(ExLoc,
"'asr' shift amount must be in range [1,32]");
5469 return Error(ExLoc,
"'asr #32' shift amount not allowed in Thumb mode");
5470 if (Val == 32) Val = 0;
5473 if (Val < 0 || Val > 31)
5474 return Error(ExLoc,
"'lsr' shift amount must be in range [0,31]");
5478 ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc, *
this));
5493 if (ShiftName !=
"ror" && ShiftName !=
"ROR")
5504 const MCExpr *ShiftAmount;
5506 if (getParser().parseExpression(ShiftAmount, EndLoc))
5507 return Error(ExLoc,
"malformed rotate expression");
5510 return Error(ExLoc,
"rotate amount must be an immediate");
5512 int64_t Val =
CE->getValue();
5516 if (Val != 8 && Val != 16 && Val != 24 && Val != 0)
5517 return Error(ExLoc,
"'ror' rotate amount must be 8, 16, or 24");
5519 Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc, *
this));
5558 if (getParser().parseExpression(Imm1Exp, Ex1))
5559 return Error(Sx1,
"malformed expression");
5565 Imm1 =
CE->getValue();
5569 Operands.push_back(ARMOperand::CreateModImm(
5570 (Enc & 0xFF), (Enc & 0xF00) >> 7, Sx1, Ex1, *
this));
5581 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1, *
this));
5587 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1, *
this));
5594 "expected modified immediate operand: #[0, 255], #even[0-30]");
5597 return Error(Sx1,
"immediate operand must a number in the range [0, 255]");
5612 if (getParser().parseExpression(Imm2Exp, Ex2))
5613 return Error(Sx2,
"malformed expression");
5615 CE = dyn_cast<MCConstantExpr>(Imm2Exp);
5618 Imm2 =
CE->getValue();
5619 if (!(Imm2 & ~0x1E)) {
5621 Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2, *
this));
5625 "immediate operand must an even number in the range [0, 30]");
5627 return Error(Sx2,
"constant expression expected");
5642 if (getParser().parseExpression(LSBExpr))
5643 return Error(
E,
"malformed immediate expression");
5646 return Error(
E,
"'lsb' operand must be an immediate");
5648 int64_t LSB =
CE->getValue();
5650 if (LSB < 0 || LSB > 31)
5651 return Error(
E,
"'lsb' operand must be in the range [0,31]");
5665 if (getParser().parseExpression(WidthExpr, EndLoc))
5666 return Error(
E,
"malformed immediate expression");
5667 CE = dyn_cast<MCConstantExpr>(WidthExpr);
5669 return Error(
E,
"'width' operand must be an immediate");
5671 int64_t Width =
CE->getValue();
5673 if (Width < 1 || Width > 32 - LSB)
5674 return Error(
E,
"'width' operand must be in the range [1,32-lsb]");
5676 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc, *
this));
5693 bool haveEaten =
false;
5713 unsigned ShiftImm = 0;
5716 if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
5724 ARMOperand::CreatePostIdxReg(
Reg, isAdd, ShiftTy, ShiftImm, S,
E, *
this));
5754 if (getParser().parseExpression(
Offset,
E))
5758 return Error(S,
"constant expression expected");
5761 int32_t Val =
CE->getValue();
5762 if (isNegative && Val == 0)
5763 Val = std::numeric_limits<int32_t>::min();
5765 Operands.push_back(ARMOperand::CreateImm(
5771 bool haveEaten =
false;
5790 Operands.push_back(ARMOperand::CreatePostIdxReg(
5798 unsigned MnemonicOpsEndInd) {
5799 for (
unsigned I = 1;
I < MnemonicOpsEndInd; ++
I) {
5800 auto Op =
static_cast<ARMOperand &
>(*
Operands[
I]);
5801 if (
Op.isCondCode())
5808 unsigned MnemonicOpsEndInd) {
5809 for (
unsigned I = 1;
I < MnemonicOpsEndInd; ++
I) {
5810 auto Op =
static_cast<ARMOperand &
>(*
Operands[
I]);
5820void ARMAsmParser::cvtThumbMultiply(
MCInst &Inst,
5827 unsigned RegRd = MnemonicOpsEndInd;
5828 unsigned RegRn = MnemonicOpsEndInd + 1;
5829 unsigned RegRm = MnemonicOpsEndInd;
5831 if (
Operands.size() == MnemonicOpsEndInd + 3) {
5836 RegRn = MnemonicOpsEndInd + 2;
5837 RegRm = MnemonicOpsEndInd + 1;
5839 RegRn = MnemonicOpsEndInd + 1;
5840 RegRm = MnemonicOpsEndInd + 2;
5845 ((ARMOperand &)*
Operands[RegRd]).addRegOperands(Inst, 1);
5847 if (CondOutI != 0) {
5848 ((ARMOperand &)*
Operands[CondOutI]).addCCOutOperands(Inst, 1);
5851 *ARMOperand::CreateCCOut(0,
Operands[0]->getEndLoc(), *
this);
5852 Op.addCCOutOperands(Inst, 1);
5855 ((ARMOperand &)*
Operands[RegRn]).addRegOperands(Inst, 1);
5857 ((ARMOperand &)*
Operands[RegRm]).addRegOperands(Inst, 1);
5861 ((ARMOperand &)*
Operands[CondI]).addCondCodeOperands(Inst, 2);
5863 ARMOperand
Op = *ARMOperand::CreateCondCode(
5865 Op.addCondCodeOperands(Inst, 2);
5869void ARMAsmParser::cvtThumbBranches(
MCInst &Inst,
5883 case ARM::tBcc: Inst.
setOpcode(ARM::tB);
break;
5884 case ARM::t2Bcc: Inst.
setOpcode(ARM::t2B);
break;
5903 ARMOperand &
op =
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]);
5904 if (!
op.isSignedOffset<11, 1>() &&
isThumb() && hasV8MBaseline())
5910 ARMOperand &
op =
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]);
5911 if (!
op.isSignedOffset<8, 1>() &&
isThumb() && hasV8MBaseline())
5916 ((ARMOperand &)*
Operands[MnemonicOpsEndInd]).addImmOperands(Inst, 1);
5918 ((ARMOperand &)*
Operands[CondI]).addCondCodeOperands(Inst, 2);
5920 ARMOperand
Op = *ARMOperand::CreateCondCode(
5922 Op.addCondCodeOperands(Inst, 2);
5926void ARMAsmParser::cvtMVEVMOVQtoDReg(
5935 ((ARMOperand &)*
Operands[MnemonicOpsEndInd]).addRegOperands(Inst, 1);
5936 ((ARMOperand &)*
Operands[MnemonicOpsEndInd + 1])
5937 .addRegOperands(Inst, 1);
5938 ((ARMOperand &)*
Operands[MnemonicOpsEndInd + 2])
5939 .addRegOperands(Inst, 1);
5940 ((ARMOperand &)*
Operands[MnemonicOpsEndInd + 3])
5941 .addMVEPairVectorIndexOperands(Inst, 1);
5943 ((ARMOperand &)*
Operands[MnemonicOpsEndInd + 5])
5944 .addMVEPairVectorIndexOperands(Inst, 1);
5947 .addCondCodeOperands(Inst, 2);
5951 Op.addCondCodeOperands(Inst, 2);
5961 return TokError(
"Token is not a Left Bracket");
5968 return Error(BaseRegTok.
getLoc(),
"register expected");
5974 return Error(Tok.
getLoc(),
"malformed memory operand");
5980 Operands.push_back(ARMOperand::CreateMem(
5987 ARMOperand::CreateToken(
"!", Parser.
getTok().
getLoc(), *
this));
5995 "Lost colon or comma in memory operand?!");
6007 if (getParser().parseExpression(Expr))
6015 return Error (
E,
"constant expression expected");
6018 switch (
CE->getValue()) {
6021 "alignment specifier must be 16, 32, 64, 128, or 256 bits");
6022 case 16:
Align = 2;
break;
6023 case 32:
Align = 4;
break;
6024 case 64:
Align = 8;
break;
6025 case 128:
Align = 16;
break;
6026 case 256:
Align = 32;
break;
6037 Operands.push_back(ARMOperand::CreateMem(BaseReg,
nullptr, 0,
6039 S,
E, *
this, AlignmentLoc));
6045 ARMOperand::CreateToken(
"!", Parser.
getTok().
getLoc(), *
this));
6066 if (getParser().parseExpression(
Offset))
6069 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Offset)) {
6072 int32_t Val =
CE->getValue();
6073 if (isNegative && Val == 0)
6078 AdjustedOffset =
CE;
6081 Operands.push_back(ARMOperand::CreateMem(BaseReg, AdjustedOffset, 0,
6095 ARMOperand::CreateToken(
"!", Parser.
getTok().
getLoc(), *
this));
6103 bool isNegative =
false;
6115 return Error(
E,
"register expected");
6119 unsigned ShiftImm = 0;
6122 if (parseMemRegOffsetShift(ShiftType, ShiftImm))
6132 Operands.push_back(ARMOperand::CreateMem(BaseReg,
nullptr, OffsetReg,
6133 ShiftType, ShiftImm, 0, isNegative,
6140 ARMOperand::CreateToken(
"!", Parser.
getTok().
getLoc(), *
this));
6157 return Error(Loc,
"illegal shift operator");
6159 if (ShiftName ==
"lsl" || ShiftName ==
"LSL" ||
6160 ShiftName ==
"asl" || ShiftName ==
"ASL")
6162 else if (ShiftName ==
"lsr" || ShiftName ==
"LSR")
6164 else if (ShiftName ==
"asr" || ShiftName ==
"ASR")
6166 else if (ShiftName ==
"ror" || ShiftName ==
"ROR")
6168 else if (ShiftName ==
"rrx" || ShiftName ==
"RRX")
6170 else if (ShiftName ==
"uxtw" || ShiftName ==
"UXTW")
6173 return Error(Loc,
"illegal shift operator");
6188 if (getParser().parseExpression(Expr))
6195 return Error(Loc,
"shift amount must be an immediate");
6196 int64_t
Imm =
CE->getValue();
6200 return Error(Loc,
"immediate shift value out of range");
6244 bool isVmovf =
false;
6246 for (
unsigned I = 1;
I < MnemonicOpsEndInd; ++
I) {
6247 ARMOperand &TyOp =
static_cast<ARMOperand &
>(*
Operands[
I]);
6248 if (TyOp.isToken() &&
6249 (TyOp.getToken() ==
".f32" || TyOp.getToken() ==
".f64" ||
6250 TyOp.getToken() ==
".f16")) {
6256 ARMOperand &Mnemonic =
static_cast<ARMOperand &
>(*
Operands[0]);
6257 bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() ==
"fconstd" ||
6258 Mnemonic.getToken() ==
"fconsts");
6259 if (!(isVmovf || isFconst))
6265 bool isNegative =
false;
6288 if (Val > 255 || Val < 0)
6289 return Error(Loc,
"encoded floating point value out of range");
6299 return Error(Loc,
"invalid floating point immediate");
6319 switch (getLexer().getKind()) {
6327 bool ExpectLabel = Mnemonic ==
"b" || Mnemonic ==
"bl";
6329 if (!tryParseRegisterWithWriteBack(
Operands))
6331 int Res = tryParseShiftRegister(
Operands);
6337 if (Mnemonic ==
"vmrs" &&
6341 Operands.push_back(ARMOperand::CreateToken(
"APSR_nzcv", S, *
this));
6358 if (getParser().parseExpression(IdVal))
6361 Operands.push_back(ARMOperand::CreateImm(IdVal, S,
E, *
this));
6367 bool IsLazyLoadStore = Mnemonic ==
"vlldm" || Mnemonic ==
"vlstm";
6368 bool IsVSCCLRM = Mnemonic ==
"vscclrm";
6370 IsLazyLoadStore, IsVSCCLRM);
6383 auto AdjacentToken = getLexer().peekTok(
false);
6387 if (!ExpectIdentifier) {
6396 if (getParser().parseExpression(ImmVal))
6400 int32_t Val =
CE->getValue();
6401 if (IsNegative && Val == 0)
6406 Operands.push_back(ARMOperand::CreateImm(ImmVal, S,
E, *
this));
6412 Operands.push_back(ARMOperand::CreateToken(
6428 if (parsePrefix(RefKind))
6431 const MCExpr *SubExprVal;
6432 if (getParser().parseExpression(SubExprVal))
6438 Operands.push_back(ARMOperand::CreateImm(ExprVal, S,
E, *
this));
6443 if (Mnemonic !=
"ldr")
6444 return Error(S,
"unexpected token in operand");
6446 const MCExpr *SubExprVal;
6447 if (getParser().parseExpression(SubExprVal))
6454 ARMOperand::CreateConstantPoolImm(SubExprVal, S,
E, *
this));
6460bool ARMAsmParser::parseImmExpr(int64_t &Out) {
6461 const MCExpr *Expr =
nullptr;
6462 SMLoc L = getParser().getTok().getLoc();
6463 if (check(getParser().parseExpression(Expr), L,
"expected expression"))
6466 if (check(!
Value, L,
"expected constant expression"))
6468 Out =
Value->getValue();
6497 static const struct PrefixEntry {
6498 const char *Spelling;
6501 } PrefixEntries[] = {
6513 llvm::find_if(PrefixEntries, [&IDVal](
const PrefixEntry &PE) {
6514 return PE.Spelling == IDVal;
6516 if (Prefix == std::end(PrefixEntries)) {
6522 switch (getContext().getObjectFileType()) {
6524 CurrentFormat = MACHO;
6527 CurrentFormat =
ELF;
6530 CurrentFormat =
COFF;
6533 CurrentFormat = WASM;
6543 if (~
Prefix->SupportedFormats & CurrentFormat) {
6545 "cannot represent relocation in the current file format");
6549 RefKind =
Prefix->VariantKind;
6573 unsigned &ProcessorIMod,
6577 CarrySetting =
false;
6583 if ((Mnemonic ==
"movs" &&
isThumb()) || Mnemonic ==
"teq" ||
6584 Mnemonic ==
"vceq" || Mnemonic ==
"svc" || Mnemonic ==
"mls" ||
6585 Mnemonic ==
"smmls" || Mnemonic ==
"vcls" || Mnemonic ==
"vmls" ||
6586 Mnemonic ==
"vnmls" || Mnemonic ==
"vacge" || Mnemonic ==
"vcge" ||
6587 Mnemonic ==
"vclt" || Mnemonic ==
"vacgt" || Mnemonic ==
"vaclt" ||
6588 Mnemonic ==
"vacle" || Mnemonic ==
"hlt" || Mnemonic ==
"vcgt" ||
6589 Mnemonic ==
"vcle" || Mnemonic ==
"smlal" || Mnemonic ==
"umaal" ||
6590 Mnemonic ==
"umlal" || Mnemonic ==
"vabal" || Mnemonic ==
"vmlal" ||
6591 Mnemonic ==
"vpadal" || Mnemonic ==
"vqdmlal" || Mnemonic ==
"fmuls" ||
6592 Mnemonic ==
"vmaxnm" || Mnemonic ==
"vminnm" || Mnemonic ==
"vcvta" ||
6593 Mnemonic ==
"vcvtn" || Mnemonic ==
"vcvtp" || Mnemonic ==
"vcvtm" ||
6594 Mnemonic ==
"vrinta" || Mnemonic ==
"vrintn" || Mnemonic ==
"vrintp" ||
6595 Mnemonic ==
"vrintm" || Mnemonic ==
"hvc" ||
6596 Mnemonic.
starts_with(
"vsel") || Mnemonic ==
"vins" ||
6597 Mnemonic ==
"vmovx" || Mnemonic ==
"bxns" || Mnemonic ==
"blxns" ||
6598 Mnemonic ==
"vdot" || Mnemonic ==
"vmmla" || Mnemonic ==
"vudot" ||
6599 Mnemonic ==
"vsdot" || Mnemonic ==
"vcmla" || Mnemonic ==
"vcadd" ||
6600 Mnemonic ==
"vfmal" || Mnemonic ==
"vfmsl" || Mnemonic ==
"wls" ||
6601 Mnemonic ==
"le" || Mnemonic ==
"dls" || Mnemonic ==
"csel" ||
6602 Mnemonic ==
"csinc" || Mnemonic ==
"csinv" || Mnemonic ==
"csneg" ||
6603 Mnemonic ==
"cinc" || Mnemonic ==
"cinv" || Mnemonic ==
"cneg" ||
6604 Mnemonic ==
"cset" || Mnemonic ==
"csetm" || Mnemonic ==
"aut" ||
6605 Mnemonic ==
"pac" || Mnemonic ==
"pacbti" || Mnemonic ==
"bti")
6610 if (Mnemonic !=
"adcs" && Mnemonic !=
"bics" && Mnemonic !=
"movs" &&
6611 Mnemonic !=
"muls" && Mnemonic !=
"smlals" && Mnemonic !=
"smulls" &&
6612 Mnemonic !=
"umlals" && Mnemonic !=
"umulls" && Mnemonic !=
"lsls" &&
6613 Mnemonic !=
"sbcs" && Mnemonic !=
"rscs" &&
6615 (Mnemonic ==
"vmine" || Mnemonic ==
"vshle" || Mnemonic ==
"vshlt" ||
6616 Mnemonic ==
"vshllt" || Mnemonic ==
"vrshle" || Mnemonic ==
"vrshlt" ||
6617 Mnemonic ==
"vmvne" || Mnemonic ==
"vorne" || Mnemonic ==
"vnege" ||
6618 Mnemonic ==
"vnegt" || Mnemonic ==
"vmule" || Mnemonic ==
"vmult" ||
6619 Mnemonic ==
"vrintne" || Mnemonic ==
"vcmult" ||
6620 Mnemonic ==
"vcmule" || Mnemonic ==
"vpsele" || Mnemonic ==
"vpselt" ||
6624 Mnemonic = Mnemonic.
slice(0, Mnemonic.
size() - 2);
6632 !(Mnemonic ==
"cps" || Mnemonic ==
"mls" || Mnemonic ==
"mrs" ||
6633 Mnemonic ==
"smmls" || Mnemonic ==
"vabs" || Mnemonic ==
"vcls" ||
6634 Mnemonic ==
"vmls" || Mnemonic ==
"vmrs" || Mnemonic ==
"vnmls" ||
6635 Mnemonic ==
"vqabs" || Mnemonic ==
"vrecps" || Mnemonic ==
"vrsqrts" ||
6636 Mnemonic ==
"srs" || Mnemonic ==
"flds" || Mnemonic ==
"fmrs" ||
6637 Mnemonic ==
"fsqrts" || Mnemonic ==
"fsubs" || Mnemonic ==
"fsts" ||
6638 Mnemonic ==
"fcpys" || Mnemonic ==
"fdivs" || Mnemonic ==
"fmuls" ||
6639 Mnemonic ==
"fcmps" || Mnemonic ==
"fcmpzs" || Mnemonic ==
"vfms" ||
6640 Mnemonic ==
"vfnms" || Mnemonic ==
"fconsts" || Mnemonic ==
"bxns" ||
6641 Mnemonic ==
"blxns" || Mnemonic ==
"vfmas" || Mnemonic ==
"vmlas" ||
6642 (Mnemonic ==
"movs" &&
isThumb()))) {
6643 Mnemonic = Mnemonic.
slice(0, Mnemonic.
size() - 1);
6644 CarrySetting =
true;
6657 Mnemonic = Mnemonic.
slice(0, Mnemonic.
size()-2);
6658 ProcessorIMod =
IMod;
6662 if (isMnemonicVPTPredicable(Mnemonic, ExtraToken) && Mnemonic !=
"vmovlt" &&
6663 Mnemonic !=
"vshllt" && Mnemonic !=
"vrshrnt" && Mnemonic !=
"vshrnt" &&
6664 Mnemonic !=
"vqrshrunt" && Mnemonic !=
"vqshrunt" &&
6665 Mnemonic !=
"vqrshrnt" && Mnemonic !=
"vqshrnt" && Mnemonic !=
"vmullt" &&
6666 Mnemonic !=
"vqmovnt" && Mnemonic !=
"vqmovunt" &&
6667 Mnemonic !=
"vqmovnt" && Mnemonic !=
"vmovnt" && Mnemonic !=
"vqdmullt" &&
6668 Mnemonic !=
"vpnot" && Mnemonic !=
"vcvtt" && Mnemonic !=
"vcvt") {
6672 Mnemonic = Mnemonic.
slice(0, Mnemonic.
size()-1);
6680 ITMask = Mnemonic.
substr(2);
6681 Mnemonic = Mnemonic.
slice(0, 2);
6685 ITMask = Mnemonic.
substr(4);
6686 Mnemonic = Mnemonic.
slice(0, 4);
6688 ITMask = Mnemonic.
substr(3);
6689 Mnemonic = Mnemonic.
slice(0, 3);
6699void ARMAsmParser::getMnemonicAcceptInfo(
StringRef Mnemonic,
6702 bool &CanAcceptCarrySet,
6703 bool &CanAcceptPredicationCode,
6704 bool &CanAcceptVPTPredicationCode) {
6705 CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken);
6708 Mnemonic ==
"and" || Mnemonic ==
"lsl" || Mnemonic ==
"lsr" ||
6709 Mnemonic ==
"rrx" || Mnemonic ==
"ror" || Mnemonic ==
"sub" ||
6710 Mnemonic ==
"add" || Mnemonic ==
"adc" || Mnemonic ==
"mul" ||
6711 Mnemonic ==
"bic" || Mnemonic ==
"asr" || Mnemonic ==
"orr" ||
6712 Mnemonic ==
"mvn" || Mnemonic ==
"rsb" || Mnemonic ==
"rsc" ||
6713 Mnemonic ==
"orn" || Mnemonic ==
"sbc" || Mnemonic ==
"eor" ||
6714 Mnemonic ==
"neg" || Mnemonic ==
"vfm" || Mnemonic ==
"vfnm" ||
6716 (Mnemonic ==
"smull" || Mnemonic ==
"mov" || Mnemonic ==
"mla" ||
6717 Mnemonic ==
"smlal" || Mnemonic ==
"umlal" || Mnemonic ==
"umull"));
6719 if (Mnemonic ==
"bkpt" || Mnemonic ==
"cbnz" || Mnemonic ==
"setend" ||
6720 Mnemonic ==
"cps" || Mnemonic ==
"it" || Mnemonic ==
"cbz" ||
6721 Mnemonic ==
"trap" || Mnemonic ==
"hlt" || Mnemonic ==
"udf" ||
6723 Mnemonic.
starts_with(
"vsel") || Mnemonic ==
"vmaxnm" ||
6724 Mnemonic ==
"vminnm" || Mnemonic ==
"vcvta" || Mnemonic ==
"vcvtn" ||
6725 Mnemonic ==
"vcvtp" || Mnemonic ==
"vcvtm" || Mnemonic ==
"vrinta" ||
6726 Mnemonic ==
"vrintn" || Mnemonic ==
"vrintp" || Mnemonic ==
"vrintm" ||
6727 Mnemonic.
starts_with(
"aes") || Mnemonic ==
"hvc" ||
6728 Mnemonic ==
"setpan" || Mnemonic.
starts_with(
"sha1") ||
6731 Mnemonic ==
"vmovx" || Mnemonic ==
"vins" || Mnemonic ==
"vudot" ||
6732 Mnemonic ==
"vsdot" || Mnemonic ==
"vcmla" || Mnemonic ==
"vcadd" ||
6733 Mnemonic ==
"vfmal" || Mnemonic ==
"vfmsl" || Mnemonic ==
"vfmat" ||
6734 Mnemonic ==
"vfmab" || Mnemonic ==
"vdot" || Mnemonic ==
"vmmla" ||
6735 Mnemonic ==
"sb" || Mnemonic ==
"ssbb" || Mnemonic ==
"pssbb" ||
6736 Mnemonic ==
"vsmmla" || Mnemonic ==
"vummla" || Mnemonic ==
"vusmmla" ||
6737 Mnemonic ==
"vusdot" || Mnemonic ==
"vsudot" || Mnemonic ==
"bfcsel" ||
6738 Mnemonic ==
"wls" || Mnemonic ==
"dls" || Mnemonic ==
"le" ||
6739 Mnemonic ==
"csel" || Mnemonic ==
"csinc" || Mnemonic ==
"csinv" ||
6740 Mnemonic ==
"csneg" || Mnemonic ==
"cinc" || Mnemonic ==
"cinv" ||
6741 Mnemonic ==
"cneg" || Mnemonic ==
"cset" || Mnemonic ==
"csetm" ||
6742 (hasCDE() && MS.isCDEInstr(Mnemonic) &&
6743 !MS.isITPredicableCDEInstr(Mnemonic)) ||
6745 Mnemonic ==
"pac" || Mnemonic ==
"pacbti" || Mnemonic ==
"aut" ||
6746 Mnemonic ==
"bti" ||
6753 CanAcceptPredicationCode =
false;
6756 CanAcceptPredicationCode =
6757 Mnemonic !=
"cdp2" && Mnemonic !=
"clrex" && Mnemonic !=
"mcr2" &&
6758 Mnemonic !=
"mcrr2" && Mnemonic !=
"mrc2" && Mnemonic !=
"mrrc2" &&
6759 Mnemonic !=
"dmb" && Mnemonic !=
"dfb" && Mnemonic !=
"dsb" &&
6760 Mnemonic !=
"isb" && Mnemonic !=
"pld" && Mnemonic !=
"pli" &&
6761 Mnemonic !=
"pldw" && Mnemonic !=
"ldc2" && Mnemonic !=
"ldc2l" &&
6762 Mnemonic !=
"stc2" && Mnemonic !=
"stc2l" && Mnemonic !=
"tsb" &&
6764 }
else if (isThumbOne()) {
6766 CanAcceptPredicationCode = Mnemonic !=
"movs";
6768 CanAcceptPredicationCode = Mnemonic !=
"nop" && Mnemonic !=
"movs";
6770 CanAcceptPredicationCode =
true;
6774 for (
unsigned I = 0;
I < MnemonicOpsEndInd; ++
I) {
6775 auto &
Op =
static_cast<ARMOperand &
>(*
Operands[
I]);
6776 if (
Op.isToken() &&
Op.getToken() ==
".w")
6786void ARMAsmParser::tryConvertingToTwoOperandForm(
6792 if (
Operands.size() != MnemonicOpsEndInd + 3)
6795 const auto &Op3 =
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]);
6796 auto &Op4 =
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1]);
6797 if (!Op3.isReg() || !Op4.isReg())
6800 auto Op3Reg = Op3.getReg();
6801 auto Op4Reg = Op4.getReg();
6807 auto &Op5 =
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 2]);
6809 if (Mnemonic !=
"add")
6811 bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
6812 (Op5.isReg() && Op5.getReg() == ARM::PC);
6813 if (!TryTransform) {
6814 TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
6815 (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
6816 !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
6817 Op5.isImm() && !Op5.isImm0_508s4());
6821 }
else if (!isThumbOne())
6824 if (!(Mnemonic ==
"add" || Mnemonic ==
"sub" || Mnemonic ==
"and" ||
6825 Mnemonic ==
"eor" || Mnemonic ==
"lsl" || Mnemonic ==
"lsr" ||
6826 Mnemonic ==
"asr" || Mnemonic ==
"adc" || Mnemonic ==
"sbc" ||
6827 Mnemonic ==
"ror" || Mnemonic ==
"orr" || Mnemonic ==
"bic"))
6833 bool Transform = Op3Reg == Op4Reg;
6838 const ARMOperand *LastOp = &Op5;
6840 if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
6841 ((Mnemonic ==
"add" && Op4Reg != ARM::SP) ||
6842 Mnemonic ==
"and" || Mnemonic ==
"eor" ||
6843 Mnemonic ==
"adc" || Mnemonic ==
"orr")) {
6854 if (((Mnemonic ==
"add" && CarrySetting) || Mnemonic ==
"sub") &&
6860 if ((Mnemonic ==
"add" || Mnemonic ==
"sub") && LastOp->isImm0_7())
6874 ARMOperand &
Op =
static_cast<ARMOperand &
>(MCOp);
6880 const MCExpr *E = dyn_cast<MCExpr>(
Op.getImm());
6883 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
6892bool ARMAsmParser::shouldOmitVectorPredicateOperand(
6894 if (!hasMVE() ||
Operands.size() <= MnemonicOpsEndInd)
6908 if (
static_cast<ARMOperand &
>(*Operand).isVectorIndex() ||
6909 ((*Operand).isReg() &&
6910 (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
6911 (*Operand).getReg()) ||
6912 ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
6913 (*Operand).getReg())))) {
6923 if (
static_cast<ARMOperand &
>(*Operand).isVectorIndex() ||
6924 static_cast<ARMOperand &
>(*Operand).isQReg())
6940 unsigned VariantID);
6951void ARMAsmParser::fixupGNULDRDAlias(
StringRef Mnemonic,
6953 unsigned MnemonicOpsEndInd) {
6954 if (Mnemonic !=
"ldrd" && Mnemonic !=
"strd" && Mnemonic !=
"ldrexd" &&
6955 Mnemonic !=
"strexd" && Mnemonic !=
"ldaexd" && Mnemonic !=
"stlexd")
6958 unsigned IdX = Mnemonic ==
"strexd" || Mnemonic ==
"stlexd"
6959 ? MnemonicOpsEndInd + 1
6960 : MnemonicOpsEndInd;
6965 ARMOperand &Op2 =
static_cast<ARMOperand &
>(*
Operands[IdX]);
6966 ARMOperand &Op3 =
static_cast<ARMOperand &
>(*
Operands[IdX + 1]);
6970 if (!Op3.isGPRMem())
6977 unsigned RtEncoding =
MRI->getEncodingValue(Op2.getReg());
6978 if (!
isThumb() && (RtEncoding & 1)) {
6983 if (Op2.getReg() == ARM::PC)
6985 unsigned PairedReg = GPR.
getRegister(RtEncoding + 1);
6986 if (!PairedReg || PairedReg == ARM::PC ||
6987 (PairedReg == ARM::SP && !hasV8Ops()))
6991 ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(),
6992 Op2.getEndLoc(), *
this));
7000bool ARMAsmParser::CDEConvertDualRegOperand(
StringRef Mnemonic,
7002 unsigned MnemonicOpsEndInd) {
7003 assert(MS.isCDEDualRegInstr(Mnemonic));
7005 if (
Operands.size() < 3 + MnemonicOpsEndInd)
7009 "operand must be an even-numbered register in the range [r0, r10]");
7042 RPair = ARM::R10_R11;
7057 for (
unsigned I = 0;
I < MnemonicOpsEndInd; ++
I)
7058 if (
static_cast<ARMOperand &
>(*
Operands[
I]).isCondCode()) {
7060 --MnemonicOpsEndInd;
7066 for (
unsigned I = 0;
I < MnemonicOpsEndInd; ++
I)
7067 if (
static_cast<ARMOperand &
>(*
Operands[
I]).isCCOut()) {
7069 --MnemonicOpsEndInd;
7075 for (
unsigned I = 0;
I < MnemonicOpsEndInd; ++
I)
7076 if (
static_cast<ARMOperand &
>(*
Operands[
I]).isVPTPred()) {
7078 --MnemonicOpsEndInd;
7093 const FeatureBitset &AvailableFeatures = getAvailableFeatures();
7094 unsigned AssemblerDialect = getParser().getAssemblerDialect();
7100 parseDirectiveReq(
Name, NameLoc);
7107 size_t Start = 0, Next =
Name.find(
'.');
7114 unsigned ProcessorIMod;
7117 Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode, VPTPredicationCode,
7118 CarrySetting, ProcessorIMod, ITMask);
7121 if (isThumbOne() && PredicationCode !=
ARMCC::AL && Mnemonic !=
"b") {
7122 return Error(NameLoc,
"conditional execution not supported in Thumb1");
7125 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc, *
this));
7138 if (Mnemonic ==
"it" || Mnemonic.
starts_with(
"vpt") ||
7141 Mnemonic ==
"vpt" ?
SMLoc::getFromPointer(NameLoc.getPointer() + 3) :
7142 SMLoc::getFromPointer(NameLoc.getPointer() + 4);
7143 if (ITMask.
size() > 3) {
7144 if (Mnemonic ==
"it")
7145 return Error(Loc,
"too many conditions on IT instruction");
7146 return Error(Loc,
"too many conditions on VPT instruction");
7150 if (Pos !=
't' && Pos !=
'e') {
7151 return Error(Loc,
"illegal IT block condition mask '" + ITMask +
"'");
7157 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc, *
this));
7170 bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode;
7171 getMnemonicAcceptInfo(Mnemonic, ExtraToken,
Name, CanAcceptCarrySet,
7172 CanAcceptPredicationCode, CanAcceptVPTPredicationCode);
7176 if (!CanAcceptCarrySet && CarrySetting) {
7177 return Error(NameLoc,
"instruction '" + Mnemonic +
7178 "' can not set flags, but 's' suffix specified");
7182 if (!CanAcceptPredicationCode && PredicationCode !=
ARMCC::AL) {
7183 return Error(NameLoc,
"instruction '" + Mnemonic +
7184 "' is not predicable, but condition code specified");
7189 if (!CanAcceptVPTPredicationCode && VPTPredicationCode !=
ARMVCC::None) {
7190 return Error(NameLoc,
"instruction '" + Mnemonic +
7191 "' is not VPT predicable, but VPT code T/E is specified");
7195 if (CanAcceptCarrySet && CarrySetting) {
7197 Operands.push_back(ARMOperand::CreateCCOut(
7198 CarrySetting ? ARM::CPSR : ARM::NoRegister, Loc, *
this));
7205 Operands.push_back(ARMOperand::CreateCondCode(
7213 !(Mnemonic.
starts_with(
"vcvt") && Mnemonic !=
"vcvta" &&
7214 Mnemonic !=
"vcvtn" && Mnemonic !=
"vcvtp" && Mnemonic !=
"vcvtm")) {
7217 Operands.push_back(ARMOperand::CreateVPTPred(
7222 if (ProcessorIMod) {
7223 Operands.push_back(ARMOperand::CreateImm(
7226 }
else if (Mnemonic ==
"cps" && isMClass()) {
7227 return Error(NameLoc,
"instruction 'cps' requires effect for M-class");
7233 Next =
Name.find(
'.', Start + 1);
7234 ExtraToken =
Name.slice(Start, Next);
7243 if (ExtraToken ==
".n" && !
isThumb()) {
7245 return Error(Loc,
"instruction with .n (narrow) qualifier not allowed in "
7252 if (ExtraToken !=
".n" && (
isThumb() || ExtraToken !=
".w")) {
7254 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc, *
this));
7261 unsigned MnemonicOpsEndInd =
Operands.size();
7266 if (parseOperand(
Operands, Mnemonic)) {
7272 if (parseOperand(
Operands, Mnemonic)) {
7281 tryConvertingToTwoOperandForm(Mnemonic, PredicationCode, CarrySetting,
7284 if (hasCDE() && MS.isCDEInstr(Mnemonic)) {
7292 if (MS.isCDEDualRegInstr(Mnemonic)) {
7294 CDEConvertDualRegOperand(Mnemonic,
Operands, MnemonicOpsEndInd);
7301 if (!shouldOmitVectorPredicateOperand(Mnemonic,
Operands,
7302 MnemonicOpsEndInd) &&
7303 Mnemonic ==
"vmov" && PredicationCode ==
ARMCC::LT) {
7311 Mnemonic.
size() - 1 + CarrySetting);
7316 }
else if (Mnemonic ==
"vcvt" && PredicationCode ==
ARMCC::NE &&
7317 !shouldOmitVectorPredicateOperand(Mnemonic,
Operands,
7318 MnemonicOpsEndInd)) {
7327 Mnemonic.
size() - 1 + CarrySetting);
7331 ARMOperand::CreateToken(
StringRef(
"vcvtn"), MLoc, *
this));
7332 }
else if (Mnemonic ==
"vmul" && PredicationCode ==
ARMCC::LT &&
7333 !shouldOmitVectorPredicateOperand(Mnemonic,
Operands,
7334 MnemonicOpsEndInd)) {
7347 if (!shouldOmitVectorPredicateOperand(Mnemonic,
Operands,
7348 MnemonicOpsEndInd)) {
7355 if (Mnemonic.
starts_with(
"vcvtt") && MnemonicOpsEndInd > 2) {
7357 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd - 2]);
7359 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd - 1]);
7360 if (!(Sz1.isToken() && Sz1.getToken().starts_with(
".f") &&
7361 Sz2.isToken() && Sz2.getToken().starts_with(
".f"))) {
7366 Mnemonic = Mnemonic.
substr(0, 4);
7368 ARMOperand::CreateToken(Mnemonic, MLoc, *
this));
7372 Mnemonic.
size() + CarrySetting);
7375 ARMOperand::CreateVPTPred(
7377 ++MnemonicOpsEndInd;
7379 }
else if (CanAcceptVPTPredicationCode) {
7383 if (shouldOmitVectorPredicateOperand(Mnemonic,
Operands,
7384 MnemonicOpsEndInd)) {
7391 bool usedVPTPredicationCode =
false;
7393 if (
static_cast<ARMOperand &
>(*
Operands[
I]).isVPTPred())
7394 usedVPTPredicationCode =
true;
7395 if (!usedVPTPredicationCode) {
7403 Mnemonic =
Name.slice(0, Mnemonic.
size() + 1);
7406 ARMOperand::CreateToken(Mnemonic, NameLoc, *
this));
7415 if (!
isThumb() && Mnemonic ==
"blx" &&
7416 Operands.size() == MnemonicOpsEndInd + 1 &&
7417 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]).isImm())
7421 fixupGNULDRDAlias(Mnemonic,
Operands, MnemonicOpsEndInd);
7430 bool IsLoad = (Mnemonic ==
"ldrexd" || Mnemonic ==
"ldaexd");
7431 if (!
isThumb() &&
Operands.size() > MnemonicOpsEndInd + 1 + (!IsLoad) &&
7432 (Mnemonic ==
"ldrexd" || Mnemonic ==
"strexd" || Mnemonic ==
"ldaexd" ||
7433 Mnemonic ==
"stlexd")) {
7434 unsigned Idx = IsLoad ? MnemonicOpsEndInd : MnemonicOpsEndInd + 1;
7435 ARMOperand &Op1 =
static_cast<ARMOperand &
>(*
Operands[
Idx]);
7436 ARMOperand &Op2 =
static_cast<ARMOperand &
>(*
Operands[
Idx + 1]);
7440 if (Op1.isReg() && MRC.
contains(Op1.getReg())) {
7442 unsigned Rt =
MRI->getEncodingValue(Reg1);
7444 unsigned Rt2 =
MRI->getEncodingValue(Reg2);
7447 return Error(Op2.getStartLoc(),
7448 IsLoad ?
"destination operands must be sequential"
7449 :
"source operands must be sequential");
7455 IsLoad ?
"destination operands must start start at an even register"
7456 :
"source operands must start start at an even register");
7459 Reg1, ARM::gsub_0, &(
MRI->getRegClass(ARM::GPRPairRegClassID)));
7460 Operands[
Idx] = ARMOperand::CreateReg(NewReg, Op1.getStartLoc(),
7461 Op2.getEndLoc(), *
this);
7471 if (isThumbTwo() && Mnemonic ==
"sub" &&
7472 Operands.size() == MnemonicOpsEndInd + 3 &&
7473 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]).isReg() &&
7474 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]).getReg() ==
7476 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1]).isReg() &&
7477 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1]).getReg() ==
7479 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 2]).isImm()) {
7480 Operands.front() = ARMOperand::CreateToken(
Name, NameLoc, *
this);
7493 bool &containsReg) {
7494 containsReg =
false;
7520 return Inst.
getOpcode() == ARM::tBKPT ||
7527 unsigned MnemonicOpsEndInd) {
7528 for (
unsigned I = MnemonicOpsEndInd;
I <
Operands.size(); ++
I) {
7529 const ARMOperand &
Op =
static_cast<const ARMOperand &
>(*
Operands[
I]);
7530 if (
Op.isRegList()) {
7537bool ARMAsmParser::validatetLDMRegList(
const MCInst &Inst,
7539 unsigned MnemonicOpsEndInd,
7540 unsigned ListIndex,
bool IsARPop) {
7545 if (!IsARPop && ListContainsSP)
7548 "SP may not be in the register list");
7549 if (ListContainsPC && ListContainsLR)
7552 "PC and LR may not be in the register list simultaneously");
7556bool ARMAsmParser::validatetSTMRegList(
const MCInst &Inst,
7558 unsigned MnemonicOpsEndInd,
7559 unsigned ListIndex) {
7563 if (ListContainsSP && ListContainsPC)
7566 "SP and PC may not be in the register list");
7570 "SP may not be in the register list");
7574 "PC may not be in the register list");
7579 bool Load,
bool ARMMode,
bool Writeback,
7580 unsigned MnemonicOpsEndInd) {
7581 unsigned RtIndex =
Load || !Writeback ? 0 : 1;
7594 "Rt must be even-numbered");
7597 if (Rt2 != Rt + 1) {
7600 "destination operands must be sequential");
7603 "source operands must be sequential");
7610 if (!ARMMode && Load) {
7613 "destination operands can't be identical");
7619 if (Rn == Rt || Rn == Rt2) {
7622 "base register needs to be different from destination "
7625 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7626 "source register and base register can't be identical");
7649 ARMOperand &
Op =
static_cast<ARMOperand &
>(MCOp);
7655 const MCExpr *E = dyn_cast<MCExpr>(
Op.getImm());
7662bool ARMAsmParser::validateInstruction(
MCInst &Inst,
7664 unsigned MnemonicOpsEndInd) {
7674 return Error(Loc,
"instructions in IT block must be predicable");
7677 if (
Cond != currentITCond()) {
7681 if (
static_cast<ARMOperand &
>(*
Operands[
I]).isCondCode())
7683 return Error(CondLoc,
"incorrect condition in IT block; got '" +
7685 "', but expected '" +
7694 return Error(Loc,
"predicated instructions must be in IT block");
7698 return Warning(Loc,
"predicated instructions should be in IT block");
7705 if (MCID.
operands()[i].isPredicate()) {
7707 return Error(Loc,
"instruction is not predicable");
7715 if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
7716 return Error(Loc,
"instruction must be outside of IT block or the last instruction in an IT block");
7720 unsigned Bit = extractITMaskBit(VPTState.Mask, VPTState.CurPosition);
7722 return Error(Loc,
"instruction in VPT block must be predicable");
7725 if (Pred != VPTPred) {
7728 if (
static_cast<ARMOperand &
>(*
Operands[
I]).isVPTPred())
7730 return Error(PredLoc,
"incorrect predication in VPT block; got '" +
7732 "', but expected '" +
7739 return Error(Loc,
"VPT predicated instructions must be in VPT block");
7741 const unsigned Opcode = Inst.
getOpcode();
7746 case ARM::VLSTM_T2: {
7750 MnemonicOpsEndInd + 2) {
7751 ARMOperand &
Op =
static_cast<ARMOperand &
>(
7754 auto &RegList =
Op.getRegList();
7756 if (RegList.size() == 32 && !hasV8_1MMainline()) {
7757 return Error(
Op.getEndLoc(),
"T2 version requires v8.1-M.Main");
7760 if (hasD32() && RegList.size() != 32) {
7761 return Error(
Op.getEndLoc(),
"operand must be exactly {d0-d31}");
7764 if (!hasD32() && (RegList.size() != 16 && RegList.size() != 32)) {
7766 "operand must be exactly {d0-d15} (T1) or {d0-d31} (T2)");
7782 return Error(Loc,
"unpredictable IT predicate sequence");
7786 if (validateLDRDSTRD(Inst,
Operands,
true,
true,
7787 false, MnemonicOpsEndInd))
7791 case ARM::LDRD_POST:
7792 if (validateLDRDSTRD(Inst,
Operands,
true,
true,
7793 true, MnemonicOpsEndInd))
7797 if (validateLDRDSTRD(Inst,
Operands,
true,
false,
7798 false, MnemonicOpsEndInd))
7801 case ARM::t2LDRD_PRE:
7802 case ARM::t2LDRD_POST:
7803 if (validateLDRDSTRD(Inst,
Operands,
true,
false,
7804 true, MnemonicOpsEndInd))
7810 if (RmReg == ARM::SP && !hasV8Ops())
7812 "r13 (SP) is an unpredictable operand to BXJ");
7816 if (validateLDRDSTRD(Inst,
Operands,
false,
true,
7817 false, MnemonicOpsEndInd))
7821 case ARM::STRD_POST:
7822 if (validateLDRDSTRD(Inst,
Operands,
false,
true,
7823 true, MnemonicOpsEndInd))
7826 case ARM::t2STRD_PRE:
7827 case ARM::t2STRD_POST:
7828 if (validateLDRDSTRD(Inst,
Operands,
false,
false,
7829 true, MnemonicOpsEndInd))
7832 case ARM::STR_PRE_IMM:
7833 case ARM::STR_PRE_REG:
7834 case ARM::t2STR_PRE:
7835 case ARM::STR_POST_IMM:
7836 case ARM::STR_POST_REG:
7837 case ARM::t2STR_POST:
7839 case ARM::t2STRH_PRE:
7840 case ARM::STRH_POST:
7841 case ARM::t2STRH_POST:
7842 case ARM::STRB_PRE_IMM:
7843 case ARM::STRB_PRE_REG:
7844 case ARM::t2STRB_PRE:
7845 case ARM::STRB_POST_IMM:
7846 case ARM::STRB_POST_REG:
7847 case ARM::t2STRB_POST: {
7853 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
7854 "source register and base register can't be identical");
7857 case ARM::t2LDR_PRE_imm:
7858 case ARM::t2LDR_POST_imm:
7859 case ARM::t2STR_PRE_imm:
7860 case ARM::t2STR_POST_imm: {
7867 "destination register and base register can't be identical");
7868 if (Inst.
getOpcode() == ARM::t2LDR_POST_imm ||
7869 Inst.
getOpcode() == ARM::t2STR_POST_imm) {
7871 if (Imm > 255 || Imm < -255)
7872 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7873 "operand must be in range [-255, 255]");
7875 if (Inst.
getOpcode() == ARM::t2STR_PRE_imm ||
7876 Inst.
getOpcode() == ARM::t2STR_POST_imm) {
7879 "operand must be a register in range [r0, r14]");
7885 case ARM::t2LDRB_OFFSET_imm:
7886 case ARM::t2LDRB_PRE_imm:
7887 case ARM::t2LDRB_POST_imm:
7888 case ARM::t2STRB_OFFSET_imm:
7889 case ARM::t2STRB_PRE_imm:
7890 case ARM::t2STRB_POST_imm: {
7891 if (Inst.
getOpcode() == ARM::t2LDRB_POST_imm ||
7892 Inst.
getOpcode() == ARM::t2STRB_POST_imm ||
7893 Inst.
getOpcode() == ARM::t2LDRB_PRE_imm ||
7894 Inst.
getOpcode() == ARM::t2STRB_PRE_imm) {
7896 if (Imm > 255 || Imm < -255)
7897 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7898 "operand must be in range [-255, 255]");
7899 }
else if (Inst.
getOpcode() == ARM::t2LDRB_OFFSET_imm ||
7900 Inst.
getOpcode() == ARM::t2STRB_OFFSET_imm) {
7902 if (Imm > 0 || Imm < -255)
7903 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7904 "operand must be in range [0, 255] with a negative sign");
7908 "if operand is PC, should call the LDRB (literal)");
7913 case ARM::t2LDRH_OFFSET_imm:
7914 case ARM::t2LDRH_PRE_imm:
7915 case ARM::t2LDRH_POST_imm:
7916 case ARM::t2STRH_OFFSET_imm:
7917 case ARM::t2STRH_PRE_imm:
7918 case ARM::t2STRH_POST_imm: {
7919 if (Inst.
getOpcode() == ARM::t2LDRH_POST_imm ||
7920 Inst.
getOpcode() == ARM::t2STRH_POST_imm ||
7921 Inst.
getOpcode() == ARM::t2LDRH_PRE_imm ||
7922 Inst.
getOpcode() == ARM::t2STRH_PRE_imm) {
7924 if (Imm > 255 || Imm < -255)
7925 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7926 "operand must be in range [-255, 255]");
7927 }
else if (Inst.
getOpcode() == ARM::t2LDRH_OFFSET_imm ||
7928 Inst.
getOpcode() == ARM::t2STRH_OFFSET_imm) {
7930 if (Imm > 0 || Imm < -255)
7931 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7932 "operand must be in range [0, 255] with a negative sign");
7936 "if operand is PC, should call the LDRH (literal)");
7941 case ARM::t2LDRSB_OFFSET_imm:
7942 case ARM::t2LDRSB_PRE_imm:
7943 case ARM::t2LDRSB_POST_imm: {
7944 if (Inst.
getOpcode() == ARM::t2LDRSB_POST_imm ||
7945 Inst.
getOpcode() == ARM::t2LDRSB_PRE_imm) {
7947 if (Imm > 255 || Imm < -255)
7948 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7949 "operand must be in range [-255, 255]");
7950 }
else if (Inst.
getOpcode() == ARM::t2LDRSB_OFFSET_imm) {
7952 if (Imm > 0 || Imm < -255)
7953 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7954 "operand must be in range [0, 255] with a negative sign");
7957 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7958 "if operand is PC, should call the LDRH (literal)");
7963 case ARM::t2LDRSH_OFFSET_imm:
7964 case ARM::t2LDRSH_PRE_imm:
7965 case ARM::t2LDRSH_POST_imm: {
7966 if (Inst.
getOpcode() == ARM::t2LDRSH_POST_imm ||
7967 Inst.
getOpcode() == ARM::t2LDRSH_PRE_imm) {
7969 if (Imm > 255 || Imm < -255)
7970 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7971 "operand must be in range [-255, 255]");
7972 }
else if (Inst.
getOpcode() == ARM::t2LDRSH_OFFSET_imm) {
7974 if (Imm > 0 || Imm < -255)
7975 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7976 "operand must be in range [0, 255] with a negative sign");
7980 "if operand is PC, should call the LDRH (literal)");
7985 case ARM::LDR_PRE_IMM:
7986 case ARM::LDR_PRE_REG:
7987 case ARM::t2LDR_PRE:
7988 case ARM::LDR_POST_IMM:
7989 case ARM::LDR_POST_REG:
7990 case ARM::t2LDR_POST:
7992 case ARM::t2LDRH_PRE:
7993 case ARM::LDRH_POST:
7994 case ARM::t2LDRH_POST:
7995 case ARM::LDRSH_PRE:
7996 case ARM::t2LDRSH_PRE:
7997 case ARM::LDRSH_POST:
7998 case ARM::t2LDRSH_POST:
7999 case ARM::LDRB_PRE_IMM:
8000 case ARM::LDRB_PRE_REG:
8001 case ARM::t2LDRB_PRE:
8002 case ARM::LDRB_POST_IMM:
8003 case ARM::LDRB_POST_REG:
8004 case ARM::t2LDRB_POST:
8005 case ARM::LDRSB_PRE:
8006 case ARM::t2LDRSB_PRE:
8007 case ARM::LDRSB_POST:
8008 case ARM::t2LDRSB_POST: {
8015 "destination register and base register can't be identical");
8019 case ARM::MVE_VLDRBU8_rq:
8020 case ARM::MVE_VLDRBU16_rq:
8021 case ARM::MVE_VLDRBS16_rq:
8022 case ARM::MVE_VLDRBU32_rq:
8023 case ARM::MVE_VLDRBS32_rq:
8024 case ARM::MVE_VLDRHU16_rq:
8025 case ARM::MVE_VLDRHU16_rq_u:
8026 case ARM::MVE_VLDRHU32_rq:
8027 case ARM::MVE_VLDRHU32_rq_u:
8028 case ARM::MVE_VLDRHS32_rq:
8029 case ARM::MVE_VLDRHS32_rq_u:
8030 case ARM::MVE_VLDRWU32_rq:
8031 case ARM::MVE_VLDRWU32_rq_u:
8032 case ARM::MVE_VLDRDU64_rq:
8033 case ARM::MVE_VLDRDU64_rq_u:
8034 case ARM::MVE_VLDRWU32_qi:
8035 case ARM::MVE_VLDRWU32_qi_pre:
8036 case ARM::MVE_VLDRDU64_qi:
8037 case ARM::MVE_VLDRDU64_qi_pre: {
8039 unsigned QdIdx = 0, QmIdx = 2;
8040 bool QmIsPointer =
false;
8042 case ARM::MVE_VLDRWU32_qi:
8043 case ARM::MVE_VLDRDU64_qi:
8047 case ARM::MVE_VLDRWU32_qi_pre:
8048 case ARM::MVE_VLDRDU64_qi_pre:
8059 Twine(
"destination vector register and vector ") +
8060 (QmIsPointer ?
"pointer" :
"offset") +
8061 " register can't be identical");
8073 if (Widthm1 >= 32 - LSB)
8074 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8075 "bitfield width must be in range [1,32-lsb]");
8087 bool HasWritebackToken =
8088 (
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
8090 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
8091 .getToken() ==
"!");
8093 bool ListContainsBase;
8098 "registers must be in range r0-r7");
8100 if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
8103 "writeback operator '!' expected");
8106 if (ListContainsBase && HasWritebackToken)
8107 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8108 "writeback operator '!' not allowed when base register "
8109 "in register list");
8111 if (validatetLDMRegList(Inst,
Operands, MnemonicOpsEndInd, 3))
8115 case ARM::LDMIA_UPD:
8116 case ARM::LDMDB_UPD:
8117 case ARM::LDMIB_UPD:
8118 case ARM::LDMDA_UPD:
8125 "writeback register not allowed in register list");
8129 if (validatetLDMRegList(Inst,
Operands, MnemonicOpsEndInd, 3))
8134 if (validatetSTMRegList(Inst,
Operands, MnemonicOpsEndInd, 3))
8137 case ARM::t2LDMIA_UPD:
8138 case ARM::t2LDMDB_UPD:
8139 case ARM::t2STMIA_UPD:
8140 case ARM::t2STMDB_UPD:
8143 "writeback register not allowed in register list");
8145 if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
8146 if (validatetLDMRegList(Inst,
Operands, MnemonicOpsEndInd, 3))
8149 if (validatetSTMRegList(Inst,
Operands, MnemonicOpsEndInd, 3))
8154 case ARM::sysLDMIA_UPD:
8155 case ARM::sysLDMDA_UPD:
8156 case ARM::sysLDMDB_UPD:
8157 case ARM::sysLDMIB_UPD:
8159 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8160 "writeback register only allowed on system LDM "
8161 "if PC in register-list");
8163 case ARM::sysSTMIA_UPD:
8164 case ARM::sysSTMDA_UPD:
8165 case ARM::sysSTMDB_UPD:
8166 case ARM::sysSTMIB_UPD:
8168 "system STM cannot have writeback register");
8173 bool ListContainsBase;
8175 ListContainsBase) &&
8178 "registers must be in range r0-r7 or pc");
8179 if (validatetLDMRegList(Inst,
Operands, MnemonicOpsEndInd, 2, !isMClass()))
8184 bool ListContainsBase;
8186 ListContainsBase) &&
8189 "registers must be in range r0-r7 or lr");
8190 if (validatetSTMRegList(Inst,
Operands, MnemonicOpsEndInd, 2))
8194 case ARM::tSTMIA_UPD: {
8195 bool ListContainsBase, InvalidLowList;
8197 0, ListContainsBase);
8198 if (InvalidLowList && !isThumbTwo())
8199 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8200 "registers must be in range r0-r7");
8204 if (InvalidLowList && ListContainsBase)
8206 "writeback operator '!' not allowed when base register "
8207 "in register list");
8209 if (validatetSTMRegList(Inst,
Operands, MnemonicOpsEndInd, 4))
8216 if (!isThumbTwo() &&
8218 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8219 "source register must be the same as destination");
8229 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8230 "source register must be sp if destination is sp");
8235 if (!(
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]))
8236 .isSignedOffset<11, 1>())
8238 "branch target out of range");
8241 int op = (
Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8242 : MnemonicOpsEndInd + 1;
8243 ARMOperand &Operand =
static_cast<ARMOperand &
>(*
Operands[
op]);
8245 if (!isa<MCBinaryExpr>(Operand.getImm()) &&
8246 !Operand.isSignedOffset<24, 1>())
8247 return Error(
Operands[
op]->getStartLoc(),
"branch target out of range");
8252 if (!
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd])
8253 .isSignedOffset<8, 1>())
8255 "branch target out of range");
8258 int Op = (
Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8259 : MnemonicOpsEndInd + 1;
8260 if (!
static_cast<ARMOperand &
>(*
Operands[
Op]).isSignedOffset<20, 1>())
8261 return Error(
Operands[
Op]->getStartLoc(),
"branch target out of range");
8266 if (!
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
8267 .isUnsignedOffset<6, 1>())
8268 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8269 "branch target out of range");
8275 case ARM::t2MOVTi16:
8283 int i = (
Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8284 : MnemonicOpsEndInd + 1;
8285 ARMOperand &
Op =
static_cast<ARMOperand &
>(*
Operands[i]);
8288 const MCExpr *E = dyn_cast<MCExpr>(
Op.getImm());
8290 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
8295 "immediate expression for mov requires :lower16: or :upper16");
8299 int i = (
Operands[MnemonicOpsEndInd + 1]->isImm()) ? MnemonicOpsEndInd + 1
8300 : MnemonicOpsEndInd + 2;
8303 return Error(
Op.getStartLoc(),
8304 "Immediate expression for Thumb adds requires :lower0_7:,"
8305 " :lower8_15:, :upper0_7: or :upper8_15:");
8311 return Error(
Op.getStartLoc(),
8312 "Immediate expression for Thumb movs requires :lower0_7:,"
8313 " :lower8_15:, :upper0_7: or :upper8_15:");
8322 if (Imm8 == 0x10 && Pred !=
ARMCC::AL && hasRAS())
8323 return Error(
Operands[1]->getStartLoc(),
"instruction 'esb' is not "
8324 "predicable, but condition "
8327 return Error(
Operands[1]->getStartLoc(),
"instruction 'csdb' is not "
8328 "predicable, but condition "
8336 if (!
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd])
8337 .isUnsignedOffset<4, 1>() ||
8340 "branch location out of range or not a multiple of 2");
8343 if (Opcode == ARM::t2BFi) {
8344 if (!
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
8345 .isSignedOffset<16, 1>())
8347 "branch target out of range or not a multiple of 2");
8348 }
else if (Opcode == ARM::t2BFLi) {
8349 if (!
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
8350 .isSignedOffset<18, 1>())
8352 "branch target out of range or not a multiple of 2");
8357 if (!
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd])
8358 .isUnsignedOffset<4, 1>() ||
8361 "branch location out of range or not a multiple of 2");
8363 if (!
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
8364 .isSignedOffset<16, 1>())
8365 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8366 "branch target out of range or not a multiple of 2");
8369 "branch location and else branch target should either both be "
8370 "immediates or both labels");
8374 if (Diff != 4 && Diff != 2)
8377 "else branch target must be 2 or 4 greater than the branch location");
8384 !ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
8387 "invalid register in register list. Valid registers are "
8388 "r0-r12, lr/r14 and APSR.");
8405 "instruction 'ssbb' is not predicable, but condition code "
8409 "instruction 'pssbb' is not predicable, but condition code "
8413 case ARM::VMOVRRS: {
8418 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8419 "source operands must be sequential");
8422 case ARM::VMOVSRR: {
8428 "destination operands must be sequential");
8432 case ARM::VSTMDIA: {
8434 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1]);
8435 auto &RegList =
Op.getRegList();
8436 if (RegList.size() < 1 || RegList.size() > 16)
8437 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8438 "list of registers must be at least 1 and at most 16");
8441 case ARM::MVE_VQDMULLs32bh:
8442 case ARM::MVE_VQDMULLs32th:
8443 case ARM::MVE_VCMULf32:
8444 case ARM::MVE_VMULLBs32:
8445 case ARM::MVE_VMULLTs32:
8446 case ARM::MVE_VMULLBu32:
8447 case ARM::MVE_VMULLTu32: {
8449 Operands[MnemonicOpsEndInd + 1]->getReg()) {
8451 "Qd register and Qn register can't be identical");
8456 "Qd register and Qm register can't be identical");
8460 case ARM::MVE_VREV64_8:
8461 case ARM::MVE_VREV64_16:
8462 case ARM::MVE_VREV64_32:
8463 case ARM::MVE_VQDMULL_qr_s32bh:
8464 case ARM::MVE_VQDMULL_qr_s32th: {
8468 "Qd register and Qn register can't be identical");
8472 case ARM::MVE_VCADDi32:
8473 case ARM::MVE_VCADDf32:
8474 case ARM::MVE_VHCADDs32: {
8478 "Qd register and Qm register can't be identical");
8482 case ARM::MVE_VMOV_rr_q: {
8485 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8486 "Q-registers must be the same");
8487 if (
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 3])
8488 .getVectorIndex() !=
8489 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 5])
8492 return Error(
Operands[MnemonicOpsEndInd + 3]->getStartLoc(),
8493 "Q-register indexes must be 2 and 0 or 3 and 1");
8496 case ARM::MVE_VMOV_q_rr: {
8500 "Q-registers must be the same");
8501 if (
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
8502 .getVectorIndex() !=
8503 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 3])
8506 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8507 "Q-register indexes must be 2 and 0 or 3 and 1");
8510 case ARM::MVE_SQRSHR:
8511 case ARM::MVE_UQRSHL: {
8515 "Rda register and Rm register can't be identical");
8536 case ARM::t2SMLALBB:
8537 case ARM::t2SMLALBT:
8539 case ARM::t2SMLALDX:
8540 case ARM::t2SMLALTB:
8541 case ARM::t2SMLALTT:
8543 case ARM::t2SMLSLDX:
8544 case ARM::t2SMULL: {
8549 "unpredictable instruction, RdHi and RdLo must be different");
8557 case ARM::CDE_CX1DA:
8561 case ARM::CDE_CX2DA:
8565 case ARM::CDE_CX3DA:
8566 case ARM::CDE_VCX1_vec:
8567 case ARM::CDE_VCX1_fpsp:
8568 case ARM::CDE_VCX1_fpdp:
8569 case ARM::CDE_VCX1A_vec:
8570 case ARM::CDE_VCX1A_fpsp:
8571 case ARM::CDE_VCX1A_fpdp:
8572 case ARM::CDE_VCX2_vec:
8573 case ARM::CDE_VCX2_fpsp:
8574 case ARM::CDE_VCX2_fpdp:
8575 case ARM::CDE_VCX2A_vec:
8576 case ARM::CDE_VCX2A_fpsp:
8577 case ARM::CDE_VCX2A_fpdp:
8578 case ARM::CDE_VCX3_vec:
8579 case ARM::CDE_VCX3_fpsp:
8580 case ARM::CDE_VCX3_fpdp:
8581 case ARM::CDE_VCX3A_vec:
8582 case ARM::CDE_VCX3A_fpsp:
8583 case ARM::CDE_VCX3A_fpdp: {
8585 "CDE operand 1 must be a coprocessor ID");
8589 "coprocessor must be configured as CDE");
8590 else if (Coproc >= 8)
8592 "coprocessor must be in the range [p0, p7]");
8598 case ARM::t2LDC2L_OFFSET:
8599 case ARM::t2LDC2L_OPTION:
8600 case ARM::t2LDC2L_POST:
8601 case ARM::t2LDC2L_PRE:
8602 case ARM::t2LDC2_OFFSET:
8603 case ARM::t2LDC2_OPTION:
8604 case ARM::t2LDC2_POST:
8605 case ARM::t2LDC2_PRE:
8606 case ARM::t2LDCL_OFFSET:
8607 case ARM::t2LDCL_OPTION:
8608 case ARM::t2LDCL_POST:
8609 case ARM::t2LDCL_PRE:
8610 case ARM::t2LDC_OFFSET:
8611 case ARM::t2LDC_OPTION:
8612 case ARM::t2LDC_POST:
8613 case ARM::t2LDC_PRE:
8622 case ARM::t2STC2L_OFFSET:
8623 case ARM::t2STC2L_OPTION:
8624 case ARM::t2STC2L_POST:
8625 case ARM::t2STC2L_PRE:
8626 case ARM::t2STC2_OFFSET:
8627 case ARM::t2STC2_OPTION:
8628 case ARM::t2STC2_POST:
8629 case ARM::t2STC2_PRE:
8630 case ARM::t2STCL_OFFSET:
8631 case ARM::t2STCL_OPTION:
8632 case ARM::t2STCL_POST:
8633 case ARM::t2STCL_PRE:
8634 case ARM::t2STC_OFFSET:
8635 case ARM::t2STC_OPTION:
8636 case ARM::t2STC_POST:
8637 case ARM::t2STC_PRE: {
8642 if (Opcode == ARM::t2MRRC || Opcode == ARM::t2MRRC2)
8644 else if (Opcode == ARM::t2MRC || Opcode == ARM::t2MRC2)
8647 "Operand must be a coprocessor ID");
8652 "coprocessor must be configured as GCP");
8664 case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VST1LNd8_UPD;
8665 case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VST1LNd16_UPD;
8666 case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VST1LNd32_UPD;
8667 case ARM::VST1LNdWB_register_Asm_8: Spacing = 1;
return ARM::VST1LNd8_UPD;
8668 case ARM::VST1LNdWB_register_Asm_16: Spacing = 1;
return ARM::VST1LNd16_UPD;
8669 case ARM::VST1LNdWB_register_Asm_32: Spacing = 1;
return ARM::VST1LNd32_UPD;
8670 case ARM::VST1LNdAsm_8: Spacing = 1;
return ARM::VST1LNd8;
8671 case ARM::VST1LNdAsm_16: Spacing = 1;
return ARM::VST1LNd16;
8672 case ARM::VST1LNdAsm_32: Spacing = 1;
return ARM::VST1LNd32;
8675 case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VST2LNd8_UPD;
8676 case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VST2LNd16_UPD;
8677 case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VST2LNd32_UPD;
8678 case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2;
return ARM::VST2LNq16_UPD;
8679 case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VST2LNq32_UPD;
8681 case ARM::VST2LNdWB_register_Asm_8: Spacing = 1;
return ARM::VST2LNd8_UPD;
8682 case ARM::VST2LNdWB_register_Asm_16: Spacing = 1;
return ARM::VST2LNd16_UPD;
8683 case ARM::VST2LNdWB_register_Asm_32: Spacing = 1;
return ARM::VST2LNd32_UPD;
8684 case ARM::VST2LNqWB_register_Asm_16: Spacing = 2;
return ARM::VST2LNq16_UPD;
8685 case ARM::VST2LNqWB_register_Asm_32: Spacing = 2;
return ARM::VST2LNq32_UPD;
8687 case ARM::VST2LNdAsm_8: Spacing = 1;
return ARM::VST2LNd8;
8688 case ARM::VST2LNdAsm_16: Spacing = 1;
return ARM::VST2LNd16;
8689 case ARM::VST2LNdAsm_32: Spacing = 1;
return ARM::VST2LNd32;
8690 case ARM::VST2LNqAsm_16: Spacing = 2;
return ARM::VST2LNq16;
8691 case ARM::VST2LNqAsm_32: Spacing = 2;
return ARM::VST2LNq32;
8694 case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VST3LNd8_UPD;
8695 case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VST3LNd16_UPD;
8696 case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VST3LNd32_UPD;
8697 case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1;
return ARM::VST3LNq16_UPD;
8698 case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VST3LNq32_UPD;
8699 case ARM::VST3LNdWB_register_Asm_8: Spacing = 1;
return ARM::VST3LNd8_UPD;
8700 case ARM::VST3LNdWB_register_Asm_16: Spacing = 1;
return ARM::VST3LNd16_UPD;
8701 case ARM::VST3LNdWB_register_Asm_32: Spacing = 1;
return ARM::VST3LNd32_UPD;
8702 case ARM::VST3LNqWB_register_Asm_16: Spacing = 2;
return ARM::VST3LNq16_UPD;
8703 case ARM::VST3LNqWB_register_Asm_32: Spacing = 2;
return ARM::VST3LNq32_UPD;
8704 case ARM::VST3LNdAsm_8: Spacing = 1;
return ARM::VST3LNd8;
8705 case ARM::VST3LNdAsm_16: Spacing = 1;
return ARM::VST3LNd16;
8706 case ARM::VST3LNdAsm_32: Spacing = 1;
return ARM::VST3LNd32;
8707 case ARM::VST3LNqAsm_16: Spacing = 2;
return ARM::VST3LNq16;
8708 case ARM::VST3LNqAsm_32: Spacing = 2;
return ARM::VST3LNq32;
8711 case ARM::VST3dWB_fixed_Asm_8: Spacing = 1;
return ARM::VST3d8_UPD;
8712 case ARM::VST3dWB_fixed_Asm_16: Spacing = 1;
return ARM::VST3d16_UPD;
8713 case ARM::VST3dWB_fixed_Asm_32: Spacing = 1;
return ARM::VST3d32_UPD;
8714 case ARM::VST3qWB_fixed_Asm_8: Spacing = 2;
return ARM::VST3q8_UPD;
8715 case ARM::VST3qWB_fixed_Asm_16: Spacing = 2;
return ARM::VST3q16_UPD;
8716 case ARM::VST3qWB_fixed_Asm_32: Spacing = 2;
return ARM::VST3q32_UPD;
8717 case ARM::VST3dWB_register_Asm_8: Spacing = 1;
return ARM::VST3d8_UPD;
8718 case ARM::VST3dWB_register_Asm_16: Spacing = 1;
return ARM::VST3d16_UPD;
8719 case ARM::VST3dWB_register_Asm_32: Spacing = 1;
return ARM::VST3d32_UPD;
8720 case ARM::VST3qWB_register_Asm_8: Spacing = 2;
return ARM::VST3q8_UPD;
8721 case ARM::VST3qWB_register_Asm_16: Spacing = 2;
return ARM::VST3q16_UPD;
8722 case ARM::VST3qWB_register_Asm_32: Spacing = 2;
return ARM::VST3q32_UPD;
8723 case ARM::VST3dAsm_8: Spacing = 1;
return ARM::VST3d8;
8724 case ARM::VST3dAsm_16: Spacing = 1;
return ARM::VST3d16;
8725 case ARM::VST3dAsm_32: Spacing = 1;
return ARM::VST3d32;
8726 case ARM::VST3qAsm_8: Spacing = 2;
return ARM::VST3q8;
8727 case ARM::VST3qAsm_16: Spacing = 2;
return ARM::VST3q16;
8728 case ARM::VST3qAsm_32: Spacing = 2;
return ARM::VST3q32;
8731 case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VST4LNd8_UPD;
8732 case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VST4LNd16_UPD;
8733 case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VST4LNd32_UPD;
8734 case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1;
return ARM::VST4LNq16_UPD;
8735 case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VST4LNq32_UPD;
8736 case ARM::VST4LNdWB_register_Asm_8: Spacing = 1;
return ARM::VST4LNd8_UPD;
8737 case ARM::VST4LNdWB_register_Asm_16: Spacing = 1;
return ARM::VST4LNd16_UPD;
8738 case ARM::VST4LNdWB_register_Asm_32: Spacing = 1;
return ARM::VST4LNd32_UPD;
8739 case ARM::VST4LNqWB_register_Asm_16: Spacing = 2;
return ARM::VST4LNq16_UPD;
8740 case ARM::VST4LNqWB_register_Asm_32: Spacing = 2;
return ARM::VST4LNq32_UPD;
8741 case ARM::VST4LNdAsm_8: Spacing = 1;
return ARM::VST4LNd8;
8742 case ARM::VST4LNdAsm_16: Spacing = 1;
return ARM::VST4LNd16;
8743 case ARM::VST4LNdAsm_32: Spacing = 1;
return ARM::VST4LNd32;
8744 case ARM::VST4LNqAsm_16: Spacing = 2;
return ARM::VST4LNq16;
8745 case ARM::VST4LNqAsm_32: Spacing = 2;
return ARM::VST4LNq32;
8748 case ARM::VST4dWB_fixed_Asm_8: Spacing = 1;
return ARM::VST4d8_UPD;
8749 case ARM::VST4dWB_fixed_Asm_16: Spacing = 1;
return ARM::VST4d16_UPD;
8750 case ARM::VST4dWB_fixed_Asm_32: Spacing = 1;
return ARM::VST4d32_UPD;
8751 case ARM::VST4qWB_fixed_Asm_8: Spacing = 2;
return ARM::VST4q8_UPD;
8752 case ARM::VST4qWB_fixed_Asm_16: Spacing = 2;
return ARM::VST4q16_UPD;
8753 case ARM::VST4qWB_fixed_Asm_32: Spacing = 2;
return ARM::VST4q32_UPD;
8754 case ARM::VST4dWB_register_Asm_8: Spacing = 1;
return ARM::VST4d8_UPD;
8755 case ARM::VST4dWB_register_Asm_16: Spacing = 1;
return ARM::VST4d16_UPD;
8756 case ARM::VST4dWB_register_Asm_32: Spacing = 1;
return ARM::VST4d32_UPD;
8757 case ARM::VST4qWB_register_Asm_8: Spacing = 2;
return ARM::VST4q8_UPD;
8758 case ARM::VST4qWB_register_Asm_16: Spacing = 2;
return ARM::VST4q16_UPD;
8759 case ARM::VST4qWB_register_Asm_32: Spacing = 2;
return ARM::VST4q32_UPD;
8760 case ARM::VST4dAsm_8: Spacing = 1;
return ARM::VST4d8;
8761 case ARM::VST4dAsm_16: Spacing = 1;
return ARM::VST4d16;
8762 case ARM::VST4dAsm_32: Spacing = 1;
return ARM::VST4d32;
8763 case ARM::VST4qAsm_8: Spacing = 2;
return ARM::VST4q8;
8764 case ARM::VST4qAsm_16: Spacing = 2;
return ARM::VST4q16;
8765 case ARM::VST4qAsm_32: Spacing = 2;
return ARM::VST4q32;
8773 case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD1LNd8_UPD;
8774 case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD1LNd16_UPD;
8775 case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD1LNd32_UPD;
8776 case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1;
return ARM::VLD1LNd8_UPD;
8777 case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1;
return ARM::VLD1LNd16_UPD;
8778 case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1;
return ARM::VLD1LNd32_UPD;
8779 case ARM::VLD1LNdAsm_8: Spacing = 1;
return ARM::VLD1LNd8;
8780 case ARM::VLD1LNdAsm_16: Spacing = 1;
return ARM::VLD1LNd16;
8781 case ARM::VLD1LNdAsm_32: Spacing = 1;
return ARM::VLD1LNd32;
8784 case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD2LNd8_UPD;
8785 case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD2LNd16_UPD;
8786 case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD2LNd32_UPD;
8787 case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD2LNq16_UPD;
8788 case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD2LNq32_UPD;
8789 case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1;
return ARM::VLD2LNd8_UPD;
8790 case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1;
return ARM::VLD2LNd16_UPD;
8791 case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1;
return ARM::VLD2LNd32_UPD;
8792 case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2;
return ARM::VLD2LNq16_UPD;
8793 case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2;
return ARM::VLD2LNq32_UPD;
8794 case ARM::VLD2LNdAsm_8: Spacing = 1;
return ARM::VLD2LNd8;
8795 case ARM::VLD2LNdAsm_16: Spacing = 1;
return ARM::VLD2LNd16;
8796 case ARM::VLD2LNdAsm_32: Spacing = 1;
return ARM::VLD2LNd32;
8797 case ARM::VLD2LNqAsm_16: Spacing = 2;
return ARM::VLD2LNq16;
8798 case ARM::VLD2LNqAsm_32: Spacing = 2;
return ARM::VLD2LNq32;
8801 case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD3DUPd8_UPD;
8802 case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD3DUPd16_UPD;
8803 case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD3DUPd32_UPD;
8804 case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD3DUPq8_UPD;
8805 case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2;
return ARM::VLD3DUPq16_UPD;
8806 case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD3DUPq32_UPD;
8807 case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1;
return ARM::VLD3DUPd8_UPD;
8808 case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1;
return ARM::VLD3DUPd16_UPD;
8809 case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1;
return ARM::VLD3DUPd32_UPD;
8810 case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2;
return ARM::VLD3DUPq8_UPD;
8811 case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2;
return ARM::VLD3DUPq16_UPD;
8812 case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2;
return ARM::VLD3DUPq32_UPD;
8813 case ARM::VLD3DUPdAsm_8: Spacing = 1;
return ARM::VLD3DUPd8;
8814 case ARM::VLD3DUPdAsm_16: Spacing = 1;
return ARM::VLD3DUPd16;
8815 case ARM::VLD3DUPdAsm_32: Spacing = 1;
return ARM::VLD3DUPd32;
8816 case ARM::VLD3DUPqAsm_8: Spacing = 2;
return ARM::VLD3DUPq8;
8817 case ARM::VLD3DUPqAsm_16: Spacing = 2;
return ARM::VLD3DUPq16;
8818 case ARM::VLD3DUPqAsm_32: Spacing = 2;
return ARM::VLD3DUPq32;
8821 case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD3LNd8_UPD;
8822 case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD3LNd16_UPD;
8823 case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD3LNd32_UPD;
8824 case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD3LNq16_UPD;
8825 case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD3LNq32_UPD;
8826 case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1;
return ARM::VLD3LNd8_UPD;
8827 case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1;
return ARM::VLD3LNd16_UPD;
8828 case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1;
return ARM::VLD3LNd32_UPD;
8829 case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2;
return ARM::VLD3LNq16_UPD;
8830 case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2;
return ARM::VLD3LNq32_UPD;
8831 case ARM::VLD3LNdAsm_8: Spacing = 1;
return ARM::VLD3LNd8;
8832 case ARM::VLD3LNdAsm_16: Spacing = 1;
return ARM::VLD3LNd16;
8833 case ARM::VLD3LNdAsm_32: Spacing = 1;
return ARM::VLD3LNd32;
8834 case ARM::VLD3LNqAsm_16: Spacing = 2;
return ARM::VLD3LNq16;
8835 case ARM::VLD3LNqAsm_32: Spacing = 2;
return ARM::VLD3LNq32;
8838 case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD3d8_UPD;
8839 case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD3d16_UPD;
8840 case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD3d32_UPD;
8841 case ARM::VLD3qWB_fixed_Asm_8: Spacing = 2;
return ARM::VLD3q8_UPD;
8842 case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2;
return ARM::VLD3q16_UPD;
8843 case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD3q32_UPD;
8844 case ARM::VLD3dWB_register_Asm_8: Spacing = 1;
return ARM::VLD3d8_UPD;
8845 case ARM::VLD3dWB_register_Asm_16: Spacing = 1;
return ARM::VLD3d16_UPD;
8846 case ARM::VLD3dWB_register_Asm_32: Spacing = 1;
return ARM::VLD3d32_UPD;
8847 case ARM::VLD3qWB_register_Asm_8: Spacing = 2;
return ARM::VLD3q8_UPD;
8848 case ARM::VLD3qWB_register_Asm_16: Spacing = 2;
return ARM::VLD3q16_UPD;
8849 case ARM::VLD3qWB_register_Asm_32: Spacing = 2;
return ARM::VLD3q32_UPD;
8850 case ARM::VLD3dAsm_8: Spacing = 1;
return ARM::VLD3d8;
8851 case ARM::VLD3dAsm_16: Spacing = 1;
return ARM::VLD3d16;
8852 case ARM::VLD3dAsm_32: Spacing = 1;
return ARM::VLD3d32;
8853 case ARM::VLD3qAsm_8: Spacing = 2;
return ARM::VLD3q8;
8854 case ARM::VLD3qAsm_16: Spacing = 2;
return ARM::VLD3q16;
8855 case ARM::VLD3qAsm_32: Spacing = 2;
return ARM::VLD3q32;
8858 case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD4LNd8_UPD;
8859 case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD4LNd16_UPD;
8860 case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD4LNd32_UPD;
8861 case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2;
return ARM::VLD4LNq16_UPD;
8862 case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD4LNq32_UPD;
8863 case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1;
return ARM::VLD4LNd8_UPD;
8864 case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1;
return ARM::VLD4LNd16_UPD;
8865 case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1;
return ARM::VLD4LNd32_UPD;
8866 case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2;
return ARM::VLD4LNq16_UPD;
8867 case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2;
return ARM::VLD4LNq32_UPD;
8868 case ARM::VLD4LNdAsm_8: Spacing = 1;
return ARM::VLD4LNd8;
8869 case ARM::VLD4LNdAsm_16: Spacing = 1;
return ARM::VLD4LNd16;
8870 case ARM::VLD4LNdAsm_32: Spacing = 1;
return ARM::VLD4LNd32;
8871 case ARM::VLD4LNqAsm_16: Spacing = 2;
return ARM::VLD4LNq16;
8872 case ARM::VLD4LNqAsm_32: Spacing = 2;
return ARM::VLD4LNq32;
8875 case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD4DUPd8_UPD;
8876 case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD4DUPd16_UPD;
8877 case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD4DUPd32_UPD;
8878 case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD4DUPq8_UPD;
8879 case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD4DUPq16_UPD;
8880 case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD4DUPq32_UPD;
8881 case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1;
return ARM::VLD4DUPd8_UPD;
8882 case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1;
return ARM::VLD4DUPd16_UPD;
8883 case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1;
return ARM::VLD4DUPd32_UPD;
8884 case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2;
return ARM::VLD4DUPq8_UPD;
8885 case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2;
return ARM::VLD4DUPq16_UPD;
8886 case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2;
return ARM::VLD4DUPq32_UPD;
8887 case ARM::VLD4DUPdAsm_8: Spacing = 1;
return ARM::VLD4DUPd8;
8888 case ARM::VLD4DUPdAsm_16: Spacing = 1;
return ARM::VLD4DUPd16;
8889 case ARM::VLD4DUPdAsm_32: Spacing = 1;
return ARM::VLD4DUPd32;
8890 case ARM::VLD4DUPqAsm_8: Spacing = 2;
return ARM::VLD4DUPq8;
8891 case ARM::VLD4DUPqAsm_16: Spacing = 2;
return ARM::VLD4DUPq16;
8892 case ARM::VLD4DUPqAsm_32: Spacing = 2;
return ARM::VLD4DUPq32;
8895 case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD4d8_UPD;
8896 case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD4d16_UPD;
8897 case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD4d32_UPD;
8898 case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2;
return ARM::VLD4q8_UPD;
8899 case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2;
return ARM::VLD4q16_UPD;
8900 case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD4q32_UPD;
8901 case ARM::VLD4dWB_register_Asm_8: Spacing = 1;
return ARM::VLD4d8_UPD;
8902 case ARM::VLD4dWB_register_Asm_16: Spacing = 1;
return ARM::VLD4d16_UPD;
8903 case ARM::VLD4dWB_register_Asm_32: Spacing = 1;
return ARM::VLD4d32_UPD;
8904 case ARM::VLD4qWB_register_Asm_8: Spacing = 2;
return ARM::VLD4q8_UPD;
8905 case ARM::VLD4qWB_register_Asm_16: Spacing = 2;
return ARM::VLD4q16_UPD;
8906 case ARM::VLD4qWB_register_Asm_32: Spacing = 2;
return ARM::VLD4q32_UPD;
8907 case ARM::VLD4dAsm_8: Spacing = 1;
return ARM::VLD4d8;
8908 case ARM::VLD4dAsm_16: Spacing = 1;
return ARM::VLD4d16;
8909 case ARM::VLD4dAsm_32: Spacing = 1;
return ARM::VLD4d32;
8910 case ARM::VLD4qAsm_8: Spacing = 2;
return ARM::VLD4q8;
8911 case ARM::VLD4qAsm_16: Spacing = 2;
return ARM::VLD4q16;
8912 case ARM::VLD4qAsm_32: Spacing = 2;
return ARM::VLD4q32;
8916bool ARMAsmParser::processInstruction(
MCInst &Inst,
8918 unsigned MnemonicOpsEndInd,
8922 bool HasWideQualifier =
false;
8924 ARMOperand &ARMOp =
static_cast<ARMOperand&
>(*Op);
8925 if (ARMOp.isToken() && ARMOp.getToken() ==
".w") {
8926 HasWideQualifier =
true;
8937 MnemonicOpsEndInd + 2) {
8938 ARMOperand &
Op =
static_cast<ARMOperand &
>(
8941 auto &RegList =
Op.getRegList();
8944 if (RegList.size() == 32) {
8945 const unsigned Opcode =
8946 (Inst.
getOpcode() == ARM::VLLDM) ? ARM::VLLDM_T2 : ARM::VLSTM_T2;
8960 case ARM::LDRT_POST:
8961 case ARM::LDRBT_POST: {
8962 const unsigned Opcode =
8963 (Inst.
getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
8964 : ARM::LDRBT_POST_IMM;
8980 case ARM::LDRSHTii: {
8985 else if (Inst.
getOpcode() == ARM::LDRHTii)
8987 else if (Inst.
getOpcode() == ARM::LDRSHTii)
8998 case ARM::STRT_POST:
8999 case ARM::STRBT_POST: {
9000 const unsigned Opcode =
9001 (Inst.
getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
9002 : ARM::STRBT_POST_IMM;
9028 llvm::rotr<uint32_t>(Enc & 0xFF, (Enc & 0xF00) >> 7)));
9033 MCSymbol *Dot = getContext().createTempSymbol();
9052 case ARM::t2LDR_PRE_imm:
9053 case ARM::t2LDR_POST_imm: {
9067 case ARM::t2STR_PRE_imm:
9068 case ARM::t2STR_POST_imm: {
9082 case ARM::t2LDRB_OFFSET_imm: {
9092 case ARM::t2LDRB_PRE_imm:
9093 case ARM::t2LDRB_POST_imm: {
9097 : ARM::t2LDRB_POST);
9108 case ARM::t2STRB_OFFSET_imm: {
9118 case ARM::t2STRB_PRE_imm:
9119 case ARM::t2STRB_POST_imm: {
9123 : ARM::t2STRB_POST);
9134 case ARM::t2LDRH_OFFSET_imm: {
9144 case ARM::t2LDRH_PRE_imm:
9145 case ARM::t2LDRH_POST_imm: {
9149 : ARM::t2LDRH_POST);
9160 case ARM::t2STRH_OFFSET_imm: {
9170 case ARM::t2STRH_PRE_imm:
9171 case ARM::t2STRH_POST_imm: {
9175 : ARM::t2STRH_POST);
9186 case ARM::t2LDRSB_OFFSET_imm: {
9196 case ARM::t2LDRSB_PRE_imm:
9197 case ARM::t2LDRSB_POST_imm: {
9201 : ARM::t2LDRSB_POST);
9212 case ARM::t2LDRSH_OFFSET_imm: {
9222 case ARM::t2LDRSH_PRE_imm:
9223 case ARM::t2LDRSH_POST_imm: {
9227 : ARM::t2LDRSH_POST);
9238 case ARM::t2LDRpcrel:
9247 case ARM::t2LDRBpcrel:
9250 case ARM::t2LDRHpcrel:
9253 case ARM::t2LDRSBpcrel:
9256 case ARM::t2LDRSHpcrel:
9259 case ARM::LDRConstPool:
9260 case ARM::tLDRConstPool:
9261 case ARM::t2LDRConstPool: {
9266 if (Inst.
getOpcode() == ARM::LDRConstPool)
9268 else if (Inst.
getOpcode() == ARM::tLDRConstPool)
9270 else if (Inst.
getOpcode() == ARM::t2LDRConstPool)
9272 const ARMOperand &PoolOperand =
9273 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1]);
9274 const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
9276 if (isa<MCConstantExpr>(SubExprVal) &&
9280 (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
9282 bool MovHasS =
true;
9283 if (Inst.
getOpcode() == ARM::LDRConstPool) {
9293 else if (hasV6T2Ops() &&
9306 else if (hasThumb2() &&
9311 else if (hasV8MBaseline() &&
9332 getTargetStreamer().addConstantPoolEntry(SubExprVal,
9333 PoolOperand.getStartLoc());
9344 case ARM::VST1LNdWB_register_Asm_8:
9345 case ARM::VST1LNdWB_register_Asm_16:
9346 case ARM::VST1LNdWB_register_Asm_32: {
9364 case ARM::VST2LNdWB_register_Asm_8:
9365 case ARM::VST2LNdWB_register_Asm_16:
9366 case ARM::VST2LNdWB_register_Asm_32:
9367 case ARM::VST2LNqWB_register_Asm_16:
9368 case ARM::VST2LNqWB_register_Asm_32: {
9388 case ARM::VST3LNdWB_register_Asm_8:
9389 case ARM::VST3LNdWB_register_Asm_16:
9390 case ARM::VST3LNdWB_register_Asm_32:
9391 case ARM::VST3LNqWB_register_Asm_16:
9392 case ARM::VST3LNqWB_register_Asm_32: {
9414 case ARM::VST4LNdWB_register_Asm_8:
9415 case ARM::VST4LNdWB_register_Asm_16:
9416 case ARM::VST4LNdWB_register_Asm_32:
9417 case ARM::VST4LNqWB_register_Asm_16:
9418 case ARM::VST4LNqWB_register_Asm_32: {
9442 case ARM::VST1LNdWB_fixed_Asm_8:
9443 case ARM::VST1LNdWB_fixed_Asm_16:
9444 case ARM::VST1LNdWB_fixed_Asm_32: {
9462 case ARM::VST2LNdWB_fixed_Asm_8:
9463 case ARM::VST2LNdWB_fixed_Asm_16:
9464 case ARM::VST2LNdWB_fixed_Asm_32:
9465 case ARM::VST2LNqWB_fixed_Asm_16:
9466 case ARM::VST2LNqWB_fixed_Asm_32: {
9486 case ARM::VST3LNdWB_fixed_Asm_8:
9487 case ARM::VST3LNdWB_fixed_Asm_16:
9488 case ARM::VST3LNdWB_fixed_Asm_32:
9489 case ARM::VST3LNqWB_fixed_Asm_16:
9490 case ARM::VST3LNqWB_fixed_Asm_32: {
9512 case ARM::VST4LNdWB_fixed_Asm_8:
9513 case ARM::VST4LNdWB_fixed_Asm_16:
9514 case ARM::VST4LNdWB_fixed_Asm_32:
9515 case ARM::VST4LNqWB_fixed_Asm_16:
9516 case ARM::VST4LNqWB_fixed_Asm_32: {
9540 case ARM::VST1LNdAsm_8:
9541 case ARM::VST1LNdAsm_16:
9542 case ARM::VST1LNdAsm_32: {
9558 case ARM::VST2LNdAsm_8:
9559 case ARM::VST2LNdAsm_16:
9560 case ARM::VST2LNdAsm_32:
9561 case ARM::VST2LNqAsm_16:
9562 case ARM::VST2LNqAsm_32: {
9580 case ARM::VST3LNdAsm_8:
9581 case ARM::VST3LNdAsm_16:
9582 case ARM::VST3LNdAsm_32:
9583 case ARM::VST3LNqAsm_16:
9584 case ARM::VST3LNqAsm_32: {
9604 case ARM::VST4LNdAsm_8:
9605 case ARM::VST4LNdAsm_16:
9606 case ARM::VST4LNdAsm_32:
9607 case ARM::VST4LNqAsm_16:
9608 case ARM::VST4LNqAsm_32: {
9631 case ARM::VLD1LNdWB_register_Asm_8:
9632 case ARM::VLD1LNdWB_register_Asm_16:
9633 case ARM::VLD1LNdWB_register_Asm_32: {
9652 case ARM::VLD2LNdWB_register_Asm_8:
9653 case ARM::VLD2LNdWB_register_Asm_16:
9654 case ARM::VLD2LNdWB_register_Asm_32:
9655 case ARM::VLD2LNqWB_register_Asm_16:
9656 case ARM::VLD2LNqWB_register_Asm_32: {
9679 case ARM::VLD3LNdWB_register_Asm_8:
9680 case ARM::VLD3LNdWB_register_Asm_16:
9681 case ARM::VLD3LNdWB_register_Asm_32:
9682 case ARM::VLD3LNqWB_register_Asm_16:
9683 case ARM::VLD3LNqWB_register_Asm_32: {
9710 case ARM::VLD4LNdWB_register_Asm_8:
9711 case ARM::VLD4LNdWB_register_Asm_16:
9712 case ARM::VLD4LNdWB_register_Asm_32:
9713 case ARM::VLD4LNqWB_register_Asm_16:
9714 case ARM::VLD4LNqWB_register_Asm_32: {
9745 case ARM::VLD1LNdWB_fixed_Asm_8:
9746 case ARM::VLD1LNdWB_fixed_Asm_16:
9747 case ARM::VLD1LNdWB_fixed_Asm_32: {
9766 case ARM::VLD2LNdWB_fixed_Asm_8:
9767 case ARM::VLD2LNdWB_fixed_Asm_16:
9768 case ARM::VLD2LNdWB_fixed_Asm_32:
9769 case ARM::VLD2LNqWB_fixed_Asm_16:
9770 case ARM::VLD2LNqWB_fixed_Asm_32: {
9793 case ARM::VLD3LNdWB_fixed_Asm_8:
9794 case ARM::VLD3LNdWB_fixed_Asm_16:
9795 case ARM::VLD3LNdWB_fixed_Asm_32:
9796 case ARM::VLD3LNqWB_fixed_Asm_16:
9797 case ARM::VLD3LNqWB_fixed_Asm_32: {
9824 case ARM::VLD4LNdWB_fixed_Asm_8:
9825 case ARM::VLD4LNdWB_fixed_Asm_16:
9826 case ARM::VLD4LNdWB_fixed_Asm_32:
9827 case ARM::VLD4LNqWB_fixed_Asm_16:
9828 case ARM::VLD4LNqWB_fixed_Asm_32: {
9859 case ARM::VLD1LNdAsm_8:
9860 case ARM::VLD1LNdAsm_16:
9861 case ARM::VLD1LNdAsm_32: {
9878 case ARM::VLD2LNdAsm_8:
9879 case ARM::VLD2LNdAsm_16:
9880 case ARM::VLD2LNdAsm_32:
9881 case ARM::VLD2LNqAsm_16:
9882 case ARM::VLD2LNqAsm_32: {
9903 case ARM::VLD3LNdAsm_8:
9904 case ARM::VLD3LNdAsm_16:
9905 case ARM::VLD3LNdAsm_32:
9906 case ARM::VLD3LNqAsm_16:
9907 case ARM::VLD3LNqAsm_32: {
9932 case ARM::VLD4LNdAsm_8:
9933 case ARM::VLD4LNdAsm_16:
9934 case ARM::VLD4LNdAsm_32:
9935 case ARM::VLD4LNqAsm_16:
9936 case ARM::VLD4LNqAsm_32: {
9966 case ARM::VLD3DUPdAsm_8:
9967 case ARM::VLD3DUPdAsm_16:
9968 case ARM::VLD3DUPdAsm_32:
9969 case ARM::VLD3DUPqAsm_8:
9970 case ARM::VLD3DUPqAsm_16:
9971 case ARM::VLD3DUPqAsm_32: {
9988 case ARM::VLD3DUPdWB_fixed_Asm_8:
9989 case ARM::VLD3DUPdWB_fixed_Asm_16:
9990 case ARM::VLD3DUPdWB_fixed_Asm_32:
9991 case ARM::VLD3DUPqWB_fixed_Asm_8:
9992 case ARM::VLD3DUPqWB_fixed_Asm_16:
9993 case ARM::VLD3DUPqWB_fixed_Asm_32: {
10012 case ARM::VLD3DUPdWB_register_Asm_8:
10013 case ARM::VLD3DUPdWB_register_Asm_16:
10014 case ARM::VLD3DUPdWB_register_Asm_32:
10015 case ARM::VLD3DUPqWB_register_Asm_8:
10016 case ARM::VLD3DUPqWB_register_Asm_16:
10017 case ARM::VLD3DUPqWB_register_Asm_32: {
10037 case ARM::VLD3dAsm_8:
10038 case ARM::VLD3dAsm_16:
10039 case ARM::VLD3dAsm_32:
10040 case ARM::VLD3qAsm_8:
10041 case ARM::VLD3qAsm_16:
10042 case ARM::VLD3qAsm_32: {
10059 case ARM::VLD3dWB_fixed_Asm_8:
10060 case ARM::VLD3dWB_fixed_Asm_16:
10061 case ARM::VLD3dWB_fixed_Asm_32:
10062 case ARM::VLD3qWB_fixed_Asm_8:
10063 case ARM::VLD3qWB_fixed_Asm_16:
10064 case ARM::VLD3qWB_fixed_Asm_32: {
10083 case ARM::VLD3dWB_register_Asm_8:
10084 case ARM::VLD3dWB_register_Asm_16:
10085 case ARM::VLD3dWB_register_Asm_32:
10086 case ARM::VLD3qWB_register_Asm_8:
10087 case ARM::VLD3qWB_register_Asm_16:
10088 case ARM::VLD3qWB_register_Asm_32: {
10108 case ARM::VLD4DUPdAsm_8:
10109 case ARM::VLD4DUPdAsm_16:
10110 case ARM::VLD4DUPdAsm_32:
10111 case ARM::VLD4DUPqAsm_8:
10112 case ARM::VLD4DUPqAsm_16:
10113 case ARM::VLD4DUPqAsm_32: {
10132 case ARM::VLD4DUPdWB_fixed_Asm_8:
10133 case ARM::VLD4DUPdWB_fixed_Asm_16:
10134 case ARM::VLD4DUPdWB_fixed_Asm_32:
10135 case ARM::VLD4DUPqWB_fixed_Asm_8:
10136 case ARM::VLD4DUPqWB_fixed_Asm_16:
10137 case ARM::VLD4DUPqWB_fixed_Asm_32: {
10158 case ARM::VLD4DUPdWB_register_Asm_8:
10159 case ARM::VLD4DUPdWB_register_Asm_16:
10160 case ARM::VLD4DUPdWB_register_Asm_32:
10161 case ARM::VLD4DUPqWB_register_Asm_8:
10162 case ARM::VLD4DUPqWB_register_Asm_16:
10163 case ARM::VLD4DUPqWB_register_Asm_32: {
10185 case ARM::VLD4dAsm_8:
10186 case ARM::VLD4dAsm_16:
10187 case ARM::VLD4dAsm_32:
10188 case ARM::VLD4qAsm_8:
10189 case ARM::VLD4qAsm_16:
10190 case ARM::VLD4qAsm_32: {
10209 case ARM::VLD4dWB_fixed_Asm_8:
10210 case ARM::VLD4dWB_fixed_Asm_16:
10211 case ARM::VLD4dWB_fixed_Asm_32:
10212 case ARM::VLD4qWB_fixed_Asm_8:
10213 case ARM::VLD4qWB_fixed_Asm_16:
10214 case ARM::VLD4qWB_fixed_Asm_32: {
10235 case ARM::VLD4dWB_register_Asm_8:
10236 case ARM::VLD4dWB_register_Asm_16:
10237 case ARM::VLD4dWB_register_Asm_32:
10238 case ARM::VLD4qWB_register_Asm_8:
10239 case ARM::VLD4qWB_register_Asm_16:
10240 case ARM::VLD4qWB_register_Asm_32: {
10262 case ARM::VST3dAsm_8:
10263 case ARM::VST3dAsm_16:
10264 case ARM::VST3dAsm_32:
10265 case ARM::VST3qAsm_8:
10266 case ARM::VST3qAsm_16:
10267 case ARM::VST3qAsm_32: {
10284 case ARM::VST3dWB_fixed_Asm_8:
10285 case ARM::VST3dWB_fixed_Asm_16:
10286 case ARM::VST3dWB_fixed_Asm_32:
10287 case ARM::VST3qWB_fixed_Asm_8:
10288 case ARM::VST3qWB_fixed_Asm_16:
10289 case ARM::VST3qWB_fixed_Asm_32: {
10308 case ARM::VST3dWB_register_Asm_8:
10309 case ARM::VST3dWB_register_Asm_16:
10310 case ARM::VST3dWB_register_Asm_32:
10311 case ARM::VST3qWB_register_Asm_8:
10312 case ARM::VST3qWB_register_Asm_16:
10313 case ARM::VST3qWB_register_Asm_32: {
10333 case ARM::VST4dAsm_8:
10334 case ARM::VST4dAsm_16:
10335 case ARM::VST4dAsm_32:
10336 case ARM::VST4qAsm_8:
10337 case ARM::VST4qAsm_16:
10338 case ARM::VST4qAsm_32: {
10357 case ARM::VST4dWB_fixed_Asm_8:
10358 case ARM::VST4dWB_fixed_Asm_16:
10359 case ARM::VST4dWB_fixed_Asm_32:
10360 case ARM::VST4qWB_fixed_Asm_8:
10361 case ARM::VST4qWB_fixed_Asm_16:
10362 case ARM::VST4qWB_fixed_Asm_32: {
10383 case ARM::VST4dWB_register_Asm_8:
10384 case ARM::VST4dWB_register_Asm_16:
10385 case ARM::VST4dWB_register_Asm_32:
10386 case ARM::VST4qWB_register_Asm_8:
10387 case ARM::VST4qWB_register_Asm_16:
10388 case ARM::VST4qWB_register_Asm_32: {
10416 (inITBlock() ? ARM::NoRegister : ARM::CPSR) &&
10417 !HasWideQualifier) {
10421 case ARM::t2LSLri: NewOpc = ARM::tLSLri;
break;
10422 case ARM::t2LSRri: NewOpc = ARM::tLSRri;
break;
10423 case ARM::t2ASRri: NewOpc = ARM::tASRri;
break;
10441 case ARM::t2MOVSsr: {
10445 bool isNarrow =
false;
10450 inITBlock() == (Inst.
getOpcode() == ARM::t2MOVsr) &&
10457 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr;
break;
10458 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr;
break;
10459 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr;
break;
10460 case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr;
break;
10466 Inst.
getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : ARM::NoRegister));
10473 Inst.
getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : ARM::NoRegister));
10478 case ARM::t2MOVSsi: {
10482 bool isNarrow =
false;
10485 inITBlock() == (Inst.
getOpcode() == ARM::t2MOVsi) &&
10492 bool isMov =
false;
10503 newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr;
10507 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri;
break;
10508 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri;
break;
10509 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri;
break;
10510 case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow =
false;
break;
10511 case ARM_AM::rrx: isNarrow =
false; newOpc = ARM::t2RRX;
break;
10514 if (Amount == 32) Amount = 0;
10517 if (isNarrow && !isMov)
10519 Inst.
getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : ARM::NoRegister));
10521 if (newOpc != ARM::t2RRX && !isMov)
10527 Inst.
getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : ARM::NoRegister));
10571 unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
10580 if (Opc == ARM::MOVsi)
10601 case ARM::t2LDMIA_UPD: {
10617 case ARM::t2STMDB_UPD: {
10633 case ARM::LDMIA_UPD:
10636 if (
static_cast<ARMOperand &
>(*
Operands[0]).getToken() ==
"pop" &&
10651 case ARM::STMDB_UPD:
10654 if (
static_cast<ARMOperand &
>(*
Operands[0]).getToken() ==
"push" &&
10667 case ARM::t2ADDri12:
10668 case ARM::t2SUBri12:
10669 case ARM::t2ADDspImm12:
10670 case ARM::t2SUBspImm12: {
10674 if ((Token !=
"add" && Token !=
"sub") ||
10678 case ARM::t2ADDri12:
10681 case ARM::t2SUBri12:
10684 case ARM::t2ADDspImm12:
10687 case ARM::t2SUBspImm12:
10702 Operands.size() == MnemonicOpsEndInd + 3) {
10713 Operands.size() == MnemonicOpsEndInd + 3) {
10719 case ARM::t2SUBri: {
10724 if (HasWideQualifier)
10731 (inITBlock() ? ARM::NoRegister : ARM::CPSR))
10737 int i = (
Operands[MnemonicOpsEndInd + 1]->isImm())
10738 ? MnemonicOpsEndInd + 1
10739 : MnemonicOpsEndInd + 2;
10746 ARM::tADDi8 : ARM::tSUBi8);
10756 case ARM::t2ADDspImm:
10757 case ARM::t2SUBspImm: {
10762 if (V & 3 || V > ((1 << 7) - 1) << 2)
10775 case ARM::t2ADDrr: {
10837 case ARM::tLDMIA: {
10843 bool hasWritebackToken =
10844 (
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
10846 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
10847 .getToken() ==
"!");
10848 bool listContainsBase;
10850 (!listContainsBase && !hasWritebackToken) ||
10851 (listContainsBase && hasWritebackToken)) {
10854 Inst.
setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
10857 if (hasWritebackToken)
10864 case ARM::tSTMIA_UPD: {
10869 bool listContainsBase;
10879 bool listContainsBase;
10893 bool listContainsBase;
10910 (inITBlock() ? ARM::NoRegister : ARM::CPSR) &&
10911 !HasWideQualifier) {
10932 !HasWideQualifier) {
10939 if (
Op == ARM::tMOVr) {
10957 !HasWideQualifier) {
10961 case ARM::t2SXTH: NewOpc = ARM::tSXTH;
break;
10962 case ARM::t2SXTB: NewOpc = ARM::tSXTB;
break;
10963 case ARM::t2UXTH: NewOpc = ARM::tUXTH;
break;
10964 case ARM::t2UXTB: NewOpc = ARM::tUXTB;
break;
11002 case ARM::ADDrsi: {
11008 case ARM::ANDrsi: newOpc = ARM::ANDrr;
break;
11009 case ARM::ORRrsi: newOpc = ARM::ORRrr;
break;
11010 case ARM::EORrsi: newOpc = ARM::EORrr;
break;
11011 case ARM::BICrsi: newOpc = ARM::BICrr;
break;
11012 case ARM::SUBrsi: newOpc = ARM::SUBrr;
break;
11013 case ARM::ADDrsi: newOpc = ARM::ADDrr;
break;
11036 assert(!inITBlock() &&
"nested IT blocks?!");
11052 (inITBlock() ? ARM::NoRegister : ARM::CPSR) &&
11053 !HasWideQualifier) {
11057 case ARM::t2LSLrr: NewOpc = ARM::tLSLrr;
break;
11058 case ARM::t2LSRrr: NewOpc = ARM::tLSRrr;
break;
11059 case ARM::t2ASRrr: NewOpc = ARM::tASRrr;
break;
11060 case ARM::t2SBCrr: NewOpc = ARM::tSBC;
break;
11061 case ARM::t2RORrr: NewOpc = ARM::tROR;
break;
11062 case ARM::t2BICrr: NewOpc = ARM::tBIC;
break;
11089 (inITBlock() ? ARM::NoRegister : ARM::CPSR) &&
11090 !HasWideQualifier) {
11094 case ARM::t2ADCrr: NewOpc = ARM::tADC;
break;
11095 case ARM::t2ANDrr: NewOpc = ARM::tAND;
break;
11096 case ARM::t2EORrr: NewOpc = ARM::tEOR;
break;
11097 case ARM::t2ORRrr: NewOpc = ARM::tORR;
break;
11116 case ARM::MVE_VPST:
11117 case ARM::MVE_VPTv16i8:
11118 case ARM::MVE_VPTv8i16:
11119 case ARM::MVE_VPTv4i32:
11120 case ARM::MVE_VPTv16u8:
11121 case ARM::MVE_VPTv8u16:
11122 case ARM::MVE_VPTv4u32:
11123 case ARM::MVE_VPTv16s8:
11124 case ARM::MVE_VPTv8s16:
11125 case ARM::MVE_VPTv4s32:
11126 case ARM::MVE_VPTv4f32:
11127 case ARM::MVE_VPTv8f16:
11128 case ARM::MVE_VPTv16i8r:
11129 case ARM::MVE_VPTv8i16r:
11130 case ARM::MVE_VPTv4i32r:
11131 case ARM::MVE_VPTv16u8r:
11132 case ARM::MVE_VPTv8u16r:
11133 case ARM::MVE_VPTv4u32r:
11134 case ARM::MVE_VPTv16s8r:
11135 case ARM::MVE_VPTv8s16r:
11136 case ARM::MVE_VPTv4s32r:
11137 case ARM::MVE_VPTv4f32r:
11138 case ARM::MVE_VPTv8f16r: {
11139 assert(!inVPTBlock() &&
"Nested VPT blocks are not allowed");
11141 VPTState.Mask = MO.
getImm();
11142 VPTState.CurPosition = 0;
11150ARMAsmParser::checkEarlyTargetMatchPredicate(
MCInst &Inst,
11158 static_cast<ARMOperand &
>(*
Operands[0]).getToken() ==
"nop" &&
11159 ((
isThumb() && !isThumbOne()) || hasV6MOps())) {
11160 return Match_MnemonicFail;
11165 return Match_Success;
11169unsigned ARMAsmParser::checkTargetMatchPredicate(
MCInst &Inst) {
11176 "optionally flag setting instruction missing optional def operand");
11178 "operand count mismatch!");
11179 bool IsCPSR =
false;
11181 for (
unsigned OpNo = 0; OpNo < MCID.
NumOperands; ++OpNo) {
11182 if (MCID.
operands()[OpNo].isOptionalDef() &&
11189 if (isThumbOne() && !IsCPSR)
11190 return Match_RequiresFlagSetting;
11193 if (isThumbTwo() && !IsCPSR && !inITBlock())
11194 return Match_RequiresITBlock;
11195 if (isThumbTwo() && IsCPSR && inITBlock())
11196 return Match_RequiresNotITBlock;
11198 if (Opc == ARM::tLSLri && Inst.
getOperand(3).
getImm() == 0 && inITBlock())
11199 return Match_RequiresNotITBlock;
11200 }
else if (isThumbOne()) {
11203 if (Opc == ARM::tADDhirr && !hasV6MOps() &&
11206 return Match_RequiresThumb2;
11208 else if (Opc == ARM::tMOVr && !hasV6Ops() &&
11211 return Match_RequiresV6;
11217 if (Opc == ARM::t2MOVr && !hasV8Ops())
11222 return Match_RequiresV8;
11227 return Match_RequiresV8;
11233 case ARM::VMRS_FPCXTS:
11234 case ARM::VMRS_FPCXTNS:
11235 case ARM::VMSR_FPCXTS:
11236 case ARM::VMSR_FPCXTNS:
11237 case ARM::VMRS_FPSCR_NZCVQC:
11238 case ARM::VMSR_FPSCR_NZCVQC:
11240 case ARM::VMRS_VPR:
11242 case ARM::VMSR_VPR:
11248 return Match_InvalidOperand;
11254 return Match_RequiresV8;
11262 return Match_InvalidTiedOperand;
11269 if (MCID.
operands()[
I].RegClass == ARM::rGPRRegClassID) {
11285 if ((Reg == ARM::SP) && !hasV8Ops())
11286 return Match_RequiresV8;
11287 else if (Reg == ARM::PC)
11288 return Match_InvalidOperand;
11291 return Match_Success;
11304bool ARMAsmParser::isITBlockTerminator(
MCInst &Inst)
const {
11323 bool MatchingInlineAsm,
11324 bool &EmitInITBlock,
11327 if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
11328 return MatchInstructionImpl(
Operands, Inst, &NearMisses, MatchingInlineAsm);
11332 if (inImplicitITBlock()) {
11333 extendImplicitITBlock(ITState.Cond);
11334 if (MatchInstructionImpl(
Operands, Inst,
nullptr, MatchingInlineAsm) ==
11344 if (InstCond == ITCond) {
11345 EmitInITBlock =
true;
11346 return Match_Success;
11348 invertCurrentITCondition();
11349 EmitInITBlock =
true;
11350 return Match_Success;
11354 rewindImplicitITPosition();
11358 flushPendingInstructions(Out);
11359 unsigned PlainMatchResult =
11360 MatchInstructionImpl(
Operands, Inst, &NearMisses, MatchingInlineAsm);
11361 if (PlainMatchResult == Match_Success) {
11370 EmitInITBlock =
false;
11371 return Match_Success;
11374 EmitInITBlock =
false;
11375 return Match_Success;
11378 EmitInITBlock =
false;
11379 return Match_Success;
11386 startImplicitITBlock();
11387 if (MatchInstructionImpl(
Operands, Inst,
nullptr, MatchingInlineAsm) ==
11394 EmitInITBlock =
true;
11395 return Match_Success;
11398 discardImplicitITBlock();
11402 EmitInITBlock =
false;
11403 return PlainMatchResult;
11407 unsigned VariantID = 0);
11410bool ARMAsmParser::matchAndEmitInstruction(
SMLoc IDLoc,
unsigned &Opcode,
11413 bool MatchingInlineAsm) {
11415 unsigned MatchResult;
11416 bool PendConditionalInstruction =
false;
11419 MatchResult = MatchInstruction(
Operands, Inst, NearMisses, MatchingInlineAsm,
11420 PendConditionalInstruction, Out);
11425 switch (MatchResult) {
11426 case Match_Success:
11433 if (validateInstruction(Inst,
Operands, MnemonicOpsEndInd)) {
11436 forwardITPosition();
11437 forwardVPTPosition();
11446 while (processInstruction(Inst,
Operands, MnemonicOpsEndInd, Out))
11455 forwardITPosition();
11456 forwardVPTPosition();
11464 if (PendConditionalInstruction) {
11465 PendingConditionalInsts.
push_back(Inst);
11466 if (isITBlockFull() || isITBlockTerminator(Inst))
11467 flushPendingInstructions(Out);
11472 case Match_NearMisses:
11473 ReportNearMisses(NearMisses, IDLoc,
Operands);
11475 case Match_MnemonicFail: {
11476 FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
11478 ((ARMOperand &)*
Operands[0]).getToken(), FBS);
11479 return Error(IDLoc,
"invalid instruction" + Suggestion,
11480 ((ARMOperand &)*
Operands[0]).getLocRange());
11488bool ARMAsmParser::ParseDirective(
AsmToken DirectiveID) {
11494 if (IDVal ==
".word")
11495 parseLiteralValues(4, DirectiveID.
getLoc());
11496 else if (IDVal ==
".short" || IDVal ==
".hword")
11497 parseLiteralValues(2, DirectiveID.
getLoc());
11498 else if (IDVal ==
".thumb")
11499 parseDirectiveThumb(DirectiveID.
getLoc());
11500 else if (IDVal ==
".arm")
11501 parseDirectiveARM(DirectiveID.
getLoc());
11502 else if (IDVal ==
".thumb_func")
11503 parseDirectiveThumbFunc(DirectiveID.
getLoc());
11504 else if (IDVal ==
".code")
11505 parseDirectiveCode(DirectiveID.
getLoc());
11506 else if (IDVal ==
".syntax")
11507 parseDirectiveSyntax(DirectiveID.
getLoc());
11508 else if (IDVal ==
".unreq")
11509 parseDirectiveUnreq(DirectiveID.
getLoc());
11510 else if (IDVal ==
".fnend")
11511 parseDirectiveFnEnd(DirectiveID.
getLoc());
11512 else if (IDVal ==
".cantunwind")
11513 parseDirectiveCantUnwind(DirectiveID.
getLoc());
11514 else if (IDVal ==
".personality")
11515 parseDirectivePersonality(DirectiveID.
getLoc());
11516 else if (IDVal ==
".handlerdata")
11517 parseDirectiveHandlerData(DirectiveID.
getLoc());
11518 else if (IDVal ==
".setfp")
11519 parseDirectiveSetFP(DirectiveID.
getLoc());
11520 else if (IDVal ==
".pad")
11521 parseDirectivePad(DirectiveID.
getLoc());
11522 else if (IDVal ==
".save")
11523 parseDirectiveRegSave(DirectiveID.
getLoc(),
false);
11524 else if (IDVal ==
".vsave")
11525 parseDirectiveRegSave(DirectiveID.
getLoc(),
true);
11526 else if (IDVal ==
".ltorg" || IDVal ==
".pool")
11527 parseDirectiveLtorg(DirectiveID.
getLoc());
11528 else if (IDVal ==
".even")
11529 parseDirectiveEven(DirectiveID.
getLoc());
11530 else if (IDVal ==
".personalityindex")
11531 parseDirectivePersonalityIndex(DirectiveID.
getLoc());
11532 else if (IDVal ==
".unwind_raw")
11533 parseDirectiveUnwindRaw(DirectiveID.
getLoc());
11534 else if (IDVal ==
".movsp")
11535 parseDirectiveMovSP(DirectiveID.
getLoc());
11536 else if (IDVal ==
".arch_extension")
11537 parseDirectiveArchExtension(DirectiveID.
getLoc());
11538 else if (IDVal ==
".align")
11539 return parseDirectiveAlign(DirectiveID.
getLoc());
11540 else if (IDVal ==
".thumb_set")
11541 parseDirectiveThumbSet(DirectiveID.
getLoc());
11542 else if (IDVal ==
".inst")
11543 parseDirectiveInst(DirectiveID.
getLoc());
11544 else if (IDVal ==
".inst.n")
11545 parseDirectiveInst(DirectiveID.
getLoc(),
'n');
11546 else if (IDVal ==
".inst.w")
11547 parseDirectiveInst(DirectiveID.
getLoc(),
'w');
11548 else if (!IsMachO && !IsCOFF) {
11549 if (IDVal ==
".arch")
11550 parseDirectiveArch(DirectiveID.
getLoc());
11551 else if (IDVal ==
".cpu")
11552 parseDirectiveCPU(DirectiveID.
getLoc());
11553 else if (IDVal ==
".eabi_attribute")
11554 parseDirectiveEabiAttr(DirectiveID.
getLoc());
11555 else if (IDVal ==
".fpu")
11556 parseDirectiveFPU(DirectiveID.
getLoc());
11557 else if (IDVal ==
".fnstart")
11558 parseDirectiveFnStart(DirectiveID.
getLoc());
11559 else if (IDVal ==
".object_arch")
11560 parseDirectiveObjectArch(DirectiveID.
getLoc());
11561 else if (IDVal ==
".tlsdescseq")
11562 parseDirectiveTLSDescSeq(DirectiveID.
getLoc());
11565 }
else if (IsCOFF) {
11566 if (IDVal ==
".seh_stackalloc")
11567 parseDirectiveSEHAllocStack(DirectiveID.
getLoc(),
false);
11568 else if (IDVal ==
".seh_stackalloc_w")
11569 parseDirectiveSEHAllocStack(DirectiveID.
getLoc(),
true);
11570 else if (IDVal ==
".seh_save_regs")
11571 parseDirectiveSEHSaveRegs(DirectiveID.
getLoc(),
false);
11572 else if (IDVal ==
".seh_save_regs_w")
11573 parseDirectiveSEHSaveRegs(DirectiveID.
getLoc(),
true);
11574 else if (IDVal ==
".seh_save_sp")
11575 parseDirectiveSEHSaveSP(DirectiveID.
getLoc());
11576 else if (IDVal ==
".seh_save_fregs")
11577 parseDirectiveSEHSaveFRegs(DirectiveID.
getLoc());
11578 else if (IDVal ==
".seh_save_lr")
11579 parseDirectiveSEHSaveLR(DirectiveID.
getLoc());
11580 else if (IDVal ==
".seh_endprologue")
11581 parseDirectiveSEHPrologEnd(DirectiveID.
getLoc(),
false);
11582 else if (IDVal ==
".seh_endprologue_fragment")
11583 parseDirectiveSEHPrologEnd(DirectiveID.
getLoc(),
true);
11584 else if (IDVal ==
".seh_nop")
11585 parseDirectiveSEHNop(DirectiveID.
getLoc(),
false);
11586 else if (IDVal ==
".seh_nop_w")
11587 parseDirectiveSEHNop(DirectiveID.
getLoc(),
true);
11588 else if (IDVal ==
".seh_startepilogue")
11589 parseDirectiveSEHEpilogStart(DirectiveID.
getLoc(),
false);
11590 else if (IDVal ==
".seh_startepilogue_cond")
11591 parseDirectiveSEHEpilogStart(DirectiveID.
getLoc(),
true);
11592 else if (IDVal ==
".seh_endepilogue")
11593 parseDirectiveSEHEpilogEnd(DirectiveID.
getLoc());
11594 else if (IDVal ==
".seh_custom")
11595 parseDirectiveSEHCustom(DirectiveID.
getLoc());
11607bool ARMAsmParser::parseLiteralValues(
unsigned Size,
SMLoc L) {
11608 auto parseOne = [&]() ->
bool {
11610 if (getParser().parseExpression(
Value))
11612 getParser().getStreamer().emitValue(
Value,
Size, L);
11615 return (parseMany(parseOne));
11620bool ARMAsmParser::parseDirectiveThumb(
SMLoc L) {
11621 if (parseEOL() || check(!hasThumb(), L,
"target does not support Thumb mode"))
11627 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code16);
11628 getParser().getStreamer().emitCodeAlignment(
Align(2), &getSTI(), 0);
11634bool ARMAsmParser::parseDirectiveARM(
SMLoc L) {
11635 if (parseEOL() || check(!hasARM(), L,
"target does not support ARM mode"))
11640 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code32);
11641 getParser().getStreamer().emitCodeAlignment(
Align(4), &getSTI(), 0);
11676void ARMAsmParser::doBeforeLabelEmit(
MCSymbol *Symbol,
SMLoc IDLoc) {
11679 flushPendingInstructions(getStreamer());
11682void ARMAsmParser::onLabelParsed(
MCSymbol *Symbol) {
11683 if (NextSymbolIsThumb) {
11684 getParser().getStreamer().emitThumbFunc(Symbol);
11685 NextSymbolIsThumb =
false;
11691bool ARMAsmParser::parseDirectiveThumbFunc(
SMLoc L) {
11693 const auto Format = getContext().getObjectFileType();
11702 MCSymbol *
Func = getParser().getContext().getOrCreateSymbol(
11704 getParser().getStreamer().emitThumbFunc(Func);
11719 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code16);
11721 NextSymbolIsThumb =
true;
11727bool ARMAsmParser::parseDirectiveSyntax(
SMLoc L) {
11731 Error(L,
"unexpected token in .syntax directive");
11737 if (check(Mode ==
"divided" || Mode ==
"DIVIDED", L,
11738 "'.syntax divided' arm assembly not supported") ||
11739 check(Mode !=
"unified" && Mode !=
"UNIFIED", L,
11740 "unrecognized syntax mode in .syntax directive") ||
11751bool ARMAsmParser::parseDirectiveCode(
SMLoc L) {
11755 return Error(L,
"unexpected token in .code directive");
11757 if (Val != 16 && Val != 32) {
11758 Error(L,
"invalid operand to .code directive");
11768 return Error(L,
"target does not support Thumb mode");
11772 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code16);
11775 return Error(L,
"target does not support ARM mode");
11779 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code32);
11791 SMLoc SRegLoc, ERegLoc;
11792 if (check(parseRegister(Reg, SRegLoc, ERegLoc), SRegLoc,
11793 "register name expected") ||
11797 if (RegisterReqs.
insert(std::make_pair(
Name, Reg)).first->second != Reg)
11798 return Error(SRegLoc,
11799 "redefinition of '" +
Name +
"' does not match original.");
11806bool ARMAsmParser::parseDirectiveUnreq(
SMLoc L) {
11809 return Error(L,
"unexpected input in .unreq directive.");
11818void ARMAsmParser::FixModeAfterArchChange(
bool WasThumb,
SMLoc Loc) {
11820 if (WasThumb && hasThumb()) {
11823 }
else if (!WasThumb && hasARM()) {
11834 (WasThumb ?
"thumb" :
"arm") +
" mode, switching to " +
11835 (!WasThumb ?
"thumb" :
"arm") +
" mode");
11842bool ARMAsmParser::parseDirectiveArch(
SMLoc L) {
11843 StringRef Arch = getParser().parseStringToEndOfStatement().
trim();
11846 if (
ID == ARM::ArchKind::INVALID)
11847 return Error(L,
"Unknown arch name");
11854 setAvailableFeatures(ComputeAvailableFeatures(STI.
getFeatureBits()));
11855 FixModeAfterArchChange(WasThumb, L);
11857 getTargetStreamer().emitArch(
ID);
11864bool ARMAsmParser::parseDirectiveEabiAttr(
SMLoc L) {
11874 Error(TagLoc,
"attribute name not recognised: " +
Name);
11887 if (check(!CE, TagLoc,
"expected numeric constant"))
11890 Tag =
CE->getValue();
11897 bool IsStringValue =
false;
11899 int64_t IntegerValue = 0;
11900 bool IsIntegerValue =
false;
11903 IsStringValue =
true;
11905 IsStringValue =
true;
11906 IsIntegerValue =
true;
11907 }
else if (Tag < 32 || Tag % 2 == 0)
11908 IsIntegerValue =
true;
11909 else if (Tag % 2 == 1)
11910 IsStringValue =
true;
11914 if (IsIntegerValue) {
11915 const MCExpr *ValueExpr;
11922 return Error(ValueExprLoc,
"expected numeric constant");
11923 IntegerValue =
CE->getValue();
11931 std::string EscapedValue;
11932 if (IsStringValue) {
11940 StringValue = EscapedValue;
11950 if (IsIntegerValue && IsStringValue) {
11952 getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
11953 }
else if (IsIntegerValue)
11954 getTargetStreamer().emitAttribute(Tag, IntegerValue);
11955 else if (IsStringValue)
11956 getTargetStreamer().emitTextAttribute(Tag, StringValue);
11962bool ARMAsmParser::parseDirectiveCPU(
SMLoc L) {
11963 StringRef CPU = getParser().parseStringToEndOfStatement().
trim();
11968 if (!getSTI().isCPUStringValid(CPU))
11969 return Error(L,
"Unknown CPU name");
11974 setAvailableFeatures(ComputeAvailableFeatures(STI.
getFeatureBits()));
11975 FixModeAfterArchChange(WasThumb, L);
11982bool ARMAsmParser::parseDirectiveFPU(
SMLoc L) {
11983 SMLoc FPUNameLoc = getTok().getLoc();
11984 StringRef FPU = getParser().parseStringToEndOfStatement().
trim();
11987 std::vector<StringRef> Features;
11989 return Error(FPUNameLoc,
"Unknown FPU name");
11992 for (
auto Feature : Features)
11994 setAvailableFeatures(ComputeAvailableFeatures(STI.
getFeatureBits()));
11996 getTargetStreamer().emitFPU(
ID);
12002bool ARMAsmParser::parseDirectiveFnStart(
SMLoc L) {
12006 if (UC.hasFnStart()) {
12007 Error(L,
".fnstart starts before the end of previous one");
12008 UC.emitFnStartLocNotes();
12015 getTargetStreamer().emitFnStart();
12017 UC.recordFnStart(L);
12023bool ARMAsmParser::parseDirectiveFnEnd(
SMLoc L) {
12027 if (!UC.hasFnStart())
12028 return Error(L,
".fnstart must precede .fnend directive");
12031 getTargetStreamer().emitFnEnd();
12039bool ARMAsmParser::parseDirectiveCantUnwind(
SMLoc L) {
12043 UC.recordCantUnwind(L);
12045 if (check(!UC.hasFnStart(), L,
".fnstart must precede .cantunwind directive"))
12048 if (UC.hasHandlerData()) {
12049 Error(L,
".cantunwind can't be used with .handlerdata directive");
12050 UC.emitHandlerDataLocNotes();
12053 if (UC.hasPersonality()) {
12054 Error(L,
".cantunwind can't be used with .personality directive");
12055 UC.emitPersonalityLocNotes();
12059 getTargetStreamer().emitCantUnwind();
12065bool ARMAsmParser::parseDirectivePersonality(
SMLoc L) {
12067 bool HasExistingPersonality = UC.hasPersonality();
12071 return Error(L,
"unexpected input in .personality directive.");
12078 UC.recordPersonality(L);
12081 if (!UC.hasFnStart())
12082 return Error(L,
".fnstart must precede .personality directive");
12083 if (UC.cantUnwind()) {
12084 Error(L,
".personality can't be used with .cantunwind directive");
12085 UC.emitCantUnwindLocNotes();
12088 if (UC.hasHandlerData()) {
12089 Error(L,
".personality must precede .handlerdata directive");
12090 UC.emitHandlerDataLocNotes();
12093 if (HasExistingPersonality) {
12094 Error(L,
"multiple personality directives");
12095 UC.emitPersonalityLocNotes();
12099 MCSymbol *PR = getParser().getContext().getOrCreateSymbol(
Name);
12100 getTargetStreamer().emitPersonality(PR);
12106bool ARMAsmParser::parseDirectiveHandlerData(
SMLoc L) {
12110 UC.recordHandlerData(L);
12112 if (!UC.hasFnStart())
12113 return Error(L,
".fnstart must precede .personality directive");
12114 if (UC.cantUnwind()) {
12115 Error(L,
".handlerdata can't be used with .cantunwind directive");
12116 UC.emitCantUnwindLocNotes();
12120 getTargetStreamer().emitHandlerData();
12126bool ARMAsmParser::parseDirectiveSetFP(
SMLoc L) {
12129 if (check(!UC.hasFnStart(), L,
".fnstart must precede .setfp directive") ||
12130 check(UC.hasHandlerData(), L,
12131 ".setfp must precede .handlerdata directive"))
12138 if (check(!
FPReg, FPRegLoc,
"frame pointer register expected") ||
12145 if (check(!
SPReg, SPRegLoc,
"stack pointer register expected") ||
12146 check(
SPReg != ARM::SP &&
SPReg != UC.getFPReg(), SPRegLoc,
12147 "register should be either $sp or the latest fp register"))
12151 UC.saveFPReg(
FPReg);
12161 const MCExpr *OffsetExpr;
12164 if (getParser().parseExpression(OffsetExpr, EndLoc))
12165 return Error(ExLoc,
"malformed setfp offset");
12167 if (check(!CE, ExLoc,
"setfp offset must be an immediate"))
12181bool ARMAsmParser::parseDirectivePad(
SMLoc L) {
12184 if (!UC.hasFnStart())
12185 return Error(L,
".fnstart must precede .pad directive");
12186 if (UC.hasHandlerData())
12187 return Error(L,
".pad must precede .handlerdata directive");
12195 const MCExpr *OffsetExpr;
12198 if (getParser().parseExpression(OffsetExpr, EndLoc))
12199 return Error(ExLoc,
"malformed pad offset");
12202 return Error(ExLoc,
"pad offset must be an immediate");
12207 getTargetStreamer().emitPad(
CE->getValue());
12214bool ARMAsmParser::parseDirectiveRegSave(
SMLoc L,
bool IsVector) {
12216 if (!UC.hasFnStart())
12217 return Error(L,
".fnstart must precede .save or .vsave directives");
12218 if (UC.hasHandlerData())
12219 return Error(L,
".save or .vsave must precede .handlerdata directive");
12225 if (parseRegisterList(
Operands,
true,
true) || parseEOL())
12227 ARMOperand &
Op = (ARMOperand &)*
Operands[0];
12228 if (!IsVector && !
Op.isRegList())
12229 return Error(L,
".save expects GPR registers");
12230 if (IsVector && !
Op.isDPRRegList())
12231 return Error(L,
".vsave expects DPR registers");
12233 getTargetStreamer().emitRegSave(
Op.getRegList(), IsVector);
12241bool ARMAsmParser::parseDirectiveInst(
SMLoc Loc,
char Suffix) {
12257 return Error(Loc,
"width suffixes are invalid in ARM mode");
12260 auto parseOne = [&]() ->
bool {
12262 if (getParser().parseExpression(Expr))
12266 return Error(Loc,
"expected constant expression");
12269 char CurSuffix = Suffix;
12272 if (
Value->getValue() > 0xffff)
12273 return Error(Loc,
"inst.n operand is too big, use inst.w instead");
12276 if (
Value->getValue() > 0xffffffff)
12278 " operand is too big");
12282 if (
Value->getValue() < 0xe800)
12284 else if (
Value->getValue() >= 0xe8000000)
12287 return Error(Loc,
"cannot determine Thumb instruction size, "
12288 "use inst.n/inst.w instead");
12294 getTargetStreamer().emitInst(
Value->getValue(), CurSuffix);
12295 forwardITPosition();
12296 forwardVPTPosition();
12301 return Error(Loc,
"expected expression following directive");
12302 if (parseMany(parseOne))
12309bool ARMAsmParser::parseDirectiveLtorg(
SMLoc L) {
12312 getTargetStreamer().emitCurrentConstantPool();
12316bool ARMAsmParser::parseDirectiveEven(
SMLoc L) {
12323 getStreamer().initSections(
false, getSTI());
12324 Section = getStreamer().getCurrentSectionOnly();
12327 assert(Section &&
"must have section to emit alignment");
12329 getStreamer().emitCodeAlignment(
Align(2), &getSTI());
12331 getStreamer().emitValueToAlignment(
Align(2));
12338bool ARMAsmParser::parseDirectivePersonalityIndex(
SMLoc L) {
12340 bool HasExistingPersonality = UC.hasPersonality();
12342 const MCExpr *IndexExpression;
12348 UC.recordPersonalityIndex(L);
12350 if (!UC.hasFnStart()) {
12351 return Error(L,
".fnstart must precede .personalityindex directive");
12353 if (UC.cantUnwind()) {
12354 Error(L,
".personalityindex cannot be used with .cantunwind");
12355 UC.emitCantUnwindLocNotes();
12358 if (UC.hasHandlerData()) {
12359 Error(L,
".personalityindex must precede .handlerdata directive");
12360 UC.emitHandlerDataLocNotes();
12363 if (HasExistingPersonality) {
12364 Error(L,
"multiple personality directives");
12365 UC.emitPersonalityLocNotes();
12371 return Error(IndexLoc,
"index must be a constant number");
12373 return Error(IndexLoc,
12374 "personality routine index should be in range [0-3]");
12376 getTargetStreamer().emitPersonalityIndex(
CE->getValue());
12382bool ARMAsmParser::parseDirectiveUnwindRaw(
SMLoc L) {
12385 const MCExpr *OffsetExpr;
12386 SMLoc OffsetLoc = getLexer().getLoc();
12388 if (!UC.hasFnStart())
12389 return Error(L,
".fnstart must precede .unwind_raw directives");
12390 if (getParser().parseExpression(OffsetExpr))
12391 return Error(OffsetLoc,
"expected expression");
12395 return Error(OffsetLoc,
"offset must be a constant");
12404 auto parseOne = [&]() ->
bool {
12405 const MCExpr *OE =
nullptr;
12406 SMLoc OpcodeLoc = getLexer().getLoc();
12409 OpcodeLoc,
"expected opcode expression"))
12413 return Error(OpcodeLoc,
"opcode value must be a constant");
12414 const int64_t Opcode =
OC->getValue();
12415 if (Opcode & ~0xff)
12416 return Error(OpcodeLoc,
"invalid opcode");
12422 SMLoc OpcodeLoc = getLexer().getLoc();
12424 return Error(OpcodeLoc,
"expected opcode expression");
12425 if (parseMany(parseOne))
12428 getTargetStreamer().emitUnwindRaw(
StackOffset, Opcodes);
12434bool ARMAsmParser::parseDirectiveTLSDescSeq(
SMLoc L) {
12438 return TokError(
"expected variable after '.tlsdescseq' directive");
12448 getTargetStreamer().annotateTLSDescriptorSequence(SRE);
12454bool ARMAsmParser::parseDirectiveMovSP(
SMLoc L) {
12456 if (!UC.hasFnStart())
12457 return Error(L,
".fnstart must precede .movsp directives");
12458 if (UC.getFPReg() != ARM::SP)
12459 return Error(L,
"unexpected .movsp directive");
12464 return Error(SPRegLoc,
"register expected");
12466 return Error(SPRegLoc,
"sp and pc are not permitted in .movsp directive");
12473 const MCExpr *OffsetExpr;
12477 return Error(OffsetLoc,
"malformed offset expression");
12481 return Error(OffsetLoc,
"offset must be an immediate constant");
12490 UC.saveFPReg(
SPReg);
12497bool ARMAsmParser::parseDirectiveObjectArch(
SMLoc L) {
12500 return Error(getLexer().getLoc(),
"unexpected token");
12508 if (
ID == ARM::ArchKind::INVALID)
12509 return Error(ArchLoc,
"unknown architecture '" + Arch +
"'");
12513 getTargetStreamer().emitObjectArch(
ID);
12519bool ARMAsmParser::parseDirectiveAlign(
SMLoc L) {
12525 assert(Section &&
"must have section to emit alignment");
12527 getStreamer().emitCodeAlignment(
Align(4), &getSTI(), 0);
12529 getStreamer().emitValueToAlignment(
Align(4), 0, 1, 0);
12537bool ARMAsmParser::parseDirectiveThumbSet(
SMLoc L) {
12542 "expected identifier after '.thumb_set'") ||
12552 getTargetStreamer().emitThumbSet(
Sym,
Value);
12559bool ARMAsmParser::parseDirectiveSEHAllocStack(
SMLoc L,
bool Wide) {
12561 if (parseImmExpr(
Size))
12563 getTargetStreamer().emitARMWinCFIAllocStack(
Size, Wide);
12570bool ARMAsmParser::parseDirectiveSEHSaveRegs(
SMLoc L,
bool Wide) {
12573 if (parseRegisterList(
Operands) || parseEOL())
12575 ARMOperand &
Op = (ARMOperand &)*
Operands[0];
12576 if (!
Op.isRegList())
12577 return Error(L,
".seh_save_regs{_w} expects GPR registers");
12580 for (
size_t i = 0; i < RegList.
size(); ++i) {
12581 unsigned Reg =
MRI->getEncodingValue(RegList[i]);
12585 return Error(L,
".seh_save_regs{_w} can't include SP");
12586 assert(Reg < 16U &&
"Register out of range");
12587 unsigned Bit = (1u <<
Reg);
12590 if (!Wide && (Mask & 0x1f00) != 0)
12592 ".seh_save_regs cannot save R8-R12, needs .seh_save_regs_w");
12593 getTargetStreamer().emitARMWinCFISaveRegMask(Mask, Wide);
12599bool ARMAsmParser::parseDirectiveSEHSaveSP(
SMLoc L) {
12601 if (!Reg || !
MRI->getRegClass(ARM::GPRRegClassID).contains(Reg))
12602 return Error(L,
"expected GPR");
12603 unsigned Index =
MRI->getEncodingValue(Reg);
12604 if (Index > 14 || Index == 13)
12605 return Error(L,
"invalid register for .seh_save_sp");
12606 getTargetStreamer().emitARMWinCFISaveSP(Index);
12612bool ARMAsmParser::parseDirectiveSEHSaveFRegs(
SMLoc L) {
12615 if (parseRegisterList(
Operands) || parseEOL())
12617 ARMOperand &
Op = (ARMOperand &)*
Operands[0];
12618 if (!
Op.isDPRRegList())
12619 return Error(L,
".seh_save_fregs expects DPR registers");
12622 for (
size_t i = 0; i < RegList.
size(); ++i) {
12623 unsigned Reg =
MRI->getEncodingValue(RegList[i]);
12624 assert(Reg < 32U &&
"Register out of range");
12625 unsigned Bit = (1u <<
Reg);
12630 return Error(L,
".seh_save_fregs missing registers");
12632 unsigned First = 0;
12633 while ((Mask & 1) == 0) {
12637 if (((Mask + 1) & Mask) != 0)
12639 ".seh_save_fregs must take a contiguous range of registers");
12641 while ((Mask & 2) != 0) {
12645 if (First < 16 && Last >= 16)
12646 return Error(L,
".seh_save_fregs must be all d0-d15 or d16-d31");
12647 getTargetStreamer().emitARMWinCFISaveFRegs(
First,
Last);
12653bool ARMAsmParser::parseDirectiveSEHSaveLR(
SMLoc L) {
12655 if (parseImmExpr(
Offset))
12657 getTargetStreamer().emitARMWinCFISaveLR(
Offset);
12664bool ARMAsmParser::parseDirectiveSEHPrologEnd(
SMLoc L,
bool Fragment) {
12665 getTargetStreamer().emitARMWinCFIPrologEnd(Fragment);
12672bool ARMAsmParser::parseDirectiveSEHNop(
SMLoc L,
bool Wide) {
12673 getTargetStreamer().emitARMWinCFINop(Wide);
12680bool ARMAsmParser::parseDirectiveSEHEpilogStart(
SMLoc L,
bool Condition) {
12687 return Error(S,
".seh_startepilogue_cond missing condition");
12690 return Error(S,
"invalid condition");
12694 getTargetStreamer().emitARMWinCFIEpilogStart(
CC);
12700bool ARMAsmParser::parseDirectiveSEHEpilogEnd(
SMLoc L) {
12701 getTargetStreamer().emitARMWinCFIEpilogEnd();
12707bool ARMAsmParser::parseDirectiveSEHCustom(
SMLoc L) {
12708 unsigned Opcode = 0;
12711 if (parseImmExpr(Byte))
12713 if (Byte > 0xff || Byte < 0)
12714 return Error(L,
"Invalid byte value in .seh_custom");
12715 if (Opcode > 0x00ffffff)
12716 return Error(L,
"Too many bytes in .seh_custom");
12719 Opcode = (Opcode << 8) | Byte;
12721 getTargetStreamer().emitARMWinCFICustom(Opcode);
12733#define GET_REGISTER_MATCHER
12734#define GET_SUBTARGET_FEATURE_NAME
12735#define GET_MATCHER_IMPLEMENTATION
12736#define GET_MNEMONIC_SPELL_CHECKER
12737#include "ARMGenAsmMatcher.inc"
12743ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
12744 switch (MatchError) {
12747 return hasV8Ops() ?
"operand must be a register in range [r0, r14]"
12748 :
"operand must be a register in range [r0, r12] or r14";
12751 return hasD32() ?
"operand must be a register in range [d0, d31]"
12752 :
"operand must be a register in range [d0, d15]";
12753 case Match_DPR_RegList:
12754 return hasD32() ?
"operand must be a list of registers in range [d0, d31]"
12755 :
"operand must be a list of registers in range [d0, d15]";
12759 return getMatchKindDiag(MatchError);
12782 std::multimap<unsigned, unsigned> OperandMissesSeen;
12784 bool ReportedTooFewOperands =
false;
12791 switch (
I.getKind()) {
12794 ((ARMOperand &)*
Operands[
I.getOperandIndex()]).getStartLoc();
12795 const char *OperandDiag =
12796 getCustomOperandDiag((ARMMatchResultTy)
I.getOperandError());
12803 unsigned DupCheckMatchClass = OperandDiag ?
I.getOperandClass() : ~0
U;
12804 auto PrevReports = OperandMissesSeen.equal_range(
I.getOperandIndex());
12805 if (std::any_of(PrevReports.first, PrevReports.second,
12806 [DupCheckMatchClass](
12807 const std::pair<unsigned, unsigned> Pair) {
12808 if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
12809 return Pair.second == DupCheckMatchClass;
12811 return isSubclass((MatchClassKind)DupCheckMatchClass,
12812 (MatchClassKind)Pair.second);
12815 OperandMissesSeen.insert(
12816 std::make_pair(
I.getOperandIndex(), DupCheckMatchClass));
12818 NearMissMessage Message;
12819 Message.Loc = OperandLoc;
12821 Message.Message = OperandDiag;
12822 }
else if (
I.getOperandClass() == InvalidMatchClass) {
12823 Message.Message =
"too many operands for instruction";
12825 Message.Message =
"invalid operand for instruction";
12827 dbgs() <<
"Missing diagnostic string for operand class "
12828 << getMatchClassName((MatchClassKind)
I.getOperandClass())
12829 <<
I.getOperandClass() <<
", error " <<
I.getOperandError()
12830 <<
", opcode " << MII.getName(
I.getOpcode()) <<
"\n");
12838 if (FeatureMissesSeen.
count(MissingFeatures))
12840 FeatureMissesSeen.
insert(MissingFeatures);
12844 if (MissingFeatures.
test(Feature_IsARMBit) && !hasARM())
12848 if (
isThumb() && MissingFeatures.
test(Feature_IsARMBit) &&
12849 MissingFeatures.
count() > 1)
12851 if (!
isThumb() && MissingFeatures.
test(Feature_IsThumbBit) &&
12852 MissingFeatures.
count() > 1)
12854 if (!
isThumb() && MissingFeatures.
test(Feature_IsThumb2Bit) &&
12856 Feature_IsThumbBit})).
any())
12858 if (isMClass() && MissingFeatures.
test(Feature_HasNEONBit))
12861 NearMissMessage Message;
12862 Message.Loc = IDLoc;
12865 OS <<
"instruction requires:";
12866 for (
unsigned i = 0, e = MissingFeatures.
size(); i != e; ++i)
12867 if (MissingFeatures.
test(i))
12875 NearMissMessage Message;
12876 Message.Loc = IDLoc;
12877 switch (
I.getPredicateError()) {
12878 case Match_RequiresNotITBlock:
12879 Message.Message =
"flag setting instruction only valid outside IT block";
12881 case Match_RequiresITBlock:
12882 Message.Message =
"instruction only valid inside IT block";
12884 case Match_RequiresV6:
12885 Message.Message =
"instruction variant requires ARMv6 or later";
12887 case Match_RequiresThumb2:
12888 Message.Message =
"instruction variant requires Thumb2";
12890 case Match_RequiresV8:
12891 Message.Message =
"instruction variant requires ARMv8 or later";
12893 case Match_RequiresFlagSetting:
12894 Message.Message =
"no flag-preserving variant of this instruction available";
12896 case Match_InvalidTiedOperand: {
12897 ARMOperand &
Op =
static_cast<ARMOperand &
>(*
Operands[0]);
12898 if (
Op.isToken() &&
Op.getToken() ==
"mul") {
12899 Message.Message =
"destination register must match a source register";
12900 Message.Loc =
Operands[MnemonicOpsEndInd]->getStartLoc();
12906 case Match_InvalidOperand:
12907 Message.Message =
"invalid operand for instruction";
12917 if (!ReportedTooFewOperands) {
12918 SMLoc EndLoc = ((ARMOperand &)*
Operands.back()).getEndLoc();
12920 EndLoc,
StringRef(
"too few operands for instruction")});
12921 ReportedTooFewOperands =
true;
12936 FilterNearMisses(NearMisses, Messages, IDLoc,
Operands);
12938 if (Messages.
size() == 0) {
12941 Error(IDLoc,
"invalid instruction");
12942 }
else if (Messages.
size() == 1) {
12944 Error(Messages[0].Loc, Messages[0].Message);
12948 Error(IDLoc,
"invalid instruction, any one of the following would fix this:");
12949 for (
auto &M : Messages) {
12959 static const struct {
12964 {
ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC}},
12966 {Feature_HasV8Bit},
12967 {ARM::FeatureAES, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12969 {Feature_HasV8Bit},
12970 {ARM::FeatureSHA2, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12972 {Feature_HasV8Bit},
12973 {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12975 {Feature_HasV8_1MMainlineBit},
12976 {ARM::HasMVEFloatOps}},
12978 {Feature_HasV8Bit},
12979 {ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12981 {Feature_HasV7Bit, Feature_IsNotMClassBit},
12982 {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM}},
12984 {Feature_HasV7Bit, Feature_IsNotMClassBit},
12987 {Feature_HasV8Bit},
12988 {ARM::FeatureNEON, ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12989 {
ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone}},
12991 {
ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization}},
12993 {Feature_HasV8_2aBit},
12994 {ARM::FeatureFPARMv8, ARM::FeatureFullFP16}},
12995 {
ARM::AEK_RAS, {Feature_HasV8Bit}, {ARM::FeatureRAS}},
12996 {
ARM::AEK_LOB, {Feature_HasV8_1MMainlineBit}, {ARM::FeatureLOB}},
12997 {
ARM::AEK_PACBTI, {Feature_HasV8_1MMainlineBit}, {ARM::FeaturePACBTI}},
13005 bool EnableFeature = !
Name.consume_front_insensitive(
"no");
13008 return Error(ExtLoc,
"unknown architectural extension: " +
Name);
13015 return Error(ExtLoc,
"unsupported architectural extension: " +
Name);
13018 return Error(ExtLoc,
"architectural extension '" +
Name +
13020 "allowed for the current base architecture");
13023 if (EnableFeature) {
13029 setAvailableFeatures(Features);
13037bool ARMAsmParser::parseDirectiveArchExtension(
SMLoc L) {
13042 return Error(getLexer().getLoc(),
"expected architecture extension name");
13051 if (
Name ==
"nocrypto") {
13052 enableArchExtFeature(
"nosha2", ExtLoc);
13053 enableArchExtFeature(
"noaes", ExtLoc);
13056 if (enableArchExtFeature(
Name, ExtLoc))
13059 return Error(ExtLoc,
"unknown architectural extension: " +
Name);
13066 ARMOperand &
Op =
static_cast<ARMOperand &
>(AsmOp);
13075 if (
CE->getValue() == 0)
13076 return Match_Success;
13081 if (
CE->getValue() == 8)
13082 return Match_Success;
13087 if (
CE->getValue() == 16)
13088 return Match_Success;
13092 const MCExpr *SOExpr =
Op.getImm();
13094 if (!SOExpr->evaluateAsAbsolute(
Value))
13095 return Match_Success;
13096 assert((
Value >= std::numeric_limits<int32_t>::min() &&
13097 Value <= std::numeric_limits<uint32_t>::max()) &&
13098 "expression value must be representable in 32 bits");
13102 if (hasV8Ops() &&
Op.isReg() &&
Op.getReg() == ARM::SP)
13103 return Match_Success;
13106 return Match_InvalidOperand;
13109bool ARMAsmParser::isMnemonicVPTPredicable(
StringRef Mnemonic,
13114 if (MS.isVPTPredicableCDEInstr(Mnemonic) ||
13115 (Mnemonic.
starts_with(
"vldrh") && Mnemonic !=
"vldrhi") ||
13117 !(ExtraToken ==
".f16" || ExtraToken ==
".32" || ExtraToken ==
".16" ||
13118 ExtraToken ==
".8")) ||
13119 (Mnemonic.
starts_with(
"vrint") && Mnemonic !=
"vrintr") ||
13120 (Mnemonic.
starts_with(
"vstrh") && Mnemonic !=
"vstrhi"))
13123 const char *predicable_prefixes[] = {
13124 "vabav",
"vabd",
"vabs",
"vadc",
"vadd",
13125 "vaddlv",
"vaddv",
"vand",
"vbic",
"vbrsr",
13126 "vcadd",
"vcls",
"vclz",
"vcmla",
"vcmp",
13127 "vcmul",
"vctp",
"vcvt",
"vddup",
"vdup",
13128 "vdwdup",
"veor",
"vfma",
"vfmas",
"vfms",
13129 "vhadd",
"vhcadd",
"vhsub",
"vidup",
"viwdup",
13130 "vldrb",
"vldrd",
"vldrw",
"vmax",
"vmaxa",
13131 "vmaxav",
"vmaxnm",
"vmaxnma",
"vmaxnmav",
"vmaxnmv",
13132 "vmaxv",
"vmin",
"vminav",
"vminnm",
"vminnmav",
13133 "vminnmv",
"vminv",
"vmla",
"vmladav",
"vmlaldav",
13134 "vmlalv",
"vmlas",
"vmlav",
"vmlsdav",
"vmlsldav",
13135 "vmovlb",
"vmovlt",
"vmovnb",
"vmovnt",
"vmul",
13136 "vmvn",
"vneg",
"vorn",
"vorr",
"vpnot",
13137 "vpsel",
"vqabs",
"vqadd",
"vqdmladh",
"vqdmlah",
13138 "vqdmlash",
"vqdmlsdh",
"vqdmulh",
"vqdmull",
"vqmovn",
13139 "vqmovun",
"vqneg",
"vqrdmladh",
"vqrdmlah",
"vqrdmlash",
13140 "vqrdmlsdh",
"vqrdmulh",
"vqrshl",
"vqrshrn",
"vqrshrun",
13141 "vqshl",
"vqshrn",
"vqshrun",
"vqsub",
"vrev16",
13142 "vrev32",
"vrev64",
"vrhadd",
"vrmlaldavh",
"vrmlalvh",
13143 "vrmlsldavh",
"vrmulh",
"vrshl",
"vrshr",
"vrshrn",
13144 "vsbc",
"vshl",
"vshlc",
"vshll",
"vshr",
13145 "vshrn",
"vsli",
"vsri",
"vstrb",
"vstrd",
13148 return any_of(predicable_prefixes, [&Mnemonic](
const char *prefix) {
13153std::unique_ptr<ARMOperand> ARMAsmParser::defaultCondCodeOp() {
13157std::unique_ptr<ARMOperand> ARMAsmParser::defaultCCOutOp() {
13158 return ARMOperand::CreateCCOut(0,
SMLoc(), *
this);
13161std::unique_ptr<ARMOperand> ARMAsmParser::defaultVPTPredOp() {
unsigned const MachineRegisterInfo * MRI
This file defines the StringMap class.
static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS, unsigned VariantID=0)
static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing)
static bool instIsBreakpoint(const MCInst &Inst)
unsigned findCCOutInd(const OperandVector &Operands, unsigned MnemonicOpsEndInd)
static bool isDataTypeToken(StringRef Tok)
}
static MCRegister getNextRegister(MCRegister Reg)
static MCRegister MatchRegisterName(StringRef Name)
static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing)
unsigned getRegListInd(const OperandVector &Operands, unsigned MnemonicOpsEndInd)
static const char * getSubtargetFeatureName(uint64_t Val)
static bool isVectorPredicable(const MCInstrDesc &MCID)
static bool listContainsReg(const MCInst &Inst, unsigned OpNo, MCRegister Reg)
static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp)
MatchCoprocessorOperandName - Try to parse an coprocessor related instruction with a symbolic operand...
static void applyMnemonicAliases(StringRef &Mnemonic, const FeatureBitset &Features, unsigned VariantID)
void removeCCOut(OperandVector &Operands, unsigned &MnemonicOpsEndInd)
static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo, MCRegister Reg, MCRegister HiReg, bool &containsReg)
static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT)
static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID)
static bool isThumbI8Relocation(MCParsedAsmOperand &MCOp)
bool operandsContainWide(OperandVector &Operands, unsigned MnemonicOpsEndInd)
void removeCondCode(OperandVector &Operands, unsigned &MnemonicOpsEndInd)
static bool insertNoDuplicates(SmallVectorImpl< std::pair< unsigned, MCRegister > > &Regs, unsigned Enc, MCRegister Reg)
LLVM_EXTERNAL_VISIBILITY void LLVMInitializeARMAsmParser()
Force static initialization.
static unsigned getMnemonicOpsEndInd(const OperandVector &Operands)
static bool isARMMCExpr(MCParsedAsmOperand &MCOp)
unsigned findCondCodeInd(const OperandVector &Operands, unsigned MnemonicOpsEndInd)
void removeVPTCondCode(OperandVector &Operands, unsigned &MnemonicOpsEndInd)
static bool isThumb(const MCSubtargetInfo &STI)
static uint64_t scale(uint64_t Num, uint32_t N, uint32_t D)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static Register getFPReg(const CSKYSubtarget &STI)
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define LLVM_EXTERNAL_VISIBILITY
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static cl::opt< bool > AddBuildAttributes("hexagon-add-build-attributes")
mir Rename Register Operands
static MSP430CC::CondCodes getCondCode(unsigned Cond)
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static constexpr Register SPReg
static constexpr Register FPReg
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI Pre allocate WWM Registers
static cl::opt< std::set< SPIRV::Extension::Extension >, false, SPIRVExtensionsParser > Extensions("spirv-ext", cl::desc("Specify list of enabled SPIR-V extensions"))
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file implements the SmallBitVector class.
This file defines the SmallSet class.
This file defines the SmallVector class.
StringSet - A set-like wrapper for the StringMap.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=ARM::NoRegAltName)
VariantKind getKind() const
getOpcode - Get the kind of this expression.
static const ARMMCExpr * create(VariantKind Kind, const MCExpr *Expr, MCContext &Ctx)
Target independent representation for an assembler token.
int64_t getIntVal() const
bool isNot(TokenKind K) const
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on string...
StringRef getStringContents() const
Get the contents of a string token (without quotes).
bool is(TokenKind K) const
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string.
This class represents an Operation in the Expression.
Implements a dense probed hash-table based set.
Base class for user error types.
Lightweight error class with error context and mandatory checking.
Container class for subtarget features.
constexpr bool test(unsigned I) const
constexpr size_t size() const
Generic assembler lexer interface, for use by target specific assembly lexers.
const AsmToken peekTok(bool ShouldSkipSpace=true)
Look ahead at the next token to be lexed.
virtual void Initialize(MCAsmParser &Parser)
Initialize the extension for parsing using the given Parser.
MCStreamer & getStreamer()
MCAsmParser & getParser()
Generic assembler parser interface, for use by target specific assembly parsers.
bool parseToken(AsmToken::TokenKind T, const Twine &Msg="unexpected token")
virtual bool parseEscapedString(std::string &Data)=0
Parse the current token as a string which may include escaped characters and return the string conten...
virtual MCStreamer & getStreamer()=0
Return the output streamer for the assembler.
virtual void Note(SMLoc L, const Twine &Msg, SMRange Range=std::nullopt)=0
Emit a note at the location L, with the message Msg.
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
const AsmToken & getTok() const
Get the current AsmToken from the stream.
virtual bool parseIdentifier(StringRef &Res)=0
Parse an identifier or string (as a quoted identifier) and set Res to the identifier contents.
bool parseOptionalToken(AsmToken::TokenKind T)
Attempt to parse and consume token, returning true on success.
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
virtual bool Warning(SMLoc L, const Twine &Msg, SMRange Range=std::nullopt)=0
Emit a warning at the location L, with the message Msg.
bool Error(SMLoc L, const Twine &Msg, SMRange Range=std::nullopt)
Return an error at the location L, with the message Msg.
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
static const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Context object for machine code objects.
const MCRegisterInfo * getRegisterInfo() const
Base class for the full range of assembler expressions which are needed for parsing.
@ Constant
Constant expressions.
Instances of this class represent a single low-level machine instruction.
void dump_pretty(raw_ostream &OS, const MCInstPrinter *Printer=nullptr, StringRef Separator=" ", const MCRegisterInfo *RegInfo=nullptr) const
Dump the MCInst as prettily as possible using the additional MC structures, if given.
unsigned getNumOperands() const
unsigned getOpcode() const
iterator insert(iterator I, const MCOperand &Op)
void addOperand(const MCOperand Op)
void setOpcode(unsigned Op)
const MCOperand & getOperand(unsigned i) const
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool isIndirectBranch() const
Return true if this is an indirect branch, such as a branch through a register.
int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
bool hasOptionalDef() const
Set if this instruction has an optional definition, e.g.
bool hasDefOfPhysReg(const MCInst &MI, MCRegister Reg, const MCRegisterInfo &RI) const
Return true if this instruction defines the specified physical register, either explicitly or implici...
unsigned short NumOperands
bool isBranch() const
Returns true if this is a conditional, unconditional, or indirect branch.
bool isPredicable() const
Return true if this instruction has a predicate operand that controls execution.
bool isCall() const
Return true if the instruction is a call.
bool isTerminator() const
Returns true if this instruction is part of the terminator for a basic block.
bool isReturn() const
Return true if the instruction is a return.
Interface to description of machine instruction set.
Instances of this class represent operands of the MCInst class.
static MCOperand createExpr(const MCExpr *Val)
static MCOperand createReg(MCRegister Reg)
static MCOperand createImm(int64_t Val)
MCRegister getReg() const
Returns the register number.
const MCExpr * getExpr() const
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand.
virtual SMLoc getStartLoc() const =0
getStartLoc - Get the location of the first token of this operand.
virtual bool isReg() const =0
isReg - Is this a register operand?
virtual bool isMem() const =0
isMem - Is this a memory operand?
virtual MCRegister getReg() const =0
virtual void print(raw_ostream &OS) const =0
print - Print a debug representation of the operand to the given stream.
virtual bool isToken() const =0
isToken - Is this a token operand?
virtual bool isImm() const =0
isImm - Is this an immediate operand?
virtual SMLoc getEndLoc() const =0
getEndLoc - Get the location of the last token of this operand.
MCRegisterClass - Base class of TargetRegisterClass.
unsigned getID() const
getID() - Return the register class ID number.
unsigned getNumRegs() const
getNumRegs - Return the number of registers in this class.
unsigned getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
constexpr unsigned id() const
Instances of this class represent a uniqued identifier for a section in the current translation unit.
Streaming machine code generation interface.
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
virtual void emitLabel(MCSymbol *Symbol, SMLoc Loc=SMLoc())
Emit a label for Symbol into the current section.
MCTargetStreamer * getTargetStreamer()
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const FeatureBitset & getFeatureBits() const
FeatureBitset ApplyFeatureFlag(StringRef FS)
Apply a feature flag and return the re-computed feature bits, including all feature bits implied by t...
FeatureBitset SetFeatureBitsTransitively(const FeatureBitset &FB)
Set/clear additional feature bits, including all other bits they imply.
void setDefaultFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS)
Set the features to the default for the given CPU and TuneCPU, with an appended feature string.
FeatureBitset ToggleFeature(uint64_t FB)
Toggle a feature and return the re-computed feature bits.
FeatureBitset ClearFeatureBitsTransitively(const FeatureBitset &FB)
Represent a reference to a symbol from inside an expression.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
MCTargetAsmParser - Generic interface to target specific assembly parsers.
virtual void onLabelParsed(MCSymbol *Symbol)
virtual bool parseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, OperandVector &Operands)=0
Parse one assembly instruction.
MCSubtargetInfo & copySTI()
Create a copy of STI and return a non-const reference to it.
@ FIRST_TARGET_MATCH_RESULT_TY
virtual bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc)=0
virtual bool ParseDirective(AsmToken DirectiveID)
ParseDirective - Parse a target specific assembler directive This method is deprecated,...
virtual unsigned checkEarlyTargetMatchPredicate(MCInst &Inst, const OperandVector &Operands)
Validate the instruction match against any complex target predicates before rendering any operands to...
virtual ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc)=0
tryParseRegister - parse one register if possible
virtual bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, OperandVector &Operands, MCStreamer &Out, uint64_t &ErrorInfo, bool MatchingInlineAsm)=0
Recognize a series of operands of a parsed instruction as an actual MCInst and emit it to the specifi...
virtual void flushPendingInstructions(MCStreamer &Out)
Ensure that all previously parsed instructions have been emitted to the output streamer,...
void setAvailableFeatures(const FeatureBitset &Value)
virtual MCSymbolRefExpr::VariantKind getVariantKindForName(StringRef Name) const
const MCSubtargetInfo & getSTI() const
virtual void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc)
virtual unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, unsigned Kind)
Allow a target to add special case operand matching for things that tblgen doesn't/can't handle effec...
virtual unsigned checkTargetMatchPredicate(MCInst &Inst)
checkTargetMatchPredicate - Validate the instruction match against any complex target predicates not ...
Target specific streamer interface.
Ternary parse status returned by various parse* methods.
constexpr bool isFailure() const
static constexpr StatusTy Failure
constexpr bool isSuccess() const
static constexpr StatusTy Success
static constexpr StatusTy NoMatch
Represents a location in source code.
static SMLoc getFromPointer(const char *Ptr)
constexpr const char * getPointer() const
Represents a range in source code.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
StringMap - This is an unconventional map that is specialized for handling keys that are "strings",...
iterator find(StringRef Key)
size_type count(StringRef Key) const
count - Return 1 if the element is in the map, 0 otherwise.
bool insert(MapEntryTy *KeyValue)
insert - Insert the specified key/value pair into the map.
StringRef - Represent a constant reference to a string, i.e.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
constexpr size_t size() const
size - Get the string size.
StringRef trim(char Char) const
Return string with consecutive Char characters starting from the left and right removed.
std::string lower() const
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
static constexpr size_t npos
bool equals_insensitive(StringRef RHS) const
Check for string equality, ignoring case.
StringSet - A wrapper for StringMap that provides set-like functionality.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
Triple - Helper class for working with autoconf configuration names.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
LLVM Value Representation.
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
This class implements an extremely fast bulk output stream that can only output to a stream.
A raw_ostream that writes to an SmallVector or SmallString.
This class provides various memory handling functions that manipulate MemoryBlock instances.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const TagNameMap & getARMAttributeTags()
static CondCodes getOppositeCondition(CondCodes CC)
unsigned getSORegOffset(unsigned Op)
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into a shifter_operand immed...
int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
unsigned encodeNEONi16splat(unsigned Value)
float getFPImmFloat(unsigned Imm)
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
unsigned getAM2Opc(AddrOpc Opc, unsigned Imm12, ShiftOpc SO, unsigned IdxMode=0)
unsigned getAM5Opc(AddrOpc Opc, unsigned char Offset)
getAM5Opc - This function encodes the addrmode5 opc field.
ShiftOpc getSORegShOp(unsigned Op)
bool isNEONi16splat(unsigned Value)
Checks if Value is a correct immediate for instructions like VBIC/VORR.
unsigned getAM5FP16Opc(AddrOpc Opc, unsigned char Offset)
getAM5FP16Opc - This function encodes the addrmode5fp16 opc field.
unsigned getAM3Opc(AddrOpc Opc, unsigned char Offset, unsigned IdxMode=0)
getAM3Opc - This function encodes the addrmode3 opc field.
bool isNEONi32splat(unsigned Value)
Checks if Value is a correct immediate for instructions like VBIC/VORR.
unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
unsigned encodeNEONi32splat(unsigned Value)
Encode NEON 32 bits Splat immediate for instructions like VBIC/VORR.
const StringRef getShiftOpcStr(ShiftOpc Op)
static const char * IFlagsToString(unsigned val)
bool getFPUFeatures(FPUKind FPUKind, std::vector< StringRef > &Features)
StringRef getArchName(ArchKind AK)
uint64_t parseArchExt(StringRef ArchExt)
ArchKind parseArch(StringRef Arch)
bool isVpred(OperandType op)
FPUKind parseFPU(StringRef FPU)
bool isCDECoproc(size_t Coproc, const MCSubtargetInfo &STI)
@ D16
Only 16 D registers.
constexpr bool any(E Val)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
std::optional< unsigned > attrTypeFromString(StringRef tag, TagNameMap tagNameMap)
Flag
These should be considered private to the implementation of the MCInstrDesc class.
bool parseAssignmentExpression(StringRef Name, bool allow_redef, MCAsmParser &Parser, MCSymbol *&Symbol, const MCExpr *&Value)
Parse a value expression and return whether it can be assigned to a symbol with the given name.
@ CE
Windows NT (Windows on ARM)
Reg
All possible values of the reg field in the ModR/M byte.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
NodeAddr< FuncNode * > Func
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
static const char * ARMVPTPredToString(ARMVCC::VPTCodes CC)
int popcount(T Value) noexcept
Count the number of set bits in a value.
static bool isARMLowRegister(MCRegister Reg)
isARMLowRegister - Returns true if the register is a low register (r0-r7).
Target & getTheThumbBETarget()
static unsigned ARMCondCodeFromString(StringRef CC)
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
auto reverse(ContainerTy &&C)
@ Never
Never set the bit.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool is_sorted(R &&Range, Compare C)
Wrapper function around std::is_sorted to check if elements in a range R are sorted with respect to a...
bool IsCPSRDead< MCInst >(const MCInst *Instr)
static bool isValidCoprocessorNumber(unsigned Num, const FeatureBitset &featureBits)
isValidCoprocessorNumber - decide whether an explicit coprocessor number is legal in generic instruct...
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ MCAF_Code16
.code16 (X86) / .code 16 (ARM)
@ MCAF_Code32
.code32 (X86) / .code 32 (ARM)
DWARFExpression::Operation Op
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
static unsigned ARMVectorCondCodeFromString(StringRef CC)
static const char * ARMCondCodeToString(ARMCC::CondCodes CC)
Target & getTheARMLETarget()
Target & getTheARMBETarget()
Target & getTheThumbLETarget()
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
const FeatureBitset Features
This struct is a compact representation of a valid (non-zero power of two) alignment.
Holds functions to get, set or test bitfields.
RegisterMCAsmParser - Helper template for registering a target specific assembly parser,...