71#define DEBUG_TYPE "asm-parser"
// Policy for conditional (predicated) instructions that appear outside an
// explicit IT block; selected by the "arm-implicit-it" command-line option.
enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
90 "arm-implicit-it",
cl::init(ImplicitItModeTy::ARMOnly),
91 cl::desc(
"Allow conditional instructions outdside of an IT block"),
93 "Accept in both ISAs, emit implicit ITs in Thumb"),
95 "Warn in ARM, reject in Thumb"),
97 "Accept in ARM, reject in Thumb"),
98 clEnumValN(ImplicitItModeTy::ThumbOnly,
"thumb",
99 "Warn in ARM, emit implicit ITs in Thumb")));
// How many lanes of a vector register an operand refers to:
// none, all lanes, or a single indexed lane.
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
106static inline unsigned extractITMaskBit(
unsigned Mask,
unsigned Position) {
113 return (Mask >> (5 - Position) & 1);
122 Locs PersonalityLocs;
123 Locs PersonalityIndexLocs;
124 Locs HandlerDataLocs;
130 bool hasFnStart()
const {
return !FnStartLocs.empty(); }
131 bool cantUnwind()
const {
return !CantUnwindLocs.empty(); }
132 bool hasHandlerData()
const {
return !HandlerDataLocs.empty(); }
134 bool hasPersonality()
const {
135 return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
138 void recordFnStart(
SMLoc L) { FnStartLocs.push_back(L); }
139 void recordCantUnwind(
SMLoc L) { CantUnwindLocs.push_back(L); }
140 void recordPersonality(
SMLoc L) { PersonalityLocs.push_back(L); }
141 void recordHandlerData(
SMLoc L) { HandlerDataLocs.push_back(L); }
142 void recordPersonalityIndex(
SMLoc L) { PersonalityIndexLocs.push_back(L); }
144 void saveFPReg(
int Reg) { FPReg =
Reg; }
145 int getFPReg()
const {
return FPReg; }
147 void emitFnStartLocNotes()
const {
148 for (
const SMLoc &Loc : FnStartLocs)
149 Parser.
Note(Loc,
".fnstart was specified here");
152 void emitCantUnwindLocNotes()
const {
153 for (
const SMLoc &Loc : CantUnwindLocs)
154 Parser.
Note(Loc,
".cantunwind was specified here");
157 void emitHandlerDataLocNotes()
const {
158 for (
const SMLoc &Loc : HandlerDataLocs)
159 Parser.
Note(Loc,
".handlerdata was specified here");
162 void emitPersonalityLocNotes()
const {
164 PE = PersonalityLocs.end(),
165 PII = PersonalityIndexLocs.begin(),
166 PIE = PersonalityIndexLocs.end();
167 PI != PE || PII != PIE;) {
168 if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
169 Parser.
Note(*PI++,
".personality was specified here");
170 else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
171 Parser.
Note(*PII++,
".personalityindex was specified here");
174 "at the same location");
179 FnStartLocs = Locs();
180 CantUnwindLocs = Locs();
181 PersonalityLocs = Locs();
182 HandlerDataLocs = Locs();
183 PersonalityIndexLocs = Locs();
189class ARMMnemonicSets {
200 return CDE.
count(Mnemonic);
205 bool isVPTPredicableCDEInstr(
StringRef Mnemonic) {
208 return CDEWithVPTSuffix.
count(Mnemonic);
213 bool isITPredicableCDEInstr(
StringRef Mnemonic) {
223 bool isCDEDualRegInstr(
StringRef Mnemonic) {
226 return Mnemonic ==
"cx1d" || Mnemonic ==
"cx1da" ||
227 Mnemonic ==
"cx2d" || Mnemonic ==
"cx2da" ||
228 Mnemonic ==
"cx3d" || Mnemonic ==
"cx3da";
233 for (
StringRef Mnemonic: {
"cx1",
"cx1a",
"cx1d",
"cx1da",
234 "cx2",
"cx2a",
"cx2d",
"cx2da",
235 "cx3",
"cx3a",
"cx3d",
"cx3da", })
238 {
"vcx1",
"vcx1a",
"vcx2",
"vcx2a",
"vcx3",
"vcx3a"}) {
240 CDEWithVPTSuffix.
insert(Mnemonic);
241 CDEWithVPTSuffix.
insert(std::string(Mnemonic) +
"t");
242 CDEWithVPTSuffix.
insert(std::string(Mnemonic) +
"e");
253 "do not have a target streamer");
261 bool NextSymbolIsThumb;
263 bool useImplicitITThumb()
const {
264 return ImplicitItMode == ImplicitItModeTy::Always ||
265 ImplicitItMode == ImplicitItModeTy::ThumbOnly;
268 bool useImplicitITARM()
const {
269 return ImplicitItMode == ImplicitItModeTy::Always ||
270 ImplicitItMode == ImplicitItModeTy::ARMOnly;
285 unsigned CurPosition;
301 if (!inImplicitITBlock()) {
315 for (
const MCInst &Inst : PendingConditionalInsts) {
318 PendingConditionalInsts.clear();
322 ITState.CurPosition = ~0
U;
325 bool inITBlock() {
return ITState.CurPosition != ~0
U; }
326 bool inExplicitITBlock() {
return inITBlock() && ITState.IsExplicit; }
327 bool inImplicitITBlock() {
return inITBlock() && !ITState.IsExplicit; }
329 bool lastInITBlock() {
333 void forwardITPosition() {
334 if (!inITBlock())
return;
339 if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
340 ITState.CurPosition = ~0
U;
344 void rewindImplicitITPosition() {
345 assert(inImplicitITBlock());
346 assert(ITState.CurPosition > 1);
347 ITState.CurPosition--;
349 unsigned NewMask = 0;
350 NewMask |= ITState.Mask & (0xC << TZ);
351 NewMask |= 0x2 << TZ;
352 ITState.Mask = NewMask;
357 void discardImplicitITBlock() {
358 assert(inImplicitITBlock());
359 assert(ITState.CurPosition == 1);
360 ITState.CurPosition = ~0
U;
364 unsigned getDRegFromQReg(
unsigned QReg)
const {
365 return MRI->getSubReg(QReg, ARM::dsub_0);
370 unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
376 void invertCurrentITCondition() {
377 if (ITState.CurPosition == 1) {
380 ITState.Mask ^= 1 << (5 - ITState.CurPosition);
385 bool isITBlockFull() {
386 return inITBlock() && (ITState.Mask & 1);
392 assert(inImplicitITBlock());
397 unsigned NewMask = 0;
399 NewMask |= ITState.Mask & (0xE << TZ);
401 NewMask |= (
Cond != ITState.Cond) << TZ;
403 NewMask |= 1 << (TZ - 1);
404 ITState.Mask = NewMask;
408 void startImplicitITBlock() {
412 ITState.CurPosition = 1;
413 ITState.IsExplicit =
false;
424 ITState.CurPosition = 0;
425 ITState.IsExplicit =
true;
430 unsigned CurPosition;
432 bool inVPTBlock() {
return VPTState.CurPosition != ~0
U; }
433 void forwardVPTPosition() {
434 if (!inVPTBlock())
return;
436 if (++VPTState.CurPosition == 5 - TZ)
437 VPTState.CurPosition = ~0
U;
453 unsigned MnemonicOpsEndInd,
unsigned ListIndex,
454 bool IsARPop =
false);
456 unsigned MnemonicOpsEndInd,
unsigned ListIndex);
461 std::optional<ARM_AM::ShiftOpc> tryParseShiftToken();
462 bool parseRegisterList(
OperandVector &,
bool EnforceOrder =
true,
463 bool AllowRAAC =
false,
464 bool AllowOutOfBoundReg =
false);
467 bool parseImmExpr(int64_t &Out);
470 unsigned &ShiftAmount);
471 bool parseLiteralValues(
unsigned Size,
SMLoc L);
472 bool parseDirectiveThumb(
SMLoc L);
473 bool parseDirectiveARM(
SMLoc L);
474 bool parseDirectiveThumbFunc(
SMLoc L);
475 bool parseDirectiveCode(
SMLoc L);
476 bool parseDirectiveSyntax(
SMLoc L);
478 bool parseDirectiveUnreq(
SMLoc L);
479 bool parseDirectiveArch(
SMLoc L);
480 bool parseDirectiveEabiAttr(
SMLoc L);
481 bool parseDirectiveCPU(
SMLoc L);
482 bool parseDirectiveFPU(
SMLoc L);
483 bool parseDirectiveFnStart(
SMLoc L);
484 bool parseDirectiveFnEnd(
SMLoc L);
485 bool parseDirectiveCantUnwind(
SMLoc L);
486 bool parseDirectivePersonality(
SMLoc L);
487 bool parseDirectiveHandlerData(
SMLoc L);
488 bool parseDirectiveSetFP(
SMLoc L);
489 bool parseDirectivePad(
SMLoc L);
490 bool parseDirectiveRegSave(
SMLoc L,
bool IsVector);
491 bool parseDirectiveInst(
SMLoc L,
char Suffix =
'\0');
492 bool parseDirectiveLtorg(
SMLoc L);
493 bool parseDirectiveEven(
SMLoc L);
494 bool parseDirectivePersonalityIndex(
SMLoc L);
495 bool parseDirectiveUnwindRaw(
SMLoc L);
496 bool parseDirectiveTLSDescSeq(
SMLoc L);
497 bool parseDirectiveMovSP(
SMLoc L);
498 bool parseDirectiveObjectArch(
SMLoc L);
499 bool parseDirectiveArchExtension(
SMLoc L);
500 bool parseDirectiveAlign(
SMLoc L);
501 bool parseDirectiveThumbSet(
SMLoc L);
503 bool parseDirectiveSEHAllocStack(
SMLoc L,
bool Wide);
504 bool parseDirectiveSEHSaveRegs(
SMLoc L,
bool Wide);
505 bool parseDirectiveSEHSaveSP(
SMLoc L);
506 bool parseDirectiveSEHSaveFRegs(
SMLoc L);
507 bool parseDirectiveSEHSaveLR(
SMLoc L);
508 bool parseDirectiveSEHPrologEnd(
SMLoc L,
bool Fragment);
509 bool parseDirectiveSEHNop(
SMLoc L,
bool Wide);
510 bool parseDirectiveSEHEpilogStart(
SMLoc L,
bool Condition);
511 bool parseDirectiveSEHEpilogEnd(
SMLoc L);
512 bool parseDirectiveSEHCustom(
SMLoc L);
514 std::unique_ptr<ARMOperand> defaultCondCodeOp();
515 std::unique_ptr<ARMOperand> defaultCCOutOp();
516 std::unique_ptr<ARMOperand> defaultVPTPredOp();
522 bool &CarrySetting,
unsigned &ProcessorIMod,
525 StringRef FullInst,
bool &CanAcceptCarrySet,
526 bool &CanAcceptPredicationCode,
527 bool &CanAcceptVPTPredicationCode);
530 void tryConvertingToTwoOperandForm(
StringRef Mnemonic,
533 unsigned MnemonicOpsEndInd);
536 unsigned MnemonicOpsEndInd);
543 bool isThumbOne()
const {
547 bool isThumbTwo()
const {
551 bool hasThumb()
const {
555 bool hasThumb2()
const {
559 bool hasV6Ops()
const {
563 bool hasV6T2Ops()
const {
567 bool hasV6MOps()
const {
571 bool hasV7Ops()
const {
575 bool hasV8Ops()
const {
579 bool hasV8MBaseline()
const {
583 bool hasV8MMainline()
const {
586 bool hasV8_1MMainline()
const {
589 bool hasMVE()
const {
592 bool hasMVEFloat()
const {
595 bool hasCDE()
const {
598 bool has8MSecExt()
const {
602 bool hasARM()
const {
606 bool hasDSP()
const {
610 bool hasD32()
const {
614 bool hasV8_1aOps()
const {
618 bool hasRAS()
const {
624 auto FB = ComputeAvailableFeatures(STI.
ToggleFeature(ARM::ModeThumb));
628 void FixModeAfterArchChange(
bool WasThumb,
SMLoc Loc);
630 bool isMClass()
const {
637#define GET_ASSEMBLER_HEADER
638#include "ARMGenAsmMatcher.inc"
678 unsigned MnemonicOpsEndInd);
681 bool shouldOmitVectorPredicateOperand(
StringRef Mnemonic,
683 unsigned MnemonicOpsEndInd);
684 bool isITBlockTerminator(
MCInst &Inst)
const;
687 unsigned MnemonicOpsEndInd);
689 bool ARMMode,
bool Writeback,
690 unsigned MnemonicOpsEndInd);
693 enum ARMMatchResultTy {
695 Match_RequiresNotITBlock,
697 Match_RequiresThumb2,
699 Match_RequiresFlagSetting,
700#define GET_OPERAND_DIAGNOSTIC_TYPES
701#include "ARMGenAsmMatcher.inc"
718 getTargetStreamer().emitTargetAttributes(STI);
721 ITState.CurPosition = ~0
U;
723 VPTState.CurPosition = ~0
U;
725 NextSymbolIsThumb =
false;
731 SMLoc &EndLoc)
override;
737 unsigned Kind)
override;
746 bool MatchingInlineAsm)
override;
749 bool MatchingInlineAsm,
bool &EmitInITBlock,
752 struct NearMissMessage {
757 const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
786 k_InstSyncBarrierOpt,
787 k_TraceSyncBarrierOpt,
796 k_RegisterListWithAPSR,
799 k_FPSRegisterListWithVPR,
800 k_FPDRegisterListWithVPR,
802 k_VectorListAllLanes,
809 k_ConstantPoolImmediate,
810 k_BitfieldDescriptor,
814 SMLoc StartLoc, EndLoc, AlignmentLoc;
829 struct CoprocOptionOp {
871 struct VectorListOp {
878 struct VectorIndexOp {
892 unsigned OffsetRegNum;
897 unsigned isNegative : 1;
900 struct PostIdxRegOp {
907 struct ShifterImmOp {
912 struct RegShiftedRegOp {
919 struct RegShiftedImmOp {
943 struct CoprocOptionOp CoprocOption;
944 struct MBOptOp MBOpt;
945 struct ISBOptOp ISBOpt;
946 struct TSBOptOp TSBOpt;
947 struct ITMaskOp ITMask;
949 struct MMaskOp MMask;
950 struct BankedRegOp BankedReg;
953 struct VectorListOp VectorList;
954 struct VectorIndexOp VectorIndex;
957 struct PostIdxRegOp PostIdxReg;
958 struct ShifterImmOp ShifterImm;
959 struct RegShiftedRegOp RegShiftedReg;
960 struct RegShiftedImmOp RegShiftedImm;
961 struct RotImmOp RotImm;
962 struct ModImmOp ModImm;
967 ARMOperand(KindTy K) :
Kind(
K) {}
980 SMLoc getAlignmentLoc()
const {
981 assert(Kind == k_Memory &&
"Invalid access!");
986 assert(Kind == k_CondCode &&
"Invalid access!");
991 assert(isVPTPred() &&
"Invalid access!");
995 unsigned getCoproc()
const {
996 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) &&
"Invalid access!");
1001 assert(Kind == k_Token &&
"Invalid access!");
1006 assert((Kind == k_Register || Kind == k_CCOut) &&
"Invalid access!");
1011 assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
1012 Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
1013 Kind == k_FPSRegisterListWithVPR ||
1014 Kind == k_FPDRegisterListWithVPR) &&
1019 const MCExpr *getImm()
const {
1024 const MCExpr *getConstantPoolImm()
const {
1025 assert(isConstantPoolImm() &&
"Invalid access!");
1029 unsigned getVectorIndex()
const {
1030 assert(Kind == k_VectorIndex &&
"Invalid access!");
1031 return VectorIndex.Val;
1035 assert(Kind == k_MemBarrierOpt &&
"Invalid access!");
1040 assert(Kind == k_InstSyncBarrierOpt &&
"Invalid access!");
1045 assert(Kind == k_TraceSyncBarrierOpt &&
"Invalid access!");
1050 assert(Kind == k_ProcIFlags &&
"Invalid access!");
1054 unsigned getMSRMask()
const {
1055 assert(Kind == k_MSRMask &&
"Invalid access!");
1059 unsigned getBankedReg()
const {
1060 assert(Kind == k_BankedReg &&
"Invalid access!");
1061 return BankedReg.Val;
1064 bool isCoprocNum()
const {
return Kind == k_CoprocNum; }
1065 bool isCoprocReg()
const {
return Kind == k_CoprocReg; }
1066 bool isCoprocOption()
const {
return Kind == k_CoprocOption; }
1067 bool isCondCode()
const {
return Kind == k_CondCode; }
1068 bool isVPTPred()
const {
return Kind == k_VPTPred; }
1069 bool isCCOut()
const {
return Kind == k_CCOut; }
1070 bool isITMask()
const {
return Kind == k_ITCondMask; }
1071 bool isITCondCode()
const {
return Kind == k_CondCode; }
1072 bool isImm()
const override {
1073 return Kind == k_Immediate;
1076 bool isARMBranchTarget()
const {
1077 if (!
isImm())
return false;
1079 if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1080 return CE->getValue() % 4 == 0;
1085 bool isThumbBranchTarget()
const {
1086 if (!
isImm())
return false;
1088 if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1089 return CE->getValue() % 2 == 0;
1095 template<
unsigned w
idth,
unsigned scale>
1096 bool isUnsignedOffset()
const {
1097 if (!
isImm())
return false;
1098 if (isa<MCSymbolRefExpr>(
Imm.Val))
return true;
1100 int64_t Val =
CE->getValue();
1102 int64_t
Max =
Align * ((1LL << width) - 1);
1103 return ((Val %
Align) == 0) && (Val >= 0) && (Val <= Max);
1110 template<
unsigned w
idth,
unsigned scale>
1111 bool isSignedOffset()
const {
1112 if (!
isImm())
return false;
1113 if (isa<MCSymbolRefExpr>(
Imm.Val))
return true;
1115 int64_t Val =
CE->getValue();
1117 int64_t
Max =
Align * ((1LL << (width-1)) - 1);
1118 int64_t Min = -
Align * (1LL << (width-1));
1119 return ((Val %
Align) == 0) && (Val >= Min) && (Val <= Max);
1126 bool isLEOffset()
const {
1127 if (!
isImm())
return false;
1128 if (isa<MCSymbolRefExpr>(
Imm.Val))
return true;
1130 int64_t Val =
CE->getValue();
1131 return Val < 0 && Val >= -4094 && (Val & 1) == 0;
1140 bool isThumbMemPC()
const {
1143 if (isa<MCSymbolRefExpr>(
Imm.Val))
return true;
1145 if (!CE)
return false;
1146 Val =
CE->getValue();
1148 else if (isGPRMem()) {
1149 if(!
Memory.OffsetImm ||
Memory.OffsetRegNum)
return false;
1150 if(
Memory.BaseRegNum != ARM::PC)
return false;
1151 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
1152 Val =
CE->getValue();
1157 return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
1160 bool isFPImm()
const {
1161 if (!
isImm())
return false;
1163 if (!CE)
return false;
1168 template<
int64_t N,
int64_t M>
1169 bool isImmediate()
const {
1170 if (!
isImm())
return false;
1172 if (!CE)
return false;
1173 int64_t
Value =
CE->getValue();
1177 template<
int64_t N,
int64_t M>
1178 bool isImmediateS4()
const {
1179 if (!
isImm())
return false;
1181 if (!CE)
return false;
1182 int64_t
Value =
CE->getValue();
1185 template<
int64_t N,
int64_t M>
1186 bool isImmediateS2()
const {
1187 if (!
isImm())
return false;
1189 if (!CE)
return false;
1190 int64_t
Value =
CE->getValue();
1193 bool isFBits16()
const {
1194 return isImmediate<0, 17>();
1196 bool isFBits32()
const {
1197 return isImmediate<1, 33>();
1199 bool isImm8s4()
const {
1200 return isImmediateS4<-1020, 1020>();
1202 bool isImm7s4()
const {
1203 return isImmediateS4<-508, 508>();
1205 bool isImm7Shift0()
const {
1206 return isImmediate<-127, 127>();
1208 bool isImm7Shift1()
const {
1209 return isImmediateS2<-255, 255>();
1211 bool isImm7Shift2()
const {
1212 return isImmediateS4<-511, 511>();
1214 bool isImm7()
const {
1215 return isImmediate<-127, 127>();
1217 bool isImm0_1020s4()
const {
1218 return isImmediateS4<0, 1020>();
1220 bool isImm0_508s4()
const {
1221 return isImmediateS4<0, 508>();
1223 bool isImm0_508s4Neg()
const {
1224 if (!
isImm())
return false;
1226 if (!CE)
return false;
1227 int64_t
Value = -
CE->getValue();
1232 bool isImm0_4095Neg()
const {
1233 if (!
isImm())
return false;
1235 if (!CE)
return false;
1240 if ((
CE->getValue() >> 32) > 0)
return false;
1245 bool isImm0_7()
const {
1246 return isImmediate<0, 7>();
1249 bool isImm1_16()
const {
1250 return isImmediate<1, 16>();
1253 bool isImm1_32()
const {
1254 return isImmediate<1, 32>();
1257 bool isImm8_255()
const {
1258 return isImmediate<8, 255>();
1261 bool isImm0_255Expr()
const {
1269 int64_t
Value =
CE->getValue();
1270 return isUInt<8>(
Value);
1273 bool isImm256_65535Expr()
const {
1274 if (!
isImm())
return false;
1278 if (!CE)
return true;
1279 int64_t
Value =
CE->getValue();
1283 bool isImm0_65535Expr()
const {
1284 if (!
isImm())
return false;
1288 if (!CE)
return true;
1289 int64_t
Value =
CE->getValue();
1293 bool isImm24bit()
const {
1294 return isImmediate<0, 0xffffff + 1>();
1297 bool isImmThumbSR()
const {
1298 return isImmediate<1, 33>();
1301 bool isPKHLSLImm()
const {
1302 return isImmediate<0, 32>();
1305 bool isPKHASRImm()
const {
1306 return isImmediate<0, 33>();
1309 bool isAdrLabel()
const {
1312 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1316 if (!
isImm())
return false;
1318 if (!CE)
return false;
1319 int64_t
Value =
CE->getValue();
1324 bool isT2SOImm()
const {
1327 if (
isImm() && !isa<MCConstantExpr>(getImm())) {
1330 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
1334 if (!
isImm())
return false;
1336 if (!CE)
return false;
1337 int64_t
Value =
CE->getValue();
1341 bool isT2SOImmNot()
const {
1342 if (!
isImm())
return false;
1344 if (!CE)
return false;
1345 int64_t
Value =
CE->getValue();
1350 bool isT2SOImmNeg()
const {
1351 if (!
isImm())
return false;
1353 if (!CE)
return false;
1354 int64_t
Value =
CE->getValue();
1360 bool isSetEndImm()
const {
1361 if (!
isImm())
return false;
1363 if (!CE)
return false;
1364 int64_t
Value =
CE->getValue();
1368 bool isReg()
const override {
return Kind == k_Register; }
1369 bool isRegList()
const {
return Kind == k_RegisterList; }
1370 bool isRegListWithAPSR()
const {
1371 return Kind == k_RegisterListWithAPSR ||
Kind == k_RegisterList;
1373 bool isDReg()
const {
1375 ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
Reg.RegNum);
1377 bool isQReg()
const {
1379 ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
Reg.RegNum);
1381 bool isDPRRegList()
const {
return Kind == k_DPRRegisterList; }
1382 bool isSPRRegList()
const {
return Kind == k_SPRRegisterList; }
1383 bool isFPSRegListWithVPR()
const {
return Kind == k_FPSRegisterListWithVPR; }
1384 bool isFPDRegListWithVPR()
const {
return Kind == k_FPDRegisterListWithVPR; }
1385 bool isToken()
const override {
return Kind == k_Token; }
1386 bool isMemBarrierOpt()
const {
return Kind == k_MemBarrierOpt; }
1387 bool isInstSyncBarrierOpt()
const {
return Kind == k_InstSyncBarrierOpt; }
1388 bool isTraceSyncBarrierOpt()
const {
return Kind == k_TraceSyncBarrierOpt; }
1389 bool isMem()
const override {
1390 return isGPRMem() || isMVEMem();
1392 bool isMVEMem()
const {
1393 if (Kind != k_Memory)
1396 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
Memory.BaseRegNum) &&
1397 !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
Memory.BaseRegNum))
1399 if (
Memory.OffsetRegNum &&
1400 !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1405 bool isGPRMem()
const {
1406 if (Kind != k_Memory)
1409 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
Memory.BaseRegNum))
1411 if (
Memory.OffsetRegNum &&
1412 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
Memory.OffsetRegNum))
1416 bool isShifterImm()
const {
return Kind == k_ShifterImmediate; }
1417 bool isRegShiftedReg()
const {
1418 return Kind == k_ShiftedRegister &&
1419 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1420 RegShiftedReg.SrcReg) &&
1421 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1422 RegShiftedReg.ShiftReg);
1424 bool isRegShiftedImm()
const {
1425 return Kind == k_ShiftedImmediate &&
1426 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1427 RegShiftedImm.SrcReg);
1429 bool isRotImm()
const {
return Kind == k_RotateImmediate; }
1431 template<
unsigned Min,
unsigned Max>
1432 bool isPowerTwoInRange()
const {
1433 if (!
isImm())
return false;
1435 if (!CE)
return false;
1436 int64_t
Value =
CE->getValue();
1440 bool isModImm()
const {
return Kind == k_ModifiedImmediate; }
1442 bool isModImmNot()
const {
1443 if (!
isImm())
return false;
1445 if (!CE)
return false;
1446 int64_t
Value =
CE->getValue();
1450 bool isModImmNeg()
const {
1451 if (!
isImm())
return false;
1453 if (!CE)
return false;
1454 int64_t
Value =
CE->getValue();
1459 bool isThumbModImmNeg1_7()
const {
1460 if (!
isImm())
return false;
1462 if (!CE)
return false;
1463 int32_t
Value = -(int32_t)
CE->getValue();
1467 bool isThumbModImmNeg8_255()
const {
1468 if (!
isImm())
return false;
1470 if (!CE)
return false;
1471 int32_t
Value = -(int32_t)
CE->getValue();
1475 bool isConstantPoolImm()
const {
return Kind == k_ConstantPoolImmediate; }
1476 bool isBitfield()
const {
return Kind == k_BitfieldDescriptor; }
1477 bool isPostIdxRegShifted()
const {
1478 return Kind == k_PostIndexRegister &&
1479 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
1481 bool isPostIdxReg()
const {
1484 bool isMemNoOffset(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1488 return Memory.OffsetRegNum == 0 &&
Memory.OffsetImm ==
nullptr &&
1489 (alignOK ||
Memory.Alignment == Alignment);
1491 bool isMemNoOffsetT2(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1495 if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].
contains(
1500 return Memory.OffsetRegNum == 0 &&
Memory.OffsetImm ==
nullptr &&
1501 (alignOK ||
Memory.Alignment == Alignment);
1503 bool isMemNoOffsetT2NoSp(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1507 if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].
contains(
1512 return Memory.OffsetRegNum == 0 &&
Memory.OffsetImm ==
nullptr &&
1513 (alignOK ||
Memory.Alignment == Alignment);
1515 bool isMemNoOffsetT(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1519 if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].
contains(
1524 return Memory.OffsetRegNum == 0 &&
Memory.OffsetImm ==
nullptr &&
1525 (alignOK ||
Memory.Alignment == Alignment);
1527 bool isMemPCRelImm12()
const {
1528 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1531 if (
Memory.BaseRegNum != ARM::PC)
1534 if (!
Memory.OffsetImm)
return true;
1535 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1536 int64_t Val =
CE->getValue();
1537 return (Val > -4096 && Val < 4096) ||
1538 (Val == std::numeric_limits<int32_t>::min());
1543 bool isAlignedMemory()
const {
1544 return isMemNoOffset(
true);
1547 bool isAlignedMemoryNone()
const {
1548 return isMemNoOffset(
false, 0);
1551 bool isDupAlignedMemoryNone()
const {
1552 return isMemNoOffset(
false, 0);
1555 bool isAlignedMemory16()
const {
1556 if (isMemNoOffset(
false, 2))
1558 return isMemNoOffset(
false, 0);
1561 bool isDupAlignedMemory16()
const {
1562 if (isMemNoOffset(
false, 2))
1564 return isMemNoOffset(
false, 0);
1567 bool isAlignedMemory32()
const {
1568 if (isMemNoOffset(
false, 4))
1570 return isMemNoOffset(
false, 0);
1573 bool isDupAlignedMemory32()
const {
1574 if (isMemNoOffset(
false, 4))
1576 return isMemNoOffset(
false, 0);
1579 bool isAlignedMemory64()
const {
1580 if (isMemNoOffset(
false, 8))
1582 return isMemNoOffset(
false, 0);
1585 bool isDupAlignedMemory64()
const {
1586 if (isMemNoOffset(
false, 8))
1588 return isMemNoOffset(
false, 0);
1591 bool isAlignedMemory64or128()
const {
1592 if (isMemNoOffset(
false, 8))
1594 if (isMemNoOffset(
false, 16))
1596 return isMemNoOffset(
false, 0);
1599 bool isDupAlignedMemory64or128()
const {
1600 if (isMemNoOffset(
false, 8))
1602 if (isMemNoOffset(
false, 16))
1604 return isMemNoOffset(
false, 0);
1607 bool isAlignedMemory64or128or256()
const {
1608 if (isMemNoOffset(
false, 8))
1610 if (isMemNoOffset(
false, 16))
1612 if (isMemNoOffset(
false, 32))
1614 return isMemNoOffset(
false, 0);
1617 bool isAddrMode2()
const {
1618 if (!isGPRMem() ||
Memory.Alignment != 0)
return false;
1620 if (
Memory.OffsetRegNum)
return true;
1622 if (!
Memory.OffsetImm)
return true;
1623 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1624 int64_t Val =
CE->getValue();
1625 return Val > -4096 && Val < 4096;
1630 bool isAM2OffsetImm()
const {
1631 if (!
isImm())
return false;
1634 if (!CE)
return false;
1635 int64_t Val =
CE->getValue();
1636 return (Val == std::numeric_limits<int32_t>::min()) ||
1637 (Val > -4096 && Val < 4096);
1640 bool isAddrMode3()
const {
1644 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1646 if (!isGPRMem() ||
Memory.Alignment != 0)
return false;
1650 if (
Memory.OffsetRegNum)
return true;
1652 if (!
Memory.OffsetImm)
return true;
1653 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1654 int64_t Val =
CE->getValue();
1657 return (Val > -256 && Val < 256) ||
1658 Val == std::numeric_limits<int32_t>::min();
1663 bool isAM3Offset()
const {
1670 if (!CE)
return false;
1671 int64_t Val =
CE->getValue();
1673 return (Val > -256 && Val < 256) ||
1674 Val == std::numeric_limits<int32_t>::min();
1677 bool isAddrMode5()
const {
1681 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1683 if (!isGPRMem() ||
Memory.Alignment != 0)
return false;
1685 if (
Memory.OffsetRegNum)
return false;
1687 if (!
Memory.OffsetImm)
return true;
1688 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1689 int64_t Val =
CE->getValue();
1690 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1691 Val == std::numeric_limits<int32_t>::min();
1696 bool isAddrMode5FP16()
const {
1700 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1702 if (!isGPRMem() ||
Memory.Alignment != 0)
return false;
1704 if (
Memory.OffsetRegNum)
return false;
1706 if (!
Memory.OffsetImm)
return true;
1707 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1708 int64_t Val =
CE->getValue();
1709 return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1710 Val == std::numeric_limits<int32_t>::min();
1715 bool isMemTBB()
const {
1716 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.isNegative ||
1722 bool isMemTBH()
const {
1723 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.isNegative ||
1730 bool isMemRegOffset()
const {
1731 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.Alignment != 0)
1736 bool isT2MemRegOffset()
const {
1737 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.isNegative ||
1748 bool isMemThumbRR()
const {
1751 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.isNegative ||
1758 bool isMemThumbRIs4()
const {
1759 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
1763 if (!
Memory.OffsetImm)
return true;
1764 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1765 int64_t Val =
CE->getValue();
1766 return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1771 bool isMemThumbRIs2()
const {
1772 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
1776 if (!
Memory.OffsetImm)
return true;
1777 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1778 int64_t Val =
CE->getValue();
1779 return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1784 bool isMemThumbRIs1()
const {
1785 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
1789 if (!
Memory.OffsetImm)
return true;
1790 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1791 int64_t Val =
CE->getValue();
1792 return Val >= 0 && Val <= 31;
1797 bool isMemThumbSPI()
const {
1798 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
1802 if (!
Memory.OffsetImm)
return true;
1803 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1804 int64_t Val =
CE->getValue();
1805 return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1810 bool isMemImm8s4Offset()
const {
1814 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1816 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1819 if (!
Memory.OffsetImm)
return true;
1820 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1821 int64_t Val =
CE->getValue();
1823 return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1824 Val == std::numeric_limits<int32_t>::min();
1829 bool isMemImm7s4Offset()
const {
1833 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1835 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0 ||
1836 !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1840 if (!
Memory.OffsetImm)
return true;
1841 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1842 int64_t Val =
CE->getValue();
1844 return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
1849 bool isMemImm0_1020s4Offset()
const {
1850 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1853 if (!
Memory.OffsetImm)
return true;
1854 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1855 int64_t Val =
CE->getValue();
1856 return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1861 bool isMemImm8Offset()
const {
1862 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1865 if (
Memory.BaseRegNum == ARM::PC)
return false;
1867 if (!
Memory.OffsetImm)
return true;
1868 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1869 int64_t Val =
CE->getValue();
1870 return (Val == std::numeric_limits<int32_t>::min()) ||
1871 (Val > -256 && Val < 256);
1876 template<
unsigned Bits,
unsigned RegClassID>
1877 bool isMemImm7ShiftedOffset()
const {
1878 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0 ||
1879 !ARMMCRegisterClasses[RegClassID].contains(
Memory.BaseRegNum))
1885 if (!
Memory.OffsetImm)
return true;
1886 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1887 int64_t Val =
CE->getValue();
1891 if (Val == INT32_MIN)
1894 unsigned Divisor = 1U <<
Bits;
1897 if (Val % Divisor != 0)
1902 return (Val >= -127 && Val <= 127);
1907 template <
int shift>
bool isMemRegRQOffset()
const {
1908 if (!isMVEMem() ||
Memory.OffsetImm !=
nullptr ||
Memory.Alignment != 0)
1911 if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].
contains(
1914 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
1928 template <
int shift>
bool isMemRegQOffset()
const {
1929 if (!isMVEMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1932 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
1938 static_assert(shift < 56,
1939 "Such that we dont shift by a value higher than 62");
1940 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1941 int64_t Val =
CE->getValue();
1944 if ((Val & ((1U << shift) - 1)) != 0)
1950 int64_t
Range = (1U << (7 + shift)) - 1;
1951 return (Val == INT32_MIN) || (Val > -
Range && Val <
Range);
1956 bool isMemPosImm8Offset()
const {
1957 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1960 if (!
Memory.OffsetImm)
return true;
1961 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1962 int64_t Val =
CE->getValue();
1963 return Val >= 0 && Val < 256;
1968 bool isMemNegImm8Offset()
const {
1969 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1972 if (
Memory.BaseRegNum == ARM::PC)
return false;
1974 if (!
Memory.OffsetImm)
return false;
1975 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1976 int64_t Val =
CE->getValue();
1977 return (Val == std::numeric_limits<int32_t>::min()) ||
1978 (Val > -256 && Val < 0);
1983 bool isMemUImm12Offset()
const {
1984 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1987 if (!
Memory.OffsetImm)
return true;
1988 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1989 int64_t Val =
CE->getValue();
1990 return (Val >= 0 && Val < 4096);
1995 bool isMemImm12Offset()
const {
2000 if (
isImm() && !isa<MCConstantExpr>(getImm()))
2003 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
2006 if (!
Memory.OffsetImm)
return true;
2007 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
2008 int64_t Val =
CE->getValue();
2009 return (Val > -4096 && Val < 4096) ||
2010 (Val == std::numeric_limits<int32_t>::min());
2017 bool isConstPoolAsmImm()
const {
2020 return (isConstantPoolImm());
2023 bool isPostIdxImm8()
const {
2024 if (!
isImm())
return false;
2026 if (!CE)
return false;
2027 int64_t Val =
CE->getValue();
2028 return (Val > -256 && Val < 256) ||
2029 (Val == std::numeric_limits<int32_t>::min());
2032 bool isPostIdxImm8s4()
const {
2033 if (!
isImm())
return false;
2035 if (!CE)
return false;
2036 int64_t Val =
CE->getValue();
2037 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
2038 (Val == std::numeric_limits<int32_t>::min());
2041 bool isMSRMask()
const {
return Kind == k_MSRMask; }
2042 bool isBankedReg()
const {
return Kind == k_BankedReg; }
2043 bool isProcIFlags()
const {
return Kind == k_ProcIFlags; }
2046 bool isVectorList()
const {
return Kind == k_VectorList; }
2048 bool isSingleSpacedVectorList()
const {
2049 return Kind == k_VectorList && !VectorList.isDoubleSpaced;
2052 bool isDoubleSpacedVectorList()
const {
2053 return Kind == k_VectorList && VectorList.isDoubleSpaced;
2056 bool isVecListOneD()
const {
2057 if (!isSingleSpacedVectorList())
return false;
2058 return VectorList.Count == 1;
2061 bool isVecListTwoMQ()
const {
2062 return isSingleSpacedVectorList() && VectorList.Count == 2 &&
2063 ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2067 bool isVecListDPair()
const {
2068 if (!isSingleSpacedVectorList())
return false;
2069 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2073 bool isVecListThreeD()
const {
2074 if (!isSingleSpacedVectorList())
return false;
2075 return VectorList.Count == 3;
2078 bool isVecListFourD()
const {
2079 if (!isSingleSpacedVectorList())
return false;
2080 return VectorList.Count == 4;
2083 bool isVecListDPairSpaced()
const {
2084 if (Kind != k_VectorList)
return false;
2085 if (isSingleSpacedVectorList())
return false;
2086 return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
2090 bool isVecListThreeQ()
const {
2091 if (!isDoubleSpacedVectorList())
return false;
2092 return VectorList.Count == 3;
2095 bool isVecListFourQ()
const {
2096 if (!isDoubleSpacedVectorList())
return false;
2097 return VectorList.Count == 4;
2100 bool isVecListFourMQ()
const {
2101 return isSingleSpacedVectorList() && VectorList.Count == 4 &&
2102 ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2106 bool isSingleSpacedVectorAllLanes()
const {
2107 return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
2110 bool isDoubleSpacedVectorAllLanes()
const {
2111 return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
2114 bool isVecListOneDAllLanes()
const {
2115 if (!isSingleSpacedVectorAllLanes())
return false;
2116 return VectorList.Count == 1;
2119 bool isVecListDPairAllLanes()
const {
2120 if (!isSingleSpacedVectorAllLanes())
return false;
2121 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2125 bool isVecListDPairSpacedAllLanes()
const {
2126 if (!isDoubleSpacedVectorAllLanes())
return false;
2127 return VectorList.Count == 2;
2130 bool isVecListThreeDAllLanes()
const {
2131 if (!isSingleSpacedVectorAllLanes())
return false;
2132 return VectorList.Count == 3;
2135 bool isVecListThreeQAllLanes()
const {
2136 if (!isDoubleSpacedVectorAllLanes())
return false;
2137 return VectorList.Count == 3;
2140 bool isVecListFourDAllLanes()
const {
2141 if (!isSingleSpacedVectorAllLanes())
return false;
2142 return VectorList.Count == 4;
2145 bool isVecListFourQAllLanes()
const {
2146 if (!isDoubleSpacedVectorAllLanes())
return false;
2147 return VectorList.Count == 4;
2150 bool isSingleSpacedVectorIndexed()
const {
2151 return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
2154 bool isDoubleSpacedVectorIndexed()
const {
2155 return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
2158 bool isVecListOneDByteIndexed()
const {
2159 if (!isSingleSpacedVectorIndexed())
return false;
2160 return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
2163 bool isVecListOneDHWordIndexed()
const {
2164 if (!isSingleSpacedVectorIndexed())
return false;
2165 return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
2168 bool isVecListOneDWordIndexed()
const {
2169 if (!isSingleSpacedVectorIndexed())
return false;
2170 return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
2173 bool isVecListTwoDByteIndexed()
const {
2174 if (!isSingleSpacedVectorIndexed())
return false;
2175 return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
2178 bool isVecListTwoDHWordIndexed()
const {
2179 if (!isSingleSpacedVectorIndexed())
return false;
2180 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2183 bool isVecListTwoQWordIndexed()
const {
2184 if (!isDoubleSpacedVectorIndexed())
return false;
2185 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2188 bool isVecListTwoQHWordIndexed()
const {
2189 if (!isDoubleSpacedVectorIndexed())
return false;
2190 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2193 bool isVecListTwoDWordIndexed()
const {
2194 if (!isSingleSpacedVectorIndexed())
return false;
2195 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2198 bool isVecListThreeDByteIndexed()
const {
2199 if (!isSingleSpacedVectorIndexed())
return false;
2200 return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
2203 bool isVecListThreeDHWordIndexed()
const {
2204 if (!isSingleSpacedVectorIndexed())
return false;
2205 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2208 bool isVecListThreeQWordIndexed()
const {
2209 if (!isDoubleSpacedVectorIndexed())
return false;
2210 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2213 bool isVecListThreeQHWordIndexed()
const {
2214 if (!isDoubleSpacedVectorIndexed())
return false;
2215 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2218 bool isVecListThreeDWordIndexed()
const {
2219 if (!isSingleSpacedVectorIndexed())
return false;
2220 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2223 bool isVecListFourDByteIndexed()
const {
2224 if (!isSingleSpacedVectorIndexed())
return false;
2225 return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
2228 bool isVecListFourDHWordIndexed()
const {
2229 if (!isSingleSpacedVectorIndexed())
return false;
2230 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2233 bool isVecListFourQWordIndexed()
const {
2234 if (!isDoubleSpacedVectorIndexed())
return false;
2235 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2238 bool isVecListFourQHWordIndexed()
const {
2239 if (!isDoubleSpacedVectorIndexed())
return false;
2240 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2243 bool isVecListFourDWordIndexed()
const {
2244 if (!isSingleSpacedVectorIndexed())
return false;
2245 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2248 bool isVectorIndex()
const {
return Kind == k_VectorIndex; }
2250 template <
unsigned NumLanes>
2251 bool isVectorIndexInRange()
const {
2252 if (Kind != k_VectorIndex)
return false;
2253 return VectorIndex.Val < NumLanes;
2256 bool isVectorIndex8()
const {
return isVectorIndexInRange<8>(); }
2257 bool isVectorIndex16()
const {
return isVectorIndexInRange<4>(); }
2258 bool isVectorIndex32()
const {
return isVectorIndexInRange<2>(); }
2259 bool isVectorIndex64()
const {
return isVectorIndexInRange<1>(); }
2261 template<
int PermittedValue,
int OtherPermittedValue>
2262 bool isMVEPairVectorIndex()
const {
2263 if (Kind != k_VectorIndex)
return false;
2264 return VectorIndex.Val == PermittedValue ||
2265 VectorIndex.Val == OtherPermittedValue;
2268 bool isNEONi8splat()
const {
2269 if (!
isImm())
return false;
2272 if (!CE)
return false;
2273 int64_t
Value =
CE->getValue();
2280 if (isNEONByteReplicate(2))
2286 if (!CE)
return false;
2287 unsigned Value =
CE->getValue();
2291 bool isNEONi16splatNot()
const {
2296 if (!CE)
return false;
2297 unsigned Value =
CE->getValue();
2302 if (isNEONByteReplicate(4))
2308 if (!CE)
return false;
2309 unsigned Value =
CE->getValue();
2313 bool isNEONi32splatNot()
const {
2318 if (!CE)
return false;
2319 unsigned Value =
CE->getValue();
/// Returns true if Value is a valid immediate for a NEON 32-bit VMOV/VMVN.
/// Valid forms are a 32-bit value with all set bits confined to a single
/// byte position (0x000000XX, 0x0000XX00, 0x00XX0000, 0xXX000000), or the
/// "ones-extended" forms 0x0000XXff and 0x00XXffff.
static bool isValidNEONi32vmovImm(int64_t Value) {
  return ((Value & 0xffffffffffffff00) == 0) ||
         ((Value & 0xffffffffffff00ff) == 0) ||
         ((Value & 0xffffffffff00ffff) == 0) ||
         ((Value & 0xffffffff00ffffff) == 0) ||
         ((Value & 0xffffffffffff00ff) == 0xff) ||
         ((Value & 0xffffffffff00ffff) == 0xffff);
}
2334 bool isNEONReplicate(
unsigned Width,
unsigned NumElems,
bool Inv)
const {
2335 assert((Width == 8 || Width == 16 || Width == 32) &&
2336 "Invalid element width");
2337 assert(NumElems * Width <= 64 &&
"Invalid result width");
2345 int64_t
Value =
CE->getValue();
2353 if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
2355 if (Width == 32 && !isValidNEONi32vmovImm(Elem))
2358 for (
unsigned i = 1; i < NumElems; ++i) {
2360 if ((
Value & Mask) != Elem)
2366 bool isNEONByteReplicate(
unsigned NumBytes)
const {
2367 return isNEONReplicate(8, NumBytes,
false);
/// Sanity-check the element widths of a NEON replicate: the source width
/// must be one of {8, 16, 32}, the destination one of {16, 32, 64}, and
/// the source must be strictly narrower than the destination.
static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
  assert((FromW == 8 || FromW == 16 || FromW == 32) &&
         "Invalid source width");
  assert((ToW == 16 || ToW == 32 || ToW == 64) &&
         "Invalid destination width");
  assert(FromW < ToW && "ToW is not less than FromW");
}
2378 template<
unsigned FromW,
unsigned ToW>
2379 bool isNEONmovReplicate()
const {
2380 checkNeonReplicateArgs(FromW, ToW);
2381 if (ToW == 64 && isNEONi64splat())
2383 return isNEONReplicate(FromW, ToW / FromW,
false);
2386 template<
unsigned FromW,
unsigned ToW>
2387 bool isNEONinvReplicate()
const {
2388 checkNeonReplicateArgs(FromW, ToW);
2389 return isNEONReplicate(FromW, ToW / FromW,
true);
2392 bool isNEONi32vmov()
const {
2393 if (isNEONByteReplicate(4))
2401 return isValidNEONi32vmovImm(
CE->getValue());
2404 bool isNEONi32vmovNeg()
const {
2405 if (!
isImm())
return false;
2408 if (!CE)
return false;
2409 return isValidNEONi32vmovImm(~
CE->getValue());
2412 bool isNEONi64splat()
const {
2413 if (!
isImm())
return false;
2416 if (!CE)
return false;
2419 for (
unsigned i = 0; i < 8; ++i, Value >>= 8)
2420 if ((
Value & 0xff) != 0 && (
Value & 0xff) != 0xff)
return false;
2424 template<
int64_t Angle,
int64_t Remainder>
2425 bool isComplexRotation()
const {
2426 if (!
isImm())
return false;
2429 if (!CE)
return false;
2432 return (
Value % Angle == Remainder &&
Value <= 270);
2435 bool isMVELongShift()
const {
2436 if (!
isImm())
return false;
2439 if (!CE)
return false;
2444 bool isMveSaturateOp()
const {
2445 if (!
isImm())
return false;
2447 if (!CE)
return false;
2452 bool isITCondCodeNoAL()
const {
2453 if (!isITCondCode())
return false;
2458 bool isITCondCodeRestrictedI()
const {
2459 if (!isITCondCode())
2465 bool isITCondCodeRestrictedS()
const {
2466 if (!isITCondCode())
2473 bool isITCondCodeRestrictedU()
const {
2474 if (!isITCondCode())
2480 bool isITCondCodeRestrictedFP()
const {
2481 if (!isITCondCode())
2488 void setVecListDPair(
unsigned int DPair) {
2489 Kind = k_VectorList;
2490 VectorList.RegNum = DPair;
2491 VectorList.Count = 2;
2492 VectorList.isDoubleSpaced =
false;
2495 void setVecListOneD(
unsigned int DReg) {
2496 Kind = k_VectorList;
2497 VectorList.RegNum =
DReg;
2498 VectorList.Count = 1;
2499 VectorList.isDoubleSpaced =
false;
2506 else if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2512 void addARMBranchTargetOperands(
MCInst &Inst,
unsigned N)
const {
2513 assert(
N == 1 &&
"Invalid number of operands!");
2514 addExpr(Inst, getImm());
2517 void addThumbBranchTargetOperands(
MCInst &Inst,
unsigned N)
const {
2518 assert(
N == 1 &&
"Invalid number of operands!");
2519 addExpr(Inst, getImm());
2522 void addCondCodeOperands(
MCInst &Inst,
unsigned N)
const {
2523 assert(
N == 2 &&
"Invalid number of operands!");
2529 void addVPTPredNOperands(
MCInst &Inst,
unsigned N)
const {
2530 assert(
N == 3 &&
"Invalid number of operands!");
2532 unsigned RegNum = getVPTPred() ==
ARMVCC::None ? 0: ARM::P0;
2537 void addVPTPredROperands(
MCInst &Inst,
unsigned N)
const {
2538 assert(
N == 4 &&
"Invalid number of operands!");
2539 addVPTPredNOperands(Inst,
N-1);
2549 "Inactive register in vpred_r is not tied to an output!");
2555 void addCoprocNumOperands(
MCInst &Inst,
unsigned N)
const {
2556 assert(
N == 1 &&
"Invalid number of operands!");
2560 void addCoprocRegOperands(
MCInst &Inst,
unsigned N)
const {
2561 assert(
N == 1 &&
"Invalid number of operands!");
2565 void addCoprocOptionOperands(
MCInst &Inst,
unsigned N)
const {
2566 assert(
N == 1 &&
"Invalid number of operands!");
2570 void addITMaskOperands(
MCInst &Inst,
unsigned N)
const {
2571 assert(
N == 1 &&
"Invalid number of operands!");
2575 void addITCondCodeOperands(
MCInst &Inst,
unsigned N)
const {
2576 assert(
N == 1 &&
"Invalid number of operands!");
2580 void addITCondCodeInvOperands(
MCInst &Inst,
unsigned N)
const {
2581 assert(
N == 1 &&
"Invalid number of operands!");
2585 void addCCOutOperands(
MCInst &Inst,
unsigned N)
const {
2586 assert(
N == 1 &&
"Invalid number of operands!");
2590 void addRegOperands(
MCInst &Inst,
unsigned N)
const {
2591 assert(
N == 1 &&
"Invalid number of operands!");
2595 void addRegShiftedRegOperands(
MCInst &Inst,
unsigned N)
const {
2596 assert(
N == 3 &&
"Invalid number of operands!");
2597 assert(isRegShiftedReg() &&
2598 "addRegShiftedRegOperands() on non-RegShiftedReg!");
2605 void addRegShiftedImmOperands(
MCInst &Inst,
unsigned N)
const {
2606 assert(
N == 2 &&
"Invalid number of operands!");
2607 assert(isRegShiftedImm() &&
2608 "addRegShiftedImmOperands() on non-RegShiftedImm!");
2611 unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2616 void addShifterImmOperands(
MCInst &Inst,
unsigned N)
const {
2617 assert(
N == 1 &&
"Invalid number of operands!");
2622 void addRegListOperands(
MCInst &Inst,
unsigned N)
const {
2623 assert(
N == 1 &&
"Invalid number of operands!");
2625 for (
unsigned Reg : RegList)
2629 void addRegListWithAPSROperands(
MCInst &Inst,
unsigned N)
const {
2630 assert(
N == 1 &&
"Invalid number of operands!");
2632 for (
unsigned Reg : RegList)
2636 void addDPRRegListOperands(
MCInst &Inst,
unsigned N)
const {
2637 addRegListOperands(Inst,
N);
2640 void addSPRRegListOperands(
MCInst &Inst,
unsigned N)
const {
2641 addRegListOperands(Inst,
N);
2644 void addFPSRegListWithVPROperands(
MCInst &Inst,
unsigned N)
const {
2645 addRegListOperands(Inst,
N);
2648 void addFPDRegListWithVPROperands(
MCInst &Inst,
unsigned N)
const {
2649 addRegListOperands(Inst,
N);
2652 void addRotImmOperands(
MCInst &Inst,
unsigned N)
const {
2653 assert(
N == 1 &&
"Invalid number of operands!");
2658 void addModImmOperands(
MCInst &Inst,
unsigned N)
const {
2659 assert(
N == 1 &&
"Invalid number of operands!");
2663 return addImmOperands(Inst,
N);
2668 void addModImmNotOperands(
MCInst &Inst,
unsigned N)
const {
2669 assert(
N == 1 &&
"Invalid number of operands!");
2675 void addModImmNegOperands(
MCInst &Inst,
unsigned N)
const {
2676 assert(
N == 1 &&
"Invalid number of operands!");
2682 void addThumbModImmNeg8_255Operands(
MCInst &Inst,
unsigned N)
const {
2683 assert(
N == 1 &&
"Invalid number of operands!");
2689 void addThumbModImmNeg1_7Operands(
MCInst &Inst,
unsigned N)
const {
2690 assert(
N == 1 &&
"Invalid number of operands!");
2696 void addBitfieldOperands(
MCInst &Inst,
unsigned N)
const {
2697 assert(
N == 1 &&
"Invalid number of operands!");
2703 (32 - (lsb + width)));
2707 void addImmOperands(
MCInst &Inst,
unsigned N)
const {
2708 assert(
N == 1 &&
"Invalid number of operands!");
2709 addExpr(Inst, getImm());
2712 void addFBits16Operands(
MCInst &Inst,
unsigned N)
const {
2713 assert(
N == 1 &&
"Invalid number of operands!");
2718 void addFBits32Operands(
MCInst &Inst,
unsigned N)
const {
2719 assert(
N == 1 &&
"Invalid number of operands!");
2724 void addFPImmOperands(
MCInst &Inst,
unsigned N)
const {
2725 assert(
N == 1 &&
"Invalid number of operands!");
2731 void addImm8s4Operands(
MCInst &Inst,
unsigned N)
const {
2732 assert(
N == 1 &&
"Invalid number of operands!");
2739 void addImm7s4Operands(
MCInst &Inst,
unsigned N)
const {
2740 assert(
N == 1 &&
"Invalid number of operands!");
2747 void addImm7Shift0Operands(
MCInst &Inst,
unsigned N)
const {
2748 assert(
N == 1 &&
"Invalid number of operands!");
2753 void addImm7Shift1Operands(
MCInst &Inst,
unsigned N)
const {
2754 assert(
N == 1 &&
"Invalid number of operands!");
2759 void addImm7Shift2Operands(
MCInst &Inst,
unsigned N)
const {
2760 assert(
N == 1 &&
"Invalid number of operands!");
2765 void addImm7Operands(
MCInst &Inst,
unsigned N)
const {
2766 assert(
N == 1 &&
"Invalid number of operands!");
2771 void addImm0_1020s4Operands(
MCInst &Inst,
unsigned N)
const {
2772 assert(
N == 1 &&
"Invalid number of operands!");
2779 void addImm0_508s4NegOperands(
MCInst &Inst,
unsigned N)
const {
2780 assert(
N == 1 &&
"Invalid number of operands!");
2787 void addImm0_508s4Operands(
MCInst &Inst,
unsigned N)
const {
2788 assert(
N == 1 &&
"Invalid number of operands!");
2795 void addImm1_16Operands(
MCInst &Inst,
unsigned N)
const {
2796 assert(
N == 1 &&
"Invalid number of operands!");
2803 void addImm1_32Operands(
MCInst &Inst,
unsigned N)
const {
2804 assert(
N == 1 &&
"Invalid number of operands!");
2811 void addImmThumbSROperands(
MCInst &Inst,
unsigned N)
const {
2812 assert(
N == 1 &&
"Invalid number of operands!");
2816 unsigned Imm =
CE->getValue();
2820 void addPKHASRImmOperands(
MCInst &Inst,
unsigned N)
const {
2821 assert(
N == 1 &&
"Invalid number of operands!");
2825 int Val =
CE->getValue();
2829 void addT2SOImmNotOperands(
MCInst &Inst,
unsigned N)
const {
2830 assert(
N == 1 &&
"Invalid number of operands!");
2837 void addT2SOImmNegOperands(
MCInst &Inst,
unsigned N)
const {
2838 assert(
N == 1 &&
"Invalid number of operands!");
2845 void addImm0_4095NegOperands(
MCInst &Inst,
unsigned N)
const {
2846 assert(
N == 1 &&
"Invalid number of operands!");
2853 void addUnsignedOffset_b8s2Operands(
MCInst &Inst,
unsigned N)
const {
2854 if(
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2862 void addThumbMemPCOperands(
MCInst &Inst,
unsigned N)
const {
2863 assert(
N == 1 &&
"Invalid number of operands!");
2875 assert(isGPRMem() &&
"Unknown value type!");
2876 assert(isa<MCConstantExpr>(
Memory.OffsetImm) &&
"Unknown value type!");
2877 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
2883 void addMemBarrierOptOperands(
MCInst &Inst,
unsigned N)
const {
2884 assert(
N == 1 &&
"Invalid number of operands!");
2888 void addInstSyncBarrierOptOperands(
MCInst &Inst,
unsigned N)
const {
2889 assert(
N == 1 &&
"Invalid number of operands!");
2893 void addTraceSyncBarrierOptOperands(
MCInst &Inst,
unsigned N)
const {
2894 assert(
N == 1 &&
"Invalid number of operands!");
2898 void addMemNoOffsetOperands(
MCInst &Inst,
unsigned N)
const {
2899 assert(
N == 1 &&
"Invalid number of operands!");
2903 void addMemNoOffsetT2Operands(
MCInst &Inst,
unsigned N)
const {
2904 assert(
N == 1 &&
"Invalid number of operands!");
2908 void addMemNoOffsetT2NoSpOperands(
MCInst &Inst,
unsigned N)
const {
2909 assert(
N == 1 &&
"Invalid number of operands!");
2913 void addMemNoOffsetTOperands(
MCInst &Inst,
unsigned N)
const {
2914 assert(
N == 1 &&
"Invalid number of operands!");
2918 void addMemPCRelImm12Operands(
MCInst &Inst,
unsigned N)
const {
2919 assert(
N == 1 &&
"Invalid number of operands!");
2920 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
2926 void addAdrLabelOperands(
MCInst &Inst,
unsigned N)
const {
2927 assert(
N == 1 &&
"Invalid number of operands!");
2932 if (!isa<MCConstantExpr>(getImm())) {
2938 int Val =
CE->getValue();
2942 void addAlignedMemoryOperands(
MCInst &Inst,
unsigned N)
const {
2943 assert(
N == 2 &&
"Invalid number of operands!");
2948 void addDupAlignedMemoryNoneOperands(
MCInst &Inst,
unsigned N)
const {
2949 addAlignedMemoryOperands(Inst,
N);
2952 void addAlignedMemoryNoneOperands(
MCInst &Inst,
unsigned N)
const {
2953 addAlignedMemoryOperands(Inst,
N);
2956 void addAlignedMemory16Operands(
MCInst &Inst,
unsigned N)
const {
2957 addAlignedMemoryOperands(Inst,
N);
2960 void addDupAlignedMemory16Operands(
MCInst &Inst,
unsigned N)
const {
2961 addAlignedMemoryOperands(Inst,
N);
2964 void addAlignedMemory32Operands(
MCInst &Inst,
unsigned N)
const {
2965 addAlignedMemoryOperands(Inst,
N);
2968 void addDupAlignedMemory32Operands(
MCInst &Inst,
unsigned N)
const {
2969 addAlignedMemoryOperands(Inst,
N);
2972 void addAlignedMemory64Operands(
MCInst &Inst,
unsigned N)
const {
2973 addAlignedMemoryOperands(Inst,
N);
2976 void addDupAlignedMemory64Operands(
MCInst &Inst,
unsigned N)
const {
2977 addAlignedMemoryOperands(Inst,
N);
2980 void addAlignedMemory64or128Operands(
MCInst &Inst,
unsigned N)
const {
2981 addAlignedMemoryOperands(Inst,
N);
2984 void addDupAlignedMemory64or128Operands(
MCInst &Inst,
unsigned N)
const {
2985 addAlignedMemoryOperands(Inst,
N);
2988 void addAlignedMemory64or128or256Operands(
MCInst &Inst,
unsigned N)
const {
2989 addAlignedMemoryOperands(Inst,
N);
2992 void addAddrMode2Operands(
MCInst &Inst,
unsigned N)
const {
2993 assert(
N == 3 &&
"Invalid number of operands!");
2996 if (!
Memory.OffsetRegNum) {
2999 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
3000 int32_t Val =
CE->getValue();
3003 if (Val == std::numeric_limits<int32_t>::min())
3021 void addAM2OffsetImmOperands(
MCInst &Inst,
unsigned N)
const {
3022 assert(
N == 2 &&
"Invalid number of operands!");
3024 assert(CE &&
"non-constant AM2OffsetImm operand!");
3025 int32_t Val =
CE->getValue();
3028 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3029 if (Val < 0) Val = -Val;
3035 void addAddrMode3Operands(
MCInst &Inst,
unsigned N)
const {
3036 assert(
N == 3 &&
"Invalid number of operands!");
3049 if (!
Memory.OffsetRegNum) {
3052 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
3053 int32_t Val =
CE->getValue();
3056 if (Val == std::numeric_limits<int32_t>::min())
3073 void addAM3OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3074 assert(
N == 2 &&
"Invalid number of operands!");
3075 if (Kind == k_PostIndexRegister) {
3085 int32_t Val =
CE->getValue();
3088 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3089 if (Val < 0) Val = -Val;
3095 void addAddrMode5Operands(
MCInst &Inst,
unsigned N)
const {
3096 assert(
N == 2 &&
"Invalid number of operands!");
3109 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
3111 int32_t Val =
CE->getValue() / 4;
3114 if (Val == std::numeric_limits<int32_t>::min())
3124 void addAddrMode5FP16Operands(
MCInst &Inst,
unsigned N)
const {
3125 assert(
N == 2 &&
"Invalid number of operands!");
3139 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
3140 int32_t Val =
CE->getValue() / 2;
3143 if (Val == std::numeric_limits<int32_t>::min())
3153 void addMemImm8s4OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3154 assert(
N == 2 &&
"Invalid number of operands!");
3165 addExpr(Inst,
Memory.OffsetImm);
3168 void addMemImm7s4OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3169 assert(
N == 2 &&
"Invalid number of operands!");
3180 addExpr(Inst,
Memory.OffsetImm);
3183 void addMemImm0_1020s4OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3184 assert(
N == 2 &&
"Invalid number of operands!");
3188 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
3195 void addMemImmOffsetOperands(
MCInst &Inst,
unsigned N)
const {
3196 assert(
N == 2 &&
"Invalid number of operands!");
3198 addExpr(Inst,
Memory.OffsetImm);
3201 void addMemRegRQOffsetOperands(
MCInst &Inst,
unsigned N)
const {
3202 assert(
N == 2 &&
"Invalid number of operands!");
3207 void addMemUImm12OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3208 assert(
N == 2 &&
"Invalid number of operands!");
3211 addExpr(Inst, getImm());
3218 addExpr(Inst,
Memory.OffsetImm);
3221 void addMemImm12OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3222 assert(
N == 2 &&
"Invalid number of operands!");
3225 addExpr(Inst, getImm());
3232 addExpr(Inst,
Memory.OffsetImm);
3235 void addConstPoolAsmImmOperands(
MCInst &Inst,
unsigned N)
const {
3236 assert(
N == 1 &&
"Invalid number of operands!");
3239 addExpr(Inst, getConstantPoolImm());
3242 void addMemTBBOperands(
MCInst &Inst,
unsigned N)
const {
3243 assert(
N == 2 &&
"Invalid number of operands!");
3248 void addMemTBHOperands(
MCInst &Inst,
unsigned N)
const {
3249 assert(
N == 2 &&
"Invalid number of operands!");
3254 void addMemRegOffsetOperands(
MCInst &Inst,
unsigned N)
const {
3255 assert(
N == 3 &&
"Invalid number of operands!");
3264 void addT2MemRegOffsetOperands(
MCInst &Inst,
unsigned N)
const {
3265 assert(
N == 3 &&
"Invalid number of operands!");
3271 void addMemThumbRROperands(
MCInst &Inst,
unsigned N)
const {
3272 assert(
N == 2 &&
"Invalid number of operands!");
3277 void addMemThumbRIs4Operands(
MCInst &Inst,
unsigned N)
const {
3278 assert(
N == 2 &&
"Invalid number of operands!");
3282 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
3289 void addMemThumbRIs2Operands(
MCInst &Inst,
unsigned N)
const {
3290 assert(
N == 2 &&
"Invalid number of operands!");
3294 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
3300 void addMemThumbRIs1Operands(
MCInst &Inst,
unsigned N)
const {
3301 assert(
N == 2 &&
"Invalid number of operands!");
3303 addExpr(Inst,
Memory.OffsetImm);
3306 void addMemThumbSPIOperands(
MCInst &Inst,
unsigned N)
const {
3307 assert(
N == 2 &&
"Invalid number of operands!");
3311 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
3318 void addPostIdxImm8Operands(
MCInst &Inst,
unsigned N)
const {
3319 assert(
N == 1 &&
"Invalid number of operands!");
3321 assert(CE &&
"non-constant post-idx-imm8 operand!");
3322 int Imm =
CE->getValue();
3323 bool isAdd =
Imm >= 0;
3324 if (Imm == std::numeric_limits<int32_t>::min())
Imm = 0;
3329 void addPostIdxImm8s4Operands(
MCInst &Inst,
unsigned N)
const {
3330 assert(
N == 1 &&
"Invalid number of operands!");
3332 assert(CE &&
"non-constant post-idx-imm8s4 operand!");
3333 int Imm =
CE->getValue();
3334 bool isAdd =
Imm >= 0;
3335 if (Imm == std::numeric_limits<int32_t>::min())
Imm = 0;
3341 void addPostIdxRegOperands(
MCInst &Inst,
unsigned N)
const {
3342 assert(
N == 2 &&
"Invalid number of operands!");
3347 void addPostIdxRegShiftedOperands(
MCInst &Inst,
unsigned N)
const {
3348 assert(
N == 2 &&
"Invalid number of operands!");
3354 PostIdxReg.ShiftTy);
3358 void addPowerTwoOperands(
MCInst &Inst,
unsigned N)
const {
3359 assert(
N == 1 &&
"Invalid number of operands!");
3364 void addMSRMaskOperands(
MCInst &Inst,
unsigned N)
const {
3365 assert(
N == 1 &&
"Invalid number of operands!");
3369 void addBankedRegOperands(
MCInst &Inst,
unsigned N)
const {
3370 assert(
N == 1 &&
"Invalid number of operands!");
3374 void addProcIFlagsOperands(
MCInst &Inst,
unsigned N)
const {
3375 assert(
N == 1 &&
"Invalid number of operands!");
3379 void addVecListOperands(
MCInst &Inst,
unsigned N)
const {
3380 assert(
N == 1 &&
"Invalid number of operands!");
3384 void addMVEVecListOperands(
MCInst &Inst,
unsigned N)
const {
3385 assert(
N == 1 &&
"Invalid number of operands!");
3401 const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
3403 (VectorList.Count == 2) ? &ARMMCRegisterClasses[ARM::MQQPRRegClassID]
3404 : &ARMMCRegisterClasses[ARM::MQQQQPRRegClassID];
3407 for (
I = 0;
I <
E;
I++)
3410 assert(
I <
E &&
"Invalid vector list start register!");
3415 void addVecListIndexedOperands(
MCInst &Inst,
unsigned N)
const {
3416 assert(
N == 2 &&
"Invalid number of operands!");
3421 void addVectorIndex8Operands(
MCInst &Inst,
unsigned N)
const {
3422 assert(
N == 1 &&
"Invalid number of operands!");
3426 void addVectorIndex16Operands(
MCInst &Inst,
unsigned N)
const {
3427 assert(
N == 1 &&
"Invalid number of operands!");
3431 void addVectorIndex32Operands(
MCInst &Inst,
unsigned N)
const {
3432 assert(
N == 1 &&
"Invalid number of operands!");
3436 void addVectorIndex64Operands(
MCInst &Inst,
unsigned N)
const {
3437 assert(
N == 1 &&
"Invalid number of operands!");
3441 void addMVEVectorIndexOperands(
MCInst &Inst,
unsigned N)
const {
3442 assert(
N == 1 &&
"Invalid number of operands!");
3446 void addMVEPairVectorIndexOperands(
MCInst &Inst,
unsigned N)
const {
3447 assert(
N == 1 &&
"Invalid number of operands!");
3451 void addNEONi8splatOperands(
MCInst &Inst,
unsigned N)
const {
3452 assert(
N == 1 &&
"Invalid number of operands!");
3459 void addNEONi16splatOperands(
MCInst &Inst,
unsigned N)
const {
3460 assert(
N == 1 &&
"Invalid number of operands!");
3463 unsigned Value =
CE->getValue();
3468 void addNEONi16splatNotOperands(
MCInst &Inst,
unsigned N)
const {
3469 assert(
N == 1 &&
"Invalid number of operands!");
3472 unsigned Value =
CE->getValue();
3477 void addNEONi32splatOperands(
MCInst &Inst,
unsigned N)
const {
3478 assert(
N == 1 &&
"Invalid number of operands!");
3481 unsigned Value =
CE->getValue();
3486 void addNEONi32splatNotOperands(
MCInst &Inst,
unsigned N)
const {
3487 assert(
N == 1 &&
"Invalid number of operands!");
3490 unsigned Value =
CE->getValue();
3495 void addNEONi8ReplicateOperands(
MCInst &Inst,
bool Inv)
const {
3500 "All instructions that wants to replicate non-zero byte "
3501 "always must be replaced with VMOVv8i8 or VMOVv16i8.");
3502 unsigned Value =
CE->getValue();
3505 unsigned B =
Value & 0xff;
3510 void addNEONinvi8ReplicateOperands(
MCInst &Inst,
unsigned N)
const {
3511 assert(
N == 1 &&
"Invalid number of operands!");
3512 addNEONi8ReplicateOperands(Inst,
true);
3515 static unsigned encodeNeonVMOVImmediate(
unsigned Value) {
3518 else if (
Value > 0xffff &&
Value <= 0xffffff)
3520 else if (
Value > 0xffffff)
3525 void addNEONi32vmovOperands(
MCInst &Inst,
unsigned N)
const {
3526 assert(
N == 1 &&
"Invalid number of operands!");
3529 unsigned Value = encodeNeonVMOVImmediate(
CE->getValue());
3533 void addNEONvmovi8ReplicateOperands(
MCInst &Inst,
unsigned N)
const {
3534 assert(
N == 1 &&
"Invalid number of operands!");
3535 addNEONi8ReplicateOperands(Inst,
false);
3538 void addNEONvmovi16ReplicateOperands(
MCInst &Inst,
unsigned N)
const {
3539 assert(
N == 1 &&
"Invalid number of operands!");
3545 "All instructions that want to replicate non-zero half-word "
3546 "always must be replaced with V{MOV,MVN}v{4,8}i16.");
3548 unsigned Elem =
Value & 0xffff;
3550 Elem = (Elem >> 8) | 0x200;
3554 void addNEONi32vmovNegOperands(
MCInst &Inst,
unsigned N)
const {
3555 assert(
N == 1 &&
"Invalid number of operands!");
3558 unsigned Value = encodeNeonVMOVImmediate(~
CE->getValue());
3562 void addNEONvmovi32ReplicateOperands(
MCInst &Inst,
unsigned N)
const {
3563 assert(
N == 1 &&
"Invalid number of operands!");
3569 "All instructions that want to replicate non-zero word "
3570 "always must be replaced with V{MOV,MVN}v{2,4}i32.");
3572 unsigned Elem = encodeNeonVMOVImmediate(
Value & 0xffffffff);
3576 void addNEONi64splatOperands(
MCInst &Inst,
unsigned N)
const {
3577 assert(
N == 1 &&
"Invalid number of operands!");
3582 for (
unsigned i = 0; i < 8; ++i, Value >>= 8) {
3588 void addComplexRotationEvenOperands(
MCInst &Inst,
unsigned N)
const {
3589 assert(
N == 1 &&
"Invalid number of operands!");
3594 void addComplexRotationOddOperands(
MCInst &Inst,
unsigned N)
const {
3595 assert(
N == 1 &&
"Invalid number of operands!");
3600 void addMveSaturateOperands(
MCInst &Inst,
unsigned N)
const {
3601 assert(
N == 1 &&
"Invalid number of operands!");
3603 unsigned Imm =
CE->getValue();
3604 assert((Imm == 48 || Imm == 64) &&
"Invalid saturate operand");
3610 static std::unique_ptr<ARMOperand> CreateITMask(
unsigned Mask,
SMLoc S) {
3611 auto Op = std::make_unique<ARMOperand>(k_ITCondMask);
3620 auto Op = std::make_unique<ARMOperand>(k_CondCode);
3629 auto Op = std::make_unique<ARMOperand>(k_VPTPred);
3636 static std::unique_ptr<ARMOperand> CreateCoprocNum(
unsigned CopVal,
SMLoc S) {
3637 auto Op = std::make_unique<ARMOperand>(k_CoprocNum);
3638 Op->Cop.Val = CopVal;
3644 static std::unique_ptr<ARMOperand> CreateCoprocReg(
unsigned CopVal,
SMLoc S) {
3645 auto Op = std::make_unique<ARMOperand>(k_CoprocReg);
3646 Op->Cop.Val = CopVal;
3652 static std::unique_ptr<ARMOperand> CreateCoprocOption(
unsigned Val,
SMLoc S,
3654 auto Op = std::make_unique<ARMOperand>(k_CoprocOption);
3661 static std::unique_ptr<ARMOperand> CreateCCOut(
unsigned RegNum,
SMLoc S) {
3662 auto Op = std::make_unique<ARMOperand>(k_CCOut);
3663 Op->Reg.RegNum = RegNum;
3669 static std::unique_ptr<ARMOperand> CreateToken(
StringRef Str,
SMLoc S) {
3670 auto Op = std::make_unique<ARMOperand>(k_Token);
3671 Op->Tok.Data = Str.data();
3672 Op->Tok.Length = Str.size();
3678 static std::unique_ptr<ARMOperand> CreateReg(
unsigned RegNum,
SMLoc S,
3680 auto Op = std::make_unique<ARMOperand>(k_Register);
3681 Op->Reg.RegNum = RegNum;
3687 static std::unique_ptr<ARMOperand>
3689 unsigned ShiftReg,
unsigned ShiftImm,
SMLoc S,
3691 auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister);
3692 Op->RegShiftedReg.ShiftTy = ShTy;
3693 Op->RegShiftedReg.SrcReg = SrcReg;
3694 Op->RegShiftedReg.ShiftReg = ShiftReg;
3695 Op->RegShiftedReg.ShiftImm = ShiftImm;
3701 static std::unique_ptr<ARMOperand>
3704 auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate);
3705 Op->RegShiftedImm.ShiftTy = ShTy;
3706 Op->RegShiftedImm.SrcReg = SrcReg;
3707 Op->RegShiftedImm.ShiftImm = ShiftImm;
3713 static std::unique_ptr<ARMOperand> CreateShifterImm(
bool isASR,
unsigned Imm,
3715 auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate);
3716 Op->ShifterImm.isASR = isASR;
3717 Op->ShifterImm.Imm =
Imm;
3723 static std::unique_ptr<ARMOperand> CreateRotImm(
unsigned Imm,
SMLoc S,
3725 auto Op = std::make_unique<ARMOperand>(k_RotateImmediate);
3726 Op->RotImm.Imm =
Imm;
3732 static std::unique_ptr<ARMOperand> CreateModImm(
unsigned Bits,
unsigned Rot,
3734 auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate);
3736 Op->ModImm.Rot = Rot;
3742 static std::unique_ptr<ARMOperand>
3744 auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate);
3751 static std::unique_ptr<ARMOperand>
3752 CreateBitfield(
unsigned LSB,
unsigned Width,
SMLoc S,
SMLoc E) {
3753 auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor);
3754 Op->Bitfield.LSB = LSB;
3755 Op->Bitfield.Width = Width;
3761 static std::unique_ptr<ARMOperand>
3764 assert(Regs.size() > 0 &&
"RegList contains no registers?");
3765 KindTy
Kind = k_RegisterList;
3767 if (ARMMCRegisterClasses[ARM::DPRRegClassID].
contains(
3768 Regs.front().second)) {
3769 if (Regs.back().second == ARM::VPR)
3770 Kind = k_FPDRegisterListWithVPR;
3772 Kind = k_DPRRegisterList;
3773 }
else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
contains(
3774 Regs.front().second)) {
3775 if (Regs.back().second == ARM::VPR)
3776 Kind = k_FPSRegisterListWithVPR;
3778 Kind = k_SPRRegisterList;
3781 if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
3782 Kind = k_RegisterListWithAPSR;
3786 auto Op = std::make_unique<ARMOperand>(Kind);
3787 for (
const auto &
P : Regs)
3788 Op->Registers.push_back(
P.second);
3790 Op->StartLoc = StartLoc;
3791 Op->EndLoc = EndLoc;
3795 static std::unique_ptr<ARMOperand> CreateVectorList(
unsigned RegNum,
3797 bool isDoubleSpaced,
3799 auto Op = std::make_unique<ARMOperand>(k_VectorList);
3800 Op->VectorList.RegNum = RegNum;
3801 Op->VectorList.Count = Count;
3802 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3808 static std::unique_ptr<ARMOperand>
3809 CreateVectorListAllLanes(
unsigned RegNum,
unsigned Count,
bool isDoubleSpaced,
3811 auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes);
3812 Op->VectorList.RegNum = RegNum;
3813 Op->VectorList.Count = Count;
3814 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3820 static std::unique_ptr<ARMOperand>
3821 CreateVectorListIndexed(
unsigned RegNum,
unsigned Count,
unsigned Index,
3823 auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed);
3824 Op->VectorList.RegNum = RegNum;
3825 Op->VectorList.Count = Count;
3826 Op->VectorList.LaneIndex =
Index;
3827 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3833 static std::unique_ptr<ARMOperand>
3835 auto Op = std::make_unique<ARMOperand>(k_VectorIndex);
3836 Op->VectorIndex.Val =
Idx;
3842 static std::unique_ptr<ARMOperand> CreateImm(
const MCExpr *Val,
SMLoc S,
3844 auto Op = std::make_unique<ARMOperand>(k_Immediate);
3851 static std::unique_ptr<ARMOperand>
3852 CreateMem(
unsigned BaseRegNum,
const MCExpr *OffsetImm,
unsigned OffsetRegNum,
3855 auto Op = std::make_unique<ARMOperand>(k_Memory);
3856 Op->Memory.BaseRegNum = BaseRegNum;
3857 Op->Memory.OffsetImm = OffsetImm;
3858 Op->Memory.OffsetRegNum = OffsetRegNum;
3859 Op->Memory.ShiftType = ShiftType;
3860 Op->Memory.ShiftImm = ShiftImm;
3861 Op->Memory.Alignment = Alignment;
3862 Op->Memory.isNegative = isNegative;
3865 Op->AlignmentLoc = AlignmentLoc;
3869 static std::unique_ptr<ARMOperand>
3872 auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister);
3873 Op->PostIdxReg.RegNum = RegNum;
3874 Op->PostIdxReg.isAdd = isAdd;
3875 Op->PostIdxReg.ShiftTy = ShiftTy;
3876 Op->PostIdxReg.ShiftImm = ShiftImm;
3882 static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(
ARM_MB::MemBOpt Opt,
3884 auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt);
3885 Op->MBOpt.Val = Opt;
3891 static std::unique_ptr<ARMOperand>
3893 auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt);
3894 Op->ISBOpt.Val = Opt;
3900 static std::unique_ptr<ARMOperand>
3902 auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
3903 Op->TSBOpt.Val = Opt;
3909 static std::unique_ptr<ARMOperand> CreateProcIFlags(
ARM_PROC::IFlags IFlags,
3911 auto Op = std::make_unique<ARMOperand>(k_ProcIFlags);
3918 static std::unique_ptr<ARMOperand> CreateMSRMask(
unsigned MMask,
SMLoc S) {
3919 auto Op = std::make_unique<ARMOperand>(k_MSRMask);
3920 Op->MMask.Val = MMask;
3926 static std::unique_ptr<ARMOperand> CreateBankedReg(
unsigned Reg,
SMLoc S) {
3927 auto Op = std::make_unique<ARMOperand>(k_BankedReg);
3928 Op->BankedReg.Val =
Reg;
3955 case k_ITCondMask: {
3956 static const char *
const MaskStr[] = {
3957 "(invalid)",
"(tttt)",
"(ttt)",
"(ttte)",
3958 "(tt)",
"(ttet)",
"(tte)",
"(ttee)",
3959 "(t)",
"(tett)",
"(tet)",
"(tete)",
3960 "(te)",
"(teet)",
"(tee)",
"(teee)",
3962 assert((ITMask.Mask & 0xf) == ITMask.Mask);
3963 OS <<
"<it-mask " << MaskStr[ITMask.Mask] <<
">";
3967 OS <<
"<coprocessor number: " << getCoproc() <<
">";
3970 OS <<
"<coprocessor register: " << getCoproc() <<
">";
3972 case k_CoprocOption:
3973 OS <<
"<coprocessor option: " << CoprocOption.Val <<
">";
3976 OS <<
"<mask: " << getMSRMask() <<
">";
3979 OS <<
"<banked reg: " << getBankedReg() <<
">";
3984 case k_MemBarrierOpt:
3985 OS <<
"<ARM_MB::" << MemBOptToString(getMemBarrierOpt(),
false) <<
">";
3987 case k_InstSyncBarrierOpt:
3988 OS <<
"<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) <<
">";
3990 case k_TraceSyncBarrierOpt:
3991 OS <<
"<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) <<
">";
3998 OS <<
" offset-imm:" << *
Memory.OffsetImm;
4000 OS <<
" offset-reg:" << (
Memory.isNegative ?
"-" :
"")
4004 OS <<
" shift-imm:" <<
Memory.ShiftImm;
4007 OS <<
" alignment:" <<
Memory.Alignment;
4010 case k_PostIndexRegister:
4011 OS <<
"post-idx register " << (PostIdxReg.isAdd ?
"" :
"-")
4012 <<
RegName(PostIdxReg.RegNum);
4015 << PostIdxReg.ShiftImm;
4018 case k_ProcIFlags: {
4019 OS <<
"<ARM_PROC::";
4020 unsigned IFlags = getProcIFlags();
4021 for (
int i=2; i >= 0; --i)
4022 if (IFlags & (1 << i))
4030 case k_ShifterImmediate:
4031 OS <<
"<shift " << (ShifterImm.isASR ?
"asr" :
"lsl")
4032 <<
" #" << ShifterImm.Imm <<
">";
4034 case k_ShiftedRegister:
4035 OS <<
"<so_reg_reg " <<
RegName(RegShiftedReg.SrcReg) <<
" "
4037 <<
RegName(RegShiftedReg.ShiftReg) <<
">";
4039 case k_ShiftedImmediate:
4040 OS <<
"<so_reg_imm " <<
RegName(RegShiftedImm.SrcReg) <<
" "
4042 << RegShiftedImm.ShiftImm <<
">";
4044 case k_RotateImmediate:
4045 OS <<
"<ror " <<
" #" << (RotImm.Imm * 8) <<
">";
4047 case k_ModifiedImmediate:
4048 OS <<
"<mod_imm #" << ModImm.Bits <<
", #"
4049 << ModImm.Rot <<
")>";
4051 case k_ConstantPoolImmediate:
4052 OS <<
"<constant_pool_imm #" << *getConstantPoolImm();
4054 case k_BitfieldDescriptor:
4055 OS <<
"<bitfield " <<
"lsb: " <<
Bitfield.LSB
4056 <<
", width: " <<
Bitfield.Width <<
">";
4058 case k_RegisterList:
4059 case k_RegisterListWithAPSR:
4060 case k_DPRRegisterList:
4061 case k_SPRRegisterList:
4062 case k_FPSRegisterListWithVPR:
4063 case k_FPDRegisterListWithVPR: {
4064 OS <<
"<register_list ";
4070 if (++
I <
E)
OS <<
", ";
4077 OS <<
"<vector_list " << VectorList.Count <<
" * "
4078 <<
RegName(VectorList.RegNum) <<
">";
4080 case k_VectorListAllLanes:
4081 OS <<
"<vector_list(all lanes) " << VectorList.Count <<
" * "
4082 <<
RegName(VectorList.RegNum) <<
">";
4084 case k_VectorListIndexed:
4085 OS <<
"<vector_list(lane " << VectorList.LaneIndex <<
") "
4086 << VectorList.Count <<
" * " <<
RegName(VectorList.RegNum) <<
">";
4089 OS <<
"'" << getToken() <<
"'";
4092 OS <<
"<vectorindex " << getVectorIndex() <<
">";
4106 ".8",
".16",
".32",
".64",
".i8",
".i16",
".i32",
".i64",
4107 ".u8",
".u16",
".u32",
".u64",
".s8",
".s16",
".s32",
".s64",
4108 ".p8",
".p16",
".f32",
".f64",
".f",
".d"};
4113 unsigned MnemonicOpsEndInd = 1;
4117 static_cast<ARMOperand &
>(*
Operands[0]).getToken() ==
"cps") {
4119 static_cast<ARMOperand &
>(*
Operands[1]).getImm()->getKind() ==
4121 (dyn_cast<MCConstantExpr>(
4122 static_cast<ARMOperand &
>(*
Operands[1]).getImm())
4124 dyn_cast<MCConstantExpr>(
4125 static_cast<ARMOperand &
>(*
Operands[1]).getImm())
4127 ++MnemonicOpsEndInd;
4131 bool RHSCondCode =
false;
4132 while (MnemonicOpsEndInd <
Operands.size()) {
4133 auto Op =
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]);
4135 if (
Op.isITMask()) {
4137 MnemonicOpsEndInd++;
4138 }
else if (
Op.isToken() &&
4142 Op.getToken() ==
".w" ||
Op.getToken() ==
".bf16" ||
4143 Op.getToken() ==
".p64" ||
Op.getToken() ==
".f16" ||
4149 MnemonicOpsEndInd++;
4152 else if (
Op.isCCOut() || (
Op.isCondCode() && !RHSCondCode) ||
4153 Op.isVPTPred() || (
Op.isToken() &&
Op.getToken() ==
".w"))
4154 MnemonicOpsEndInd++;
4158 return MnemonicOpsEndInd;
4163 const AsmToken &Tok = getParser().getTok();
4166 Reg = tryParseRegister();
4173 if (parseRegister(
Reg, StartLoc, EndLoc))
4181int ARMAsmParser::tryParseRegister(
bool AllowOutOfBoundReg) {
4190 .
Case(
"r13", ARM::SP)
4191 .
Case(
"r14", ARM::LR)
4192 .
Case(
"r15", ARM::PC)
4193 .
Case(
"ip", ARM::R12)
4195 .
Case(
"a1", ARM::R0)
4196 .
Case(
"a2", ARM::R1)
4197 .
Case(
"a3", ARM::R2)
4198 .
Case(
"a4", ARM::R3)
4199 .
Case(
"v1", ARM::R4)
4200 .
Case(
"v2", ARM::R5)
4201 .
Case(
"v3", ARM::R6)
4202 .
Case(
"v4", ARM::R7)
4203 .
Case(
"v5", ARM::R8)
4204 .
Case(
"v6", ARM::R9)
4205 .
Case(
"v7", ARM::R10)
4206 .
Case(
"v8", ARM::R11)
4207 .
Case(
"sb", ARM::R9)
4208 .
Case(
"sl", ARM::R10)
4209 .
Case(
"fp", ARM::R11)
4218 if (Entry == RegisterReqs.
end())
4221 return Entry->getValue();
4225 if (!AllowOutOfBoundReg && !hasD32() && RegNum >=
ARM::D16 &&
4234std::optional<ARM_AM::ShiftOpc> ARMAsmParser::tryParseShiftToken() {
4238 return std::nullopt;
4260 auto ShiftTyOpt = tryParseShiftToken();
4261 if (ShiftTyOpt == std::nullopt)
4263 auto ShiftTy = ShiftTyOpt.value();
4270 std::unique_ptr<ARMOperand> PrevOp(
4271 (ARMOperand *)
Operands.pop_back_val().release());
4272 if (!PrevOp->isReg())
4273 return Error(PrevOp->getStartLoc(),
"shift must be of a register");
4274 int SrcReg = PrevOp->getReg();
4290 const MCExpr *ShiftExpr =
nullptr;
4291 if (getParser().parseExpression(ShiftExpr, EndLoc)) {
4292 Error(ImmLoc,
"invalid immediate shift value");
4298 Error(ImmLoc,
"invalid immediate shift value");
4304 Imm =
CE->getValue();
4308 Error(ImmLoc,
"immediate shift value out of range");
4318 ShiftReg = tryParseRegister();
4319 if (ShiftReg == -1) {
4320 Error(L,
"expected immediate or register in shift operand");
4325 "expected immediate or register in shift operand");
4331 Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
4335 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
4351 int RegNo = tryParseRegister();
4355 Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));
4373 if (getParser().parseExpression(ImmVal))
4377 return TokError(
"immediate value expected for vector index");
4405 if (
Name.size() < 2 ||
Name[0] != CoprocOp)
4409 switch (
Name.size()) {
4432 case '0':
return 10;
4433 case '1':
return 11;
4434 case '2':
return 12;
4435 case '3':
return 13;
4436 case '4':
return 14;
4437 case '5':
return 15;
4476 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
4495 Operands.push_back(ARMOperand::CreateCoprocReg(
Reg, S));
4512 if (getParser().parseExpression(Expr))
4513 return Error(Loc,
"illegal expression");
4515 if (!CE ||
CE->getValue() < 0 ||
CE->getValue() > 255)
4517 "coprocessor option must be an immediate in range [0, 255]");
4518 int Val =
CE->getValue();
4526 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S,
E));
4537 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].
contains(
Reg))
4541 case ARM::R0:
return ARM::R1;
case ARM::R1:
return ARM::R2;
4542 case ARM::R2:
return ARM::R3;
case ARM::R3:
return ARM::R4;
4543 case ARM::R4:
return ARM::R5;
case ARM::R5:
return ARM::R6;
4544 case ARM::R6:
return ARM::R7;
case ARM::R7:
return ARM::R8;
4545 case ARM::R8:
return ARM::R9;
case ARM::R9:
return ARM::R10;
4546 case ARM::R10:
return ARM::R11;
case ARM::R11:
return ARM::R12;
4547 case ARM::R12:
return ARM::SP;
case ARM::SP:
return ARM::LR;
4548 case ARM::LR:
return ARM::PC;
case ARM::PC:
return ARM::R0;
4556 unsigned Enc,
unsigned Reg) {
4557 Regs.emplace_back(Enc,
Reg);
4558 for (
auto I = Regs.rbegin(), J =
I + 1,
E = Regs.rend(); J !=
E; ++
I, ++J) {
4559 if (J->first == Enc) {
4560 Regs.erase(J.base());
4572 bool AllowRAAC,
bool AllowOutOfBoundReg) {
4575 return TokError(
"Token is not a Left Curly Brace");
4582 int Reg = tryParseRegister();
4584 return Error(RegLoc,
"register expected");
4585 if (!AllowRAAC &&
Reg == ARM::RA_AUTH_CODE)
4586 return Error(RegLoc,
"pseudo-register not allowed");
4593 if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4594 Reg = getDRegFromQReg(
Reg);
4595 EReg =
MRI->getEncodingValue(
Reg);
4600 if (
Reg == ARM::RA_AUTH_CODE ||
4601 ARMMCRegisterClasses[ARM::GPRRegClassID].
contains(
Reg))
4602 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
4603 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].
contains(
Reg))
4604 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
4605 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
contains(
Reg))
4606 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
4607 else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].
contains(
Reg))
4608 RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4610 return Error(RegLoc,
"invalid register in register list");
4613 EReg =
MRI->getEncodingValue(
Reg);
4622 if (
Reg == ARM::RA_AUTH_CODE)
4623 return Error(RegLoc,
"pseudo-register not allowed");
4626 int EndReg = tryParseRegister(AllowOutOfBoundReg);
4628 return Error(AfterMinusLoc,
"register expected");
4629 if (EndReg == ARM::RA_AUTH_CODE)
4630 return Error(AfterMinusLoc,
"pseudo-register not allowed");
4632 if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(EndReg))
4633 EndReg = getDRegFromQReg(EndReg) + 1;
4640 return Error(AfterMinusLoc,
"invalid register in register list");
4642 if (
MRI->getEncodingValue(
Reg) >
MRI->getEncodingValue(EndReg))
4643 return Error(AfterMinusLoc,
"bad range in register list");
4646 while (
Reg != EndReg) {
4648 EReg =
MRI->getEncodingValue(
Reg);
4652 ") in register list");
4661 Reg = tryParseRegister(AllowOutOfBoundReg);
4663 return Error(RegLoc,
"register expected");
4664 if (!AllowRAAC &&
Reg == ARM::RA_AUTH_CODE)
4665 return Error(RegLoc,
"pseudo-register not allowed");
4667 bool isQReg =
false;
4668 if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4669 Reg = getDRegFromQReg(
Reg);
4673 RC->
getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
4674 ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
Reg)) {
4677 RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4679 if (
Reg == ARM::VPR &&
4680 (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
4681 RC == &ARMMCRegisterClasses[ARM::DPRRegClassID] ||
4682 RC == &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID])) {
4683 RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4684 EReg =
MRI->getEncodingValue(
Reg);
4687 ") in register list");
4692 if ((
Reg == ARM::RA_AUTH_CODE &&
4693 RC != &ARMMCRegisterClasses[ARM::GPRRegClassID]) ||
4695 return Error(RegLoc,
"invalid register in register list");
4701 MRI->getEncodingValue(
Reg) <
MRI->getEncodingValue(OldReg)) {
4702 if (ARMMCRegisterClasses[ARM::GPRRegClassID].
contains(
Reg))
4703 Warning(RegLoc,
"register list not in ascending order");
4704 else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].
contains(
Reg))
4705 return Error(RegLoc,
"register list not in ascending order");
4708 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
4709 RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
4711 return Error(RegLoc,
"non-contiguous register range");
4712 EReg =
MRI->getEncodingValue(
Reg);
4715 ") in register list");
4718 EReg =
MRI->getEncodingValue(++
Reg);
4741ParseStatus ARMAsmParser::parseVectorLane(VectorLaneTy &LaneKind,
4749 LaneKind = AllLanes;
4762 if (getParser().parseExpression(LaneIndex))
4763 return Error(Loc,
"illegal expression");
4766 return Error(Loc,
"lane index must be empty or an integer");
4771 int64_t Val =
CE->getValue();
4774 if (Val < 0 || Val > 7)
4777 LaneKind = IndexedLane;
4787 VectorLaneTy LaneKind;
4797 int Reg = tryParseRegister();
4800 if (ARMMCRegisterClasses[ARM::DPRRegClassID].
contains(
Reg)) {
4801 ParseStatus Res = parseVectorLane(LaneKind, LaneIndex,
E);
4809 Operands.push_back(ARMOperand::CreateVectorListAllLanes(
Reg, 1,
false,
4813 Operands.push_back(ARMOperand::CreateVectorListIndexed(
Reg, 1,
4820 if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4821 Reg = getDRegFromQReg(
Reg);
4822 ParseStatus Res = parseVectorLane(LaneKind, LaneIndex,
E);
4830 Reg =
MRI->getMatchingSuperReg(
Reg, ARM::dsub_0,
4831 &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4832 Operands.push_back(ARMOperand::CreateVectorListAllLanes(
Reg, 2,
false,
4836 Operands.push_back(ARMOperand::CreateVectorListIndexed(
Reg, 2,
4853 int Reg = tryParseRegister();
4855 return Error(RegLoc,
"register expected");
4858 unsigned FirstReg =
Reg;
4860 if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
Reg))
4862 "vector register in range Q0-Q7 expected");
4865 else if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4866 FirstReg =
Reg = getDRegFromQReg(
Reg);
4874 if (!parseVectorLane(LaneKind, LaneIndex,
E).isSuccess())
4882 else if (Spacing == 2)
4884 "sequential registers in double spaced list");
4887 int EndReg = tryParseRegister();
4889 return Error(AfterMinusLoc,
"register expected");
4891 if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(EndReg))
4892 EndReg = getDRegFromQReg(EndReg) + 1;
4899 !ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(EndReg)) ||
4901 !ARMMCRegisterClasses[ARM::DPRRegClassID].
contains(EndReg)))
4902 return Error(AfterMinusLoc,
"invalid register in register list");
4905 return Error(AfterMinusLoc,
"bad range in register list");
4907 VectorLaneTy NextLaneKind;
4908 unsigned NextLaneIndex;
4909 if (!parseVectorLane(NextLaneKind, NextLaneIndex,
E).isSuccess())
4911 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4912 return Error(AfterMinusLoc,
"mismatched lane index in register list");
4915 Count += EndReg -
Reg;
4922 Reg = tryParseRegister();
4924 return Error(RegLoc,
"register expected");
4927 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
Reg))
4928 return Error(RegLoc,
"vector register in range Q0-Q7 expected");
4937 else if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4940 else if (Spacing == 2)
4943 "invalid register in double-spaced list (must be 'D' register')");
4944 Reg = getDRegFromQReg(
Reg);
4945 if (
Reg != OldReg + 1)
4946 return Error(RegLoc,
"non-contiguous register range");
4950 VectorLaneTy NextLaneKind;
4951 unsigned NextLaneIndex;
4953 if (!parseVectorLane(NextLaneKind, NextLaneIndex,
E).isSuccess())
4955 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4956 return Error(LaneLoc,
"mismatched lane index in register list");
4963 Spacing = 1 + (
Reg == OldReg + 2);
4966 if (
Reg != OldReg + Spacing)
4967 return Error(RegLoc,
"non-contiguous register range");
4970 VectorLaneTy NextLaneKind;
4971 unsigned NextLaneIndex;
4973 if (!parseVectorLane(NextLaneKind, NextLaneIndex,
E).isSuccess())
4975 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4976 return Error(EndLoc,
"mismatched lane index in register list");
4989 if (Count == 2 && !hasMVE()) {
4991 &ARMMCRegisterClasses[ARM::DPairRegClassID] :
4992 &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
4993 FirstReg =
MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
4995 auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
4996 ARMOperand::CreateVectorListAllLanes);
4997 Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S,
E));
5001 Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
5055 const MCExpr *MemBarrierID;
5056 if (getParser().parseExpression(MemBarrierID))
5057 return Error(Loc,
"illegal expression");
5061 return Error(Loc,
"constant expression expected");
5063 int Val =
CE->getValue();
5065 return Error(Loc,
"immediate value out of range");
5117 const MCExpr *ISBarrierID;
5118 if (getParser().parseExpression(ISBarrierID))
5119 return Error(Loc,
"illegal expression");
5123 return Error(Loc,
"constant expression expected");
5125 int Val =
CE->getValue();
5127 return Error(Loc,
"immediate value out of range");
5133 Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
5150 if (IFlagsStr !=
"none") {
5151 for (
int i = 0, e = IFlagsStr.
size(); i != e; ++i) {
5160 if (Flag == ~0U || (IFlags & Flag))
5175 if (
static_cast<ARMOperand &
>(*
Operands.back()).isMSRMask() ||
5176 static_cast<ARMOperand &
>(*
Operands.back()).isBankedReg())
5184 if (Val > 255 || Val < 0) {
5187 unsigned SYSmvalue = Val & 0xFF;
5189 Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
5198 auto TheReg = ARMSysReg::lookupMClassSysRegByName(
Mask.lower());
5199 if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
5202 unsigned SYSmvalue = TheReg->Encoding & 0xFFF;
5205 Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
5210 size_t Start = 0, Next =
Mask.find(
'_');
5212 std::string SpecReg =
Mask.slice(Start, Next).lower();
5219 unsigned FlagsVal = 0;
5221 if (SpecReg ==
"apsr") {
5225 .
Case(
"nzcvqg", 0xc)
5228 if (FlagsVal == ~0U) {
5234 }
else if (SpecReg ==
"cpsr" || SpecReg ==
"spsr") {
5236 if (Flags ==
"all" || Flags ==
"")
5238 for (
int i = 0, e =
Flags.size(); i != e; ++i) {
5248 if (Flag == ~0U || (FlagsVal & Flag))
5264 if (SpecReg ==
"spsr")
5268 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
5276 if (
static_cast<ARMOperand &
>(*
Operands.back()).isBankedReg() ||
5277 static_cast<ARMOperand &
>(*
Operands.back()).isMSRMask())
5286 auto TheReg = ARMBankedReg::lookupBankedRegByName(
RegName.lower());
5289 unsigned Encoding = TheReg->Encoding;
5292 Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
5302 auto ShiftCodeOpt = tryParseShiftToken();
5304 if (!ShiftCodeOpt.has_value())
5306 auto ShiftCode = ShiftCodeOpt.value();
5310 if (ShiftCode !=
Op)
5322 const MCExpr *ShiftAmount;
5325 if (getParser().parseExpression(ShiftAmount, EndLoc))
5326 return Error(Loc,
"illegal expression");
5329 return Error(Loc,
"constant expression expected");
5330 int Val =
CE->getValue();
5331 if (Val < Low || Val >
High)
5332 return Error(Loc,
"immediate value out of range");
5334 Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
5344 return Error(S,
"'be' or 'le' operand expected");
5352 return Error(S,
"'be' or 'le' operand expected");
5372 if (ShiftName ==
"lsl" || ShiftName ==
"LSL")
5374 else if (ShiftName ==
"asr" || ShiftName ==
"ASR")
5387 const MCExpr *ShiftAmount;
5389 if (getParser().parseExpression(ShiftAmount, EndLoc))
5390 return Error(ExLoc,
"malformed shift expression");
5393 return Error(ExLoc,
"shift amount must be an immediate");
5395 int64_t Val =
CE->getValue();
5398 if (Val < 1 || Val > 32)
5399 return Error(ExLoc,
"'asr' shift amount must be in range [1,32]");
5402 return Error(ExLoc,
"'asr #32' shift amount not allowed in Thumb mode");
5403 if (Val == 32) Val = 0;
5406 if (Val < 0 || Val > 31)
5407 return Error(ExLoc,
"'lsr' shift amount must be in range [0,31]");
5410 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
5425 if (ShiftName !=
"ror" && ShiftName !=
"ROR")
5436 const MCExpr *ShiftAmount;
5438 if (getParser().parseExpression(ShiftAmount, EndLoc))
5439 return Error(ExLoc,
"malformed rotate expression");
5442 return Error(ExLoc,
"rotate amount must be an immediate");
5444 int64_t Val =
CE->getValue();
5448 if (Val != 8 && Val != 16 && Val != 24 && Val != 0)
5449 return Error(ExLoc,
"'ror' rotate amount must be 8, 16, or 24");
5451 Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
5490 if (getParser().parseExpression(Imm1Exp, Ex1))
5491 return Error(Sx1,
"malformed expression");
5497 Imm1 =
CE->getValue();
5501 Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
5514 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5520 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5527 "expected modified immediate operand: #[0, 255], #even[0-30]");
5530 return Error(Sx1,
"immediate operand must a number in the range [0, 255]");
5545 if (getParser().parseExpression(Imm2Exp, Ex2))
5546 return Error(Sx2,
"malformed expression");
5548 CE = dyn_cast<MCConstantExpr>(Imm2Exp);
5551 Imm2 =
CE->getValue();
5552 if (!(Imm2 & ~0x1E)) {
5554 Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
5558 "immediate operand must an even number in the range [0, 30]");
5560 return Error(Sx2,
"constant expression expected");
5575 if (getParser().parseExpression(LSBExpr))
5576 return Error(
E,
"malformed immediate expression");
5579 return Error(
E,
"'lsb' operand must be an immediate");
5581 int64_t LSB =
CE->getValue();
5583 if (LSB < 0 || LSB > 31)
5584 return Error(
E,
"'lsb' operand must be in the range [0,31]");
5598 if (getParser().parseExpression(WidthExpr, EndLoc))
5599 return Error(
E,
"malformed immediate expression");
5600 CE = dyn_cast<MCConstantExpr>(WidthExpr);
5602 return Error(
E,
"'width' operand must be an immediate");
5604 int64_t Width =
CE->getValue();
5606 if (Width < 1 || Width > 32 - LSB)
5607 return Error(
E,
"'width' operand must be in the range [1,32-lsb]");
5609 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
5626 bool haveEaten =
false;
5638 int Reg = tryParseRegister();
5646 unsigned ShiftImm = 0;
5649 if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
5656 Operands.push_back(ARMOperand::CreatePostIdxReg(
Reg, isAdd, ShiftTy,
5687 if (getParser().parseExpression(
Offset,
E))
5691 return Error(S,
"constant expression expected");
5694 int32_t Val =
CE->getValue();
5695 if (isNegative && Val == 0)
5696 Val = std::numeric_limits<int32_t>::min();
5704 bool haveEaten =
false;
5716 int Reg = tryParseRegister();
5731 unsigned MnemonicOpsEndInd) {
5732 for (
unsigned I = 1;
I < MnemonicOpsEndInd; ++
I) {
5733 auto Op =
static_cast<ARMOperand &
>(*
Operands[
I]);
5734 if (
Op.isCondCode())
5741 unsigned MnemonicOpsEndInd) {
5742 for (
unsigned I = 1;
I < MnemonicOpsEndInd; ++
I) {
5743 auto Op =
static_cast<ARMOperand &
>(*
Operands[
I]);
5753void ARMAsmParser::cvtThumbMultiply(
MCInst &Inst,
5760 unsigned RegRd = MnemonicOpsEndInd;
5761 unsigned RegRn = MnemonicOpsEndInd + 1;
5762 unsigned RegRm = MnemonicOpsEndInd;
5764 if (
Operands.size() == MnemonicOpsEndInd + 3) {
5769 RegRn = MnemonicOpsEndInd + 2;
5770 RegRm = MnemonicOpsEndInd + 1;
5772 RegRn = MnemonicOpsEndInd + 1;
5773 RegRm = MnemonicOpsEndInd + 2;
5778 ((ARMOperand &)*
Operands[RegRd]).addRegOperands(Inst, 1);
5780 if (CondOutI != 0) {
5781 ((ARMOperand &)*
Operands[CondOutI]).addCCOutOperands(Inst, 1);
5783 ARMOperand
Op = *ARMOperand::CreateCCOut(0,
Operands[0]->getEndLoc());
5784 Op.addCCOutOperands(Inst, 1);
5787 ((ARMOperand &)*
Operands[RegRn]).addRegOperands(Inst, 1);
5789 ((ARMOperand &)*
Operands[RegRm]).addRegOperands(Inst, 1);
5793 ((ARMOperand &)*
Operands[CondI]).addCondCodeOperands(Inst, 2);
5797 Op.addCondCodeOperands(Inst, 2);
5801void ARMAsmParser::cvtThumbBranches(
MCInst &Inst,
5815 case ARM::tBcc: Inst.
setOpcode(ARM::tB);
break;
5816 case ARM::t2Bcc: Inst.
setOpcode(ARM::t2B);
break;
5835 ARMOperand &
op =
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]);
5836 if (!
op.isSignedOffset<11, 1>() &&
isThumb() && hasV8MBaseline())
5842 ARMOperand &
op =
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]);
5843 if (!
op.isSignedOffset<8, 1>() &&
isThumb() && hasV8MBaseline())
5848 ((ARMOperand &)*
Operands[MnemonicOpsEndInd]).addImmOperands(Inst, 1);
5850 ((ARMOperand &)*
Operands[CondI]).addCondCodeOperands(Inst, 2);
5854 Op.addCondCodeOperands(Inst, 2);
5858void ARMAsmParser::cvtMVEVMOVQtoDReg(
5867 ((ARMOperand &)*
Operands[MnemonicOpsEndInd]).addRegOperands(Inst, 1);
5868 ((ARMOperand &)*
Operands[MnemonicOpsEndInd + 1])
5869 .addRegOperands(Inst, 1);
5870 ((ARMOperand &)*
Operands[MnemonicOpsEndInd + 2])
5871 .addRegOperands(Inst, 1);
5872 ((ARMOperand &)*
Operands[MnemonicOpsEndInd + 3])
5873 .addMVEPairVectorIndexOperands(Inst, 1);
5875 ((ARMOperand &)*
Operands[MnemonicOpsEndInd + 5])
5876 .addMVEPairVectorIndexOperands(Inst, 1);
5879 .addCondCodeOperands(Inst, 2);
5883 Op.addCondCodeOperands(Inst, 2);
5893 return TokError(
"Token is not a Left Bracket");
5898 int BaseRegNum = tryParseRegister();
5899 if (BaseRegNum == -1)
5900 return Error(BaseRegTok.
getLoc(),
"register expected");
5906 return Error(Tok.
getLoc(),
"malformed memory operand");
5912 Operands.push_back(ARMOperand::CreateMem(BaseRegNum,
nullptr, 0,
5927 "Lost colon or comma in memory operand?!");
5939 if (getParser().parseExpression(Expr))
5947 return Error (
E,
"constant expression expected");
5950 switch (
CE->getValue()) {
5953 "alignment specifier must be 16, 32, 64, 128, or 256 bits");
5954 case 16:
Align = 2;
break;
5955 case 32:
Align = 4;
break;
5956 case 64:
Align = 8;
break;
5957 case 128:
Align = 16;
break;
5958 case 256:
Align = 32;
break;
5969 Operands.push_back(ARMOperand::CreateMem(BaseRegNum,
nullptr, 0,
5971 false, S,
E, AlignmentLoc));
5997 if (getParser().parseExpression(
Offset))
6000 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Offset)) {
6003 int32_t Val =
CE->getValue();
6004 if (isNegative && Val == 0)
6009 AdjustedOffset =
CE;
6012 Operands.push_back(ARMOperand::CreateMem(
6032 bool isNegative =
false;
6042 int OffsetRegNum = tryParseRegister();
6043 if (OffsetRegNum == -1)
6044 return Error(
E,
"register expected");
6048 unsigned ShiftImm = 0;
6051 if (parseMemRegOffsetShift(ShiftType, ShiftImm))
6061 Operands.push_back(ARMOperand::CreateMem(BaseRegNum,
nullptr, OffsetRegNum,
6062 ShiftType, ShiftImm, 0, isNegative,
6085 return Error(Loc,
"illegal shift operator");
6087 if (ShiftName ==
"lsl" || ShiftName ==
"LSL" ||
6088 ShiftName ==
"asl" || ShiftName ==
"ASL")
6090 else if (ShiftName ==
"lsr" || ShiftName ==
"LSR")
6092 else if (ShiftName ==
"asr" || ShiftName ==
"ASR")
6094 else if (ShiftName ==
"ror" || ShiftName ==
"ROR")
6096 else if (ShiftName ==
"rrx" || ShiftName ==
"RRX")
6098 else if (ShiftName ==
"uxtw" || ShiftName ==
"UXTW")
6101 return Error(Loc,
"illegal shift operator");
6116 if (getParser().parseExpression(Expr))
6123 return Error(Loc,
"shift amount must be an immediate");
6124 int64_t
Imm =
CE->getValue();
6128 return Error(Loc,
"immediate shift value out of range");
6172 bool isVmovf =
false;
6174 for (
unsigned I = 1;
I < MnemonicOpsEndInd; ++
I) {
6175 ARMOperand &TyOp =
static_cast<ARMOperand &
>(*
Operands[
I]);
6176 if (TyOp.isToken() &&
6177 (TyOp.getToken() ==
".f32" || TyOp.getToken() ==
".f64" ||
6178 TyOp.getToken() ==
".f16")) {
6184 ARMOperand &Mnemonic =
static_cast<ARMOperand &
>(*
Operands[0]);
6185 bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() ==
"fconstd" ||
6186 Mnemonic.getToken() ==
"fconsts");
6187 if (!(isVmovf || isFconst))
6193 bool isNegative =
false;
6206 Operands.push_back(ARMOperand::CreateImm(
6216 if (Val > 255 || Val < 0)
6217 return Error(Loc,
"encoded floating point value out of range");
6221 Operands.push_back(ARMOperand::CreateImm(
6227 return Error(Loc,
"invalid floating point immediate");
6247 switch (getLexer().getKind()) {
6255 bool ExpectLabel = Mnemonic ==
"b" || Mnemonic ==
"bl";
6257 if (!tryParseRegisterWithWriteBack(
Operands))
6259 int Res = tryParseShiftRegister(
Operands);
6265 if (Mnemonic ==
"vmrs" &&
6269 Operands.push_back(ARMOperand::CreateToken(
"APSR_nzcv", S));
6286 if (getParser().parseExpression(IdVal))
6289 Operands.push_back(ARMOperand::CreateImm(IdVal, S,
E));
6295 bool AllowOutOfBoundReg = Mnemonic ==
"vlldm" || Mnemonic ==
"vlstm";
6297 AllowOutOfBoundReg);
6310 auto AdjacentToken = getLexer().peekTok(
false);
6314 if (!ExpectIdentifier) {
6323 if (getParser().parseExpression(ImmVal))
6327 int32_t Val =
CE->getValue();
6328 if (IsNegative && Val == 0)
6333 Operands.push_back(ARMOperand::CreateImm(ImmVal, S,
E));
6355 if (parsePrefix(RefKind))
6358 const MCExpr *SubExprVal;
6359 if (getParser().parseExpression(SubExprVal))
6365 Operands.push_back(ARMOperand::CreateImm(ExprVal, S,
E));
6370 if (Mnemonic !=
"ldr")
6371 return Error(S,
"unexpected token in operand");
6373 const MCExpr *SubExprVal;
6374 if (getParser().parseExpression(SubExprVal))
6380 Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S,
E));
6386bool ARMAsmParser::parseImmExpr(int64_t &Out) {
6387 const MCExpr *Expr =
nullptr;
6388 SMLoc L = getParser().getTok().getLoc();
6389 if (
check(getParser().parseExpression(Expr), L,
"expected expression"))
6392 if (
check(!
Value, L,
"expected constant expression"))
6394 Out =
Value->getValue();
6423 static const struct PrefixEntry {
6424 const char *Spelling;
6426 uint8_t SupportedFormats;
6427 } PrefixEntries[] = {
6439 llvm::find_if(PrefixEntries, [&IDVal](
const PrefixEntry &PE) {
6440 return PE.Spelling == IDVal;
6442 if (Prefix == std::end(PrefixEntries)) {
6447 uint8_t CurrentFormat;
6448 switch (getContext().getObjectFileType()) {
6450 CurrentFormat = MACHO;
6453 CurrentFormat =
ELF;
6456 CurrentFormat =
COFF;
6459 CurrentFormat = WASM;
6469 if (~
Prefix->SupportedFormats & CurrentFormat) {
6471 "cannot represent relocation in the current file format");
6475 RefKind =
Prefix->VariantKind;
6499 unsigned &ProcessorIMod,
6503 CarrySetting =
false;
6509 if ((Mnemonic ==
"movs" &&
isThumb()) || Mnemonic ==
"teq" ||
6510 Mnemonic ==
"vceq" || Mnemonic ==
"svc" || Mnemonic ==
"mls" ||
6511 Mnemonic ==
"smmls" || Mnemonic ==
"vcls" || Mnemonic ==
"vmls" ||
6512 Mnemonic ==
"vnmls" || Mnemonic ==
"vacge" || Mnemonic ==
"vcge" ||
6513 Mnemonic ==
"vclt" || Mnemonic ==
"vacgt" || Mnemonic ==
"vaclt" ||
6514 Mnemonic ==
"vacle" || Mnemonic ==
"hlt" || Mnemonic ==
"vcgt" ||
6515 Mnemonic ==
"vcle" || Mnemonic ==
"smlal" || Mnemonic ==
"umaal" ||
6516 Mnemonic ==
"umlal" || Mnemonic ==
"vabal" || Mnemonic ==
"vmlal" ||
6517 Mnemonic ==
"vpadal" || Mnemonic ==
"vqdmlal" || Mnemonic ==
"fmuls" ||
6518 Mnemonic ==
"vmaxnm" || Mnemonic ==
"vminnm" || Mnemonic ==
"vcvta" ||
6519 Mnemonic ==
"vcvtn" || Mnemonic ==
"vcvtp" || Mnemonic ==
"vcvtm" ||
6520 Mnemonic ==
"vrinta" || Mnemonic ==
"vrintn" || Mnemonic ==
"vrintp" ||
6521 Mnemonic ==
"vrintm" || Mnemonic ==
"hvc" ||
6522 Mnemonic.
starts_with(
"vsel") || Mnemonic ==
"vins" ||
6523 Mnemonic ==
"vmovx" || Mnemonic ==
"bxns" || Mnemonic ==
"blxns" ||
6524 Mnemonic ==
"vdot" || Mnemonic ==
"vmmla" || Mnemonic ==
"vudot" ||
6525 Mnemonic ==
"vsdot" || Mnemonic ==
"vcmla" || Mnemonic ==
"vcadd" ||
6526 Mnemonic ==
"vfmal" || Mnemonic ==
"vfmsl" || Mnemonic ==
"wls" ||
6527 Mnemonic ==
"le" || Mnemonic ==
"dls" || Mnemonic ==
"csel" ||
6528 Mnemonic ==
"csinc" || Mnemonic ==
"csinv" || Mnemonic ==
"csneg" ||
6529 Mnemonic ==
"cinc" || Mnemonic ==
"cinv" || Mnemonic ==
"cneg" ||
6530 Mnemonic ==
"cset" || Mnemonic ==
"csetm" || Mnemonic ==
"aut" ||
6531 Mnemonic ==
"pac" || Mnemonic ==
"pacbti" || Mnemonic ==
"bti")
6536 if (Mnemonic !=
"adcs" && Mnemonic !=
"bics" && Mnemonic !=
"movs" &&
6537 Mnemonic !=
"muls" && Mnemonic !=
"smlals" && Mnemonic !=
"smulls" &&
6538 Mnemonic !=
"umlals" && Mnemonic !=
"umulls" && Mnemonic !=
"lsls" &&
6539 Mnemonic !=
"sbcs" && Mnemonic !=
"rscs" &&
6541 (Mnemonic ==
"vmine" || Mnemonic ==
"vshle" || Mnemonic ==
"vshlt" ||
6542 Mnemonic ==
"vshllt" || Mnemonic ==
"vrshle" || Mnemonic ==
"vrshlt" ||
6543 Mnemonic ==
"vmvne" || Mnemonic ==
"vorne" || Mnemonic ==
"vnege" ||
6544 Mnemonic ==
"vnegt" || Mnemonic ==
"vmule" || Mnemonic ==
"vmult" ||
6545 Mnemonic ==
"vrintne" || Mnemonic ==
"vcmult" ||
6546 Mnemonic ==
"vcmule" || Mnemonic ==
"vpsele" || Mnemonic ==
"vpselt" ||
6550 Mnemonic = Mnemonic.
slice(0, Mnemonic.
size() - 2);
6558 !(Mnemonic ==
"cps" || Mnemonic ==
"mls" || Mnemonic ==
"mrs" ||
6559 Mnemonic ==
"smmls" || Mnemonic ==
"vabs" || Mnemonic ==
"vcls" ||
6560 Mnemonic ==
"vmls" || Mnemonic ==
"vmrs" || Mnemonic ==
"vnmls" ||
6561 Mnemonic ==
"vqabs" || Mnemonic ==
"vrecps" || Mnemonic ==
"vrsqrts" ||
6562 Mnemonic ==
"srs" || Mnemonic ==
"flds" || Mnemonic ==
"fmrs" ||
6563 Mnemonic ==
"fsqrts" || Mnemonic ==
"fsubs" || Mnemonic ==
"fsts" ||
6564 Mnemonic ==
"fcpys" || Mnemonic ==
"fdivs" || Mnemonic ==
"fmuls" ||
6565 Mnemonic ==
"fcmps" || Mnemonic ==
"fcmpzs" || Mnemonic ==
"vfms" ||
6566 Mnemonic ==
"vfnms" || Mnemonic ==
"fconsts" || Mnemonic ==
"bxns" ||
6567 Mnemonic ==
"blxns" || Mnemonic ==
"vfmas" || Mnemonic ==
"vmlas" ||
6568 (Mnemonic ==
"movs" &&
isThumb()))) {
6569 Mnemonic = Mnemonic.
slice(0, Mnemonic.
size() - 1);
6570 CarrySetting =
true;
6583 Mnemonic = Mnemonic.
slice(0, Mnemonic.
size()-2);
6584 ProcessorIMod =
IMod;
6588 if (isMnemonicVPTPredicable(Mnemonic, ExtraToken) && Mnemonic !=
"vmovlt" &&
6589 Mnemonic !=
"vshllt" && Mnemonic !=
"vrshrnt" && Mnemonic !=
"vshrnt" &&
6590 Mnemonic !=
"vqrshrunt" && Mnemonic !=
"vqshrunt" &&
6591 Mnemonic !=
"vqrshrnt" && Mnemonic !=
"vqshrnt" && Mnemonic !=
"vmullt" &&
6592 Mnemonic !=
"vqmovnt" && Mnemonic !=
"vqmovunt" &&
6593 Mnemonic !=
"vqmovnt" && Mnemonic !=
"vmovnt" && Mnemonic !=
"vqdmullt" &&
6594 Mnemonic !=
"vpnot" && Mnemonic !=
"vcvtt" && Mnemonic !=
"vcvt") {
6598 Mnemonic = Mnemonic.
slice(0, Mnemonic.
size()-1);
6606 ITMask = Mnemonic.
slice(2, Mnemonic.
size());
6607 Mnemonic = Mnemonic.
slice(0, 2);
6611 ITMask = Mnemonic.
slice(4, Mnemonic.
size());
6612 Mnemonic = Mnemonic.
slice(0, 4);
6614 ITMask = Mnemonic.
slice(3, Mnemonic.
size());
6615 Mnemonic = Mnemonic.
slice(0, 3);
6625void ARMAsmParser::getMnemonicAcceptInfo(
StringRef Mnemonic,
6628 bool &CanAcceptCarrySet,
6629 bool &CanAcceptPredicationCode,
6630 bool &CanAcceptVPTPredicationCode) {
6631 CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken);
6634 Mnemonic ==
"and" || Mnemonic ==
"lsl" || Mnemonic ==
"lsr" ||
6635 Mnemonic ==
"rrx" || Mnemonic ==
"ror" || Mnemonic ==
"sub" ||
6636 Mnemonic ==
"add" || Mnemonic ==
"adc" || Mnemonic ==
"mul" ||
6637 Mnemonic ==
"bic" || Mnemonic ==
"asr" || Mnemonic ==
"orr" ||
6638 Mnemonic ==
"mvn" || Mnemonic ==
"rsb" || Mnemonic ==
"rsc" ||
6639 Mnemonic ==
"orn" || Mnemonic ==
"sbc" || Mnemonic ==
"eor" ||
6640 Mnemonic ==
"neg" || Mnemonic ==
"vfm" || Mnemonic ==
"vfnm" ||
6642 (Mnemonic ==
"smull" || Mnemonic ==
"mov" || Mnemonic ==
"mla" ||
6643 Mnemonic ==
"smlal" || Mnemonic ==
"umlal" || Mnemonic ==
"umull"));
6645 if (Mnemonic ==
"bkpt" || Mnemonic ==
"cbnz" || Mnemonic ==
"setend" ||
6646 Mnemonic ==
"cps" || Mnemonic ==
"it" || Mnemonic ==
"cbz" ||
6647 Mnemonic ==
"trap" || Mnemonic ==
"hlt" || Mnemonic ==
"udf" ||
6649 Mnemonic.
starts_with(
"vsel") || Mnemonic ==
"vmaxnm" ||
6650 Mnemonic ==
"vminnm" || Mnemonic ==
"vcvta" || Mnemonic ==
"vcvtn" ||
6651 Mnemonic ==
"vcvtp" || Mnemonic ==
"vcvtm" || Mnemonic ==
"vrinta" ||
6652 Mnemonic ==
"vrintn" || Mnemonic ==
"vrintp" || Mnemonic ==
"vrintm" ||
6653 Mnemonic.
starts_with(
"aes") || Mnemonic ==
"hvc" ||
6654 Mnemonic ==
"setpan" || Mnemonic.
starts_with(
"sha1") ||
6657 Mnemonic ==
"vmovx" || Mnemonic ==
"vins" || Mnemonic ==
"vudot" ||
6658 Mnemonic ==
"vsdot" || Mnemonic ==
"vcmla" || Mnemonic ==
"vcadd" ||
6659 Mnemonic ==
"vfmal" || Mnemonic ==
"vfmsl" || Mnemonic ==
"vfmat" ||
6660 Mnemonic ==
"vfmab" || Mnemonic ==
"vdot" || Mnemonic ==
"vmmla" ||
6661 Mnemonic ==
"sb" || Mnemonic ==
"ssbb" || Mnemonic ==
"pssbb" ||
6662 Mnemonic ==
"vsmmla" || Mnemonic ==
"vummla" || Mnemonic ==
"vusmmla" ||
6663 Mnemonic ==
"vusdot" || Mnemonic ==
"vsudot" || Mnemonic ==
"bfcsel" ||
6664 Mnemonic ==
"wls" || Mnemonic ==
"dls" || Mnemonic ==
"le" ||
6665 Mnemonic ==
"csel" || Mnemonic ==
"csinc" || Mnemonic ==
"csinv" ||
6666 Mnemonic ==
"csneg" || Mnemonic ==
"cinc" || Mnemonic ==
"cinv" ||
6667 Mnemonic ==
"cneg" || Mnemonic ==
"cset" || Mnemonic ==
"csetm" ||
6668 (hasCDE() && MS.isCDEInstr(Mnemonic) &&
6669 !MS.isITPredicableCDEInstr(Mnemonic)) ||
6671 Mnemonic ==
"pac" || Mnemonic ==
"pacbti" || Mnemonic ==
"aut" ||
6672 Mnemonic ==
"bti" ||
6679 CanAcceptPredicationCode =
false;
6682 CanAcceptPredicationCode =
6683 Mnemonic !=
"cdp2" && Mnemonic !=
"clrex" && Mnemonic !=
"mcr2" &&
6684 Mnemonic !=
"mcrr2" && Mnemonic !=
"mrc2" && Mnemonic !=
"mrrc2" &&
6685 Mnemonic !=
"dmb" && Mnemonic !=
"dfb" && Mnemonic !=
"dsb" &&
6686 Mnemonic !=
"isb" && Mnemonic !=
"pld" && Mnemonic !=
"pli" &&
6687 Mnemonic !=
"pldw" && Mnemonic !=
"ldc2" && Mnemonic !=
"ldc2l" &&
6688 Mnemonic !=
"stc2" && Mnemonic !=
"stc2l" && Mnemonic !=
"tsb" &&
6690 }
else if (isThumbOne()) {
6692 CanAcceptPredicationCode = Mnemonic !=
"movs";
6694 CanAcceptPredicationCode = Mnemonic !=
"nop" && Mnemonic !=
"movs";
6696 CanAcceptPredicationCode =
true;
6700 for (
unsigned I = 0;
I < MnemonicOpsEndInd; ++
I) {
6701 auto &
Op =
static_cast<ARMOperand &
>(*
Operands[
I]);
6702 if (
Op.isToken() &&
Op.getToken() ==
".w")
6712void ARMAsmParser::tryConvertingToTwoOperandForm(
6718 if (
Operands.size() != MnemonicOpsEndInd + 3)
6721 const auto &Op3 =
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]);
6722 auto &Op4 =
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1]);
6723 if (!Op3.isReg() || !Op4.isReg())
6726 auto Op3Reg = Op3.getReg();
6727 auto Op4Reg = Op4.getReg();
6733 auto &Op5 =
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 2]);
6735 if (Mnemonic !=
"add")
6737 bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
6738 (Op5.isReg() && Op5.getReg() == ARM::PC);
6739 if (!TryTransform) {
6740 TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
6741 (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
6742 !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
6743 Op5.isImm() && !Op5.isImm0_508s4());
6747 }
else if (!isThumbOne())
6750 if (!(Mnemonic ==
"add" || Mnemonic ==
"sub" || Mnemonic ==
"and" ||
6751 Mnemonic ==
"eor" || Mnemonic ==
"lsl" || Mnemonic ==
"lsr" ||
6752 Mnemonic ==
"asr" || Mnemonic ==
"adc" || Mnemonic ==
"sbc" ||
6753 Mnemonic ==
"ror" || Mnemonic ==
"orr" || Mnemonic ==
"bic"))
6759 bool Transform = Op3Reg == Op4Reg;
6764 const ARMOperand *LastOp = &Op5;
6766 if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
6767 ((Mnemonic ==
"add" && Op4Reg != ARM::SP) ||
6768 Mnemonic ==
"and" || Mnemonic ==
"eor" ||
6769 Mnemonic ==
"adc" || Mnemonic ==
"orr")) {
6780 if (((Mnemonic ==
"add" && CarrySetting) || Mnemonic ==
"sub") &&
6786 if ((Mnemonic ==
"add" || Mnemonic ==
"sub") && LastOp->isImm0_7())
6800 ARMOperand &
Op =
static_cast<ARMOperand &
>(MCOp);
6806 const MCExpr *E = dyn_cast<MCExpr>(
Op.getImm());
6809 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
6818bool ARMAsmParser::shouldOmitVectorPredicateOperand(
6820 if (!hasMVE() ||
Operands.size() <= MnemonicOpsEndInd)
6834 if (
static_cast<ARMOperand &
>(*Operand).isVectorIndex() ||
6835 ((*Operand).isReg() &&
6836 (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
6837 (*Operand).getReg()) ||
6838 ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
6839 (*Operand).getReg())))) {
6849 if (
static_cast<ARMOperand &
>(*Operand).isVectorIndex() ||
6850 static_cast<ARMOperand &
>(*Operand).isQReg())
6866 unsigned VariantID);
6877void ARMAsmParser::fixupGNULDRDAlias(
StringRef Mnemonic,
6879 unsigned MnemonicOpsEndInd) {
6880 if (Mnemonic !=
"ldrd" && Mnemonic !=
"strd" && Mnemonic !=
"ldrexd" &&
6881 Mnemonic !=
"strexd" && Mnemonic !=
"ldaexd" && Mnemonic !=
"stlexd")
6884 unsigned IdX = Mnemonic ==
"strexd" || Mnemonic ==
"stlexd"
6885 ? MnemonicOpsEndInd + 1
6886 : MnemonicOpsEndInd;
6891 ARMOperand &Op2 =
static_cast<ARMOperand &
>(*
Operands[IdX]);
6892 ARMOperand &Op3 =
static_cast<ARMOperand &
>(*
Operands[IdX + 1]);
6896 if (!Op3.isGPRMem())
6903 unsigned RtEncoding =
MRI->getEncodingValue(Op2.getReg());
6904 if (!
isThumb() && (RtEncoding & 1)) {
6909 if (Op2.getReg() == ARM::PC)
6911 unsigned PairedReg = GPR.
getRegister(RtEncoding + 1);
6912 if (!PairedReg || PairedReg == ARM::PC ||
6913 (PairedReg == ARM::SP && !hasV8Ops()))
6918 ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
6926bool ARMAsmParser::CDEConvertDualRegOperand(
StringRef Mnemonic,
6928 unsigned MnemonicOpsEndInd) {
6929 assert(MS.isCDEDualRegInstr(Mnemonic));
6931 if (
Operands.size() < 3 + MnemonicOpsEndInd)
6935 "operand must be an even-numbered register in the range [r0, r10]");
6968 RPair = ARM::R10_R11;
6983 for (
unsigned I = 0;
I < MnemonicOpsEndInd; ++
I)
6984 if (
static_cast<ARMOperand &
>(*
Operands[
I]).isCondCode()) {
6986 --MnemonicOpsEndInd;
6992 for (
unsigned I = 0;
I < MnemonicOpsEndInd; ++
I)
6993 if (
static_cast<ARMOperand &
>(*
Operands[
I]).isCCOut()) {
6995 --MnemonicOpsEndInd;
7001 for (
unsigned I = 0;
I < MnemonicOpsEndInd; ++
I)
7002 if (
static_cast<ARMOperand &
>(*
Operands[
I]).isVPTPred()) {
7004 --MnemonicOpsEndInd;
7019 const FeatureBitset &AvailableFeatures = getAvailableFeatures();
7020 unsigned AssemblerDialect = getParser().getAssemblerDialect();
7026 parseDirectiveReq(
Name, NameLoc);
7033 size_t Start = 0, Next =
Name.find(
'.');
7040 unsigned ProcessorIMod;
7043 Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode, VPTPredicationCode,
7044 CarrySetting, ProcessorIMod, ITMask);
7047 if (isThumbOne() && PredicationCode !=
ARMCC::AL && Mnemonic !=
"b") {
7048 return Error(NameLoc,
"conditional execution not supported in Thumb1");
7051 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
7064 if (Mnemonic ==
"it" || Mnemonic.
starts_with(
"vpt") ||
7067 Mnemonic ==
"vpt" ?
SMLoc::getFromPointer(NameLoc.getPointer() + 3) :
7068 SMLoc::getFromPointer(NameLoc.getPointer() + 4);
7069 if (ITMask.
size() > 3) {
7070 if (Mnemonic ==
"it")
7071 return Error(Loc,
"too many conditions on IT instruction");
7072 return Error(Loc,
"too many conditions on VPT instruction");
7076 if (Pos !=
't' && Pos !=
'e') {
7077 return Error(Loc,
"illegal IT block condition mask '" + ITMask +
"'");
7083 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
7096 bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode;
7097 getMnemonicAcceptInfo(Mnemonic, ExtraToken,
Name, CanAcceptCarrySet,
7098 CanAcceptPredicationCode, CanAcceptVPTPredicationCode);
7102 if (!CanAcceptCarrySet && CarrySetting) {
7103 return Error(NameLoc,
"instruction '" + Mnemonic +
7104 "' can not set flags, but 's' suffix specified");
7108 if (!CanAcceptPredicationCode && PredicationCode !=
ARMCC::AL) {
7109 return Error(NameLoc,
"instruction '" + Mnemonic +
7110 "' is not predicable, but condition code specified");
7115 if (!CanAcceptVPTPredicationCode && VPTPredicationCode !=
ARMVCC::None) {
7116 return Error(NameLoc,
"instruction '" + Mnemonic +
7117 "' is not VPT predicable, but VPT code T/E is specified");
7121 if (CanAcceptCarrySet && CarrySetting) {
7123 Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
7131 Operands.push_back(ARMOperand::CreateCondCode(
7139 !(Mnemonic.
starts_with(
"vcvt") && Mnemonic !=
"vcvta" &&
7140 Mnemonic !=
"vcvtn" && Mnemonic !=
"vcvtp" && Mnemonic !=
"vcvtm")) {
7143 Operands.push_back(ARMOperand::CreateVPTPred(
7148 if (ProcessorIMod) {
7149 Operands.push_back(ARMOperand::CreateImm(
7152 }
else if (Mnemonic ==
"cps" && isMClass()) {
7153 return Error(NameLoc,
"instruction 'cps' requires effect for M-class");
7159 Next =
Name.find(
'.', Start + 1);
7160 ExtraToken =
Name.slice(Start, Next);
7169 if (ExtraToken ==
".n" && !
isThumb()) {
7171 return Error(Loc,
"instruction with .n (narrow) qualifier not allowed in "
7178 if (ExtraToken !=
".n" && (
isThumb() || ExtraToken !=
".w")) {
7180 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
7187 unsigned MnemonicOpsEndInd =
Operands.size();
7192 if (parseOperand(
Operands, Mnemonic)) {
7198 if (parseOperand(
Operands, Mnemonic)) {
7207 tryConvertingToTwoOperandForm(Mnemonic, PredicationCode, CarrySetting,
7210 if (hasCDE() && MS.isCDEInstr(Mnemonic)) {
7218 if (MS.isCDEDualRegInstr(Mnemonic)) {
7220 CDEConvertDualRegOperand(Mnemonic,
Operands, MnemonicOpsEndInd);
7227 if (!shouldOmitVectorPredicateOperand(Mnemonic,
Operands,
7228 MnemonicOpsEndInd) &&
7229 Mnemonic ==
"vmov" && PredicationCode ==
ARMCC::LT) {
7237 Mnemonic.
size() - 1 + CarrySetting);
7241 ARMOperand::CreateToken(
StringRef(
"vmovlt"), MLoc));
7242 }
else if (Mnemonic ==
"vcvt" && PredicationCode ==
ARMCC::NE &&
7243 !shouldOmitVectorPredicateOperand(Mnemonic,
Operands,
7244 MnemonicOpsEndInd)) {
7253 Mnemonic.
size() - 1 + CarrySetting);
7257 ARMOperand::CreateToken(
StringRef(
"vcvtn"), MLoc));
7258 }
else if (Mnemonic ==
"vmul" && PredicationCode ==
ARMCC::LT &&
7259 !shouldOmitVectorPredicateOperand(Mnemonic,
Operands,
7260 MnemonicOpsEndInd)) {
7268 ARMOperand::CreateToken(
StringRef(
"vmullt"), MLoc));
7273 if (!shouldOmitVectorPredicateOperand(Mnemonic,
Operands,
7274 MnemonicOpsEndInd)) {
7281 if (Mnemonic.
starts_with(
"vcvtt") && MnemonicOpsEndInd > 2) {
7283 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd - 2]);
7285 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd - 1]);
7286 if (!(Sz1.isToken() && Sz1.getToken().starts_with(
".f") &&
7287 Sz2.isToken() && Sz2.getToken().starts_with(
".f"))) {
7292 Mnemonic = Mnemonic.
substr(0, 4);
7294 ARMOperand::CreateToken(Mnemonic, MLoc));
7298 Mnemonic.
size() + CarrySetting);
7301 ARMOperand::CreateVPTPred(
7303 ++MnemonicOpsEndInd;
7305 }
else if (CanAcceptVPTPredicationCode) {
7309 if (shouldOmitVectorPredicateOperand(Mnemonic,
Operands,
7310 MnemonicOpsEndInd)) {
7317 bool usedVPTPredicationCode =
false;
7319 if (
static_cast<ARMOperand &
>(*
Operands[
I]).isVPTPred())
7320 usedVPTPredicationCode =
true;
7321 if (!usedVPTPredicationCode) {
7329 Mnemonic =
Name.slice(0, Mnemonic.
size() + 1);
7332 ARMOperand::CreateToken(Mnemonic, NameLoc));
7341 if (!
isThumb() && Mnemonic ==
"blx" &&
7342 Operands.size() == MnemonicOpsEndInd + 1 &&
7343 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]).isImm())
7347 fixupGNULDRDAlias(Mnemonic,
Operands, MnemonicOpsEndInd);
7356 bool IsLoad = (Mnemonic ==
"ldrexd" || Mnemonic ==
"ldaexd");
7357 if (!
isThumb() &&
Operands.size() > MnemonicOpsEndInd + 1 + (!IsLoad) &&
7358 (Mnemonic ==
"ldrexd" || Mnemonic ==
"strexd" || Mnemonic ==
"ldaexd" ||
7359 Mnemonic ==
"stlexd")) {
7360 unsigned Idx = IsLoad ? MnemonicOpsEndInd : MnemonicOpsEndInd + 1;
7361 ARMOperand &Op1 =
static_cast<ARMOperand &
>(*
Operands[
Idx]);
7362 ARMOperand &Op2 =
static_cast<ARMOperand &
>(*
Operands[
Idx + 1]);
7366 if (Op1.isReg() && MRC.
contains(Op1.getReg())) {
7367 unsigned Reg1 = Op1.getReg();
7368 unsigned Rt =
MRI->getEncodingValue(Reg1);
7369 unsigned Reg2 = Op2.getReg();
7370 unsigned Rt2 =
MRI->getEncodingValue(Reg2);
7373 return Error(Op2.getStartLoc(),
7374 IsLoad ?
"destination operands must be sequential"
7375 :
"source operands must be sequential");
7381 IsLoad ?
"destination operands must start start at an even register"
7382 :
"source operands must start start at an even register");
7384 unsigned NewReg =
MRI->getMatchingSuperReg(
7385 Reg1, ARM::gsub_0, &(
MRI->getRegClass(ARM::GPRPairRegClassID)));
7388 ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
7398 if (isThumbTwo() && Mnemonic ==
"sub" &&
7399 Operands.size() == MnemonicOpsEndInd + 3 &&
7400 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]).isReg() &&
7401 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]).getReg() ==
7403 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1]).isReg() &&
7404 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1]).getReg() ==
7406 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 2]).isImm()) {
7407 Operands.front() = ARMOperand::CreateToken(
Name, NameLoc);
7419 unsigned Reg,
unsigned HiReg,
7420 bool &containsReg) {
7421 containsReg =
false;
7447 return Inst.
getOpcode() == ARM::tBKPT ||
7454 unsigned MnemonicOpsEndInd) {
7455 for (
unsigned I = MnemonicOpsEndInd;
I <
Operands.size(); ++
I) {
7456 const ARMOperand &
Op =
static_cast<const ARMOperand &
>(*
Operands[
I]);
7457 if (
Op.isRegList()) {
7464bool ARMAsmParser::validatetLDMRegList(
const MCInst &Inst,
7466 unsigned MnemonicOpsEndInd,
7467 unsigned ListIndex,
bool IsARPop) {
7472 if (!IsARPop && ListContainsSP)
7475 "SP may not be in the register list");
7476 if (ListContainsPC && ListContainsLR)
7479 "PC and LR may not be in the register list simultaneously");
7483bool ARMAsmParser::validatetSTMRegList(
const MCInst &Inst,
7485 unsigned MnemonicOpsEndInd,
7486 unsigned ListIndex) {
7490 if (ListContainsSP && ListContainsPC)
7493 "SP and PC may not be in the register list");
7497 "SP may not be in the register list");
7501 "PC may not be in the register list");
7506 bool Load,
bool ARMMode,
bool Writeback,
7507 unsigned MnemonicOpsEndInd) {
7508 unsigned RtIndex =
Load || !Writeback ? 0 : 1;
7521 "Rt must be even-numbered");
7524 if (Rt2 != Rt + 1) {
7527 "destination operands must be sequential");
7530 "source operands must be sequential");
7537 if (!ARMMode && Load) {
7540 "destination operands can't be identical");
7546 if (Rn == Rt || Rn == Rt2) {
7549 "base register needs to be different from destination "
7552 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7553 "source register and base register can't be identical");
7576 ARMOperand &
Op =
static_cast<ARMOperand &
>(MCOp);
7582 const MCExpr *E = dyn_cast<MCExpr>(
Op.getImm());
7589bool ARMAsmParser::validateInstruction(
MCInst &Inst,
7591 unsigned MnemonicOpsEndInd) {
7601 return Error(Loc,
"instructions in IT block must be predicable");
7604 if (
Cond != currentITCond()) {
7608 if (
static_cast<ARMOperand &
>(*
Operands[
I]).isCondCode())
7610 return Error(CondLoc,
"incorrect condition in IT block; got '" +
7612 "', but expected '" +
7621 return Error(Loc,
"predicated instructions must be in IT block");
7625 return Warning(Loc,
"predicated instructions should be in IT block");
7632 if (MCID.
operands()[i].isPredicate()) {
7634 return Error(Loc,
"instruction is not predicable");
7642 if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
7643 return Error(Loc,
"instruction must be outside of IT block or the last instruction in an IT block");
7647 unsigned Bit = extractITMaskBit(VPTState.Mask, VPTState.CurPosition);
7649 return Error(Loc,
"instruction in VPT block must be predicable");
7652 if (Pred != VPTPred) {
7655 if (
static_cast<ARMOperand &
>(*
Operands[
I]).isVPTPred())
7657 return Error(PredLoc,
"incorrect predication in VPT block; got '" +
7659 "', but expected '" +
7666 return Error(Loc,
"VPT predicated instructions must be in VPT block");
7668 const unsigned Opcode = Inst.
getOpcode();
7673 case ARM::VLSTM_T2: {
7677 MnemonicOpsEndInd + 2) {
7678 ARMOperand &
Op =
static_cast<ARMOperand &
>(
7681 auto &RegList =
Op.getRegList();
7683 if (RegList.size() == 32 && !hasV8_1MMainline()) {
7684 return Error(
Op.getEndLoc(),
"T2 version requires v8.1-M.Main");
7687 if (hasD32() && RegList.size() != 32) {
7688 return Error(
Op.getEndLoc(),
"operand must be exactly {d0-d31}");
7691 if (!hasD32() && (RegList.size() != 16 && RegList.size() != 32)) {
7693 "operand must be exactly {d0-d15} (T1) or {d0-d31} (T2)");
7709 return Error(Loc,
"unpredictable IT predicate sequence");
7713 if (validateLDRDSTRD(Inst,
Operands,
true,
true,
7714 false, MnemonicOpsEndInd))
7718 case ARM::LDRD_POST:
7719 if (validateLDRDSTRD(Inst,
Operands,
true,
true,
7720 true, MnemonicOpsEndInd))
7724 if (validateLDRDSTRD(Inst,
Operands,
true,
false,
7725 false, MnemonicOpsEndInd))
7728 case ARM::t2LDRD_PRE:
7729 case ARM::t2LDRD_POST:
7730 if (validateLDRDSTRD(Inst,
Operands,
true,
false,
7731 true, MnemonicOpsEndInd))
7737 if (RmReg == ARM::SP && !hasV8Ops())
7739 "r13 (SP) is an unpredictable operand to BXJ");
7743 if (validateLDRDSTRD(Inst,
Operands,
false,
true,
7744 false, MnemonicOpsEndInd))
7748 case ARM::STRD_POST:
7749 if (validateLDRDSTRD(Inst,
Operands,
false,
true,
7750 true, MnemonicOpsEndInd))
7753 case ARM::t2STRD_PRE:
7754 case ARM::t2STRD_POST:
7755 if (validateLDRDSTRD(Inst,
Operands,
false,
false,
7756 true, MnemonicOpsEndInd))
7759 case ARM::STR_PRE_IMM:
7760 case ARM::STR_PRE_REG:
7761 case ARM::t2STR_PRE:
7762 case ARM::STR_POST_IMM:
7763 case ARM::STR_POST_REG:
7764 case ARM::t2STR_POST:
7766 case ARM::t2STRH_PRE:
7767 case ARM::STRH_POST:
7768 case ARM::t2STRH_POST:
7769 case ARM::STRB_PRE_IMM:
7770 case ARM::STRB_PRE_REG:
7771 case ARM::t2STRB_PRE:
7772 case ARM::STRB_POST_IMM:
7773 case ARM::STRB_POST_REG:
7774 case ARM::t2STRB_POST: {
7780 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
7781 "source register and base register can't be identical");
7784 case ARM::t2LDR_PRE_imm:
7785 case ARM::t2LDR_POST_imm:
7786 case ARM::t2STR_PRE_imm:
7787 case ARM::t2STR_POST_imm: {
7794 "destination register and base register can't be identical");
7795 if (Inst.
getOpcode() == ARM::t2LDR_POST_imm ||
7796 Inst.
getOpcode() == ARM::t2STR_POST_imm) {
7798 if (Imm > 255 || Imm < -255)
7799 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7800 "operand must be in range [-255, 255]");
7802 if (Inst.
getOpcode() == ARM::t2STR_PRE_imm ||
7803 Inst.
getOpcode() == ARM::t2STR_POST_imm) {
7806 "operand must be a register in range [r0, r14]");
7812 case ARM::t2LDRB_OFFSET_imm:
7813 case ARM::t2LDRB_PRE_imm:
7814 case ARM::t2LDRB_POST_imm:
7815 case ARM::t2STRB_OFFSET_imm:
7816 case ARM::t2STRB_PRE_imm:
7817 case ARM::t2STRB_POST_imm: {
7818 if (Inst.
getOpcode() == ARM::t2LDRB_POST_imm ||
7819 Inst.
getOpcode() == ARM::t2STRB_POST_imm ||
7820 Inst.
getOpcode() == ARM::t2LDRB_PRE_imm ||
7821 Inst.
getOpcode() == ARM::t2STRB_PRE_imm) {
7823 if (Imm > 255 || Imm < -255)
7824 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7825 "operand must be in range [-255, 255]");
7826 }
else if (Inst.
getOpcode() == ARM::t2LDRB_OFFSET_imm ||
7827 Inst.
getOpcode() == ARM::t2STRB_OFFSET_imm) {
7829 if (Imm > 0 || Imm < -255)
7830 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7831 "operand must be in range [0, 255] with a negative sign");
7835 "if operand is PC, should call the LDRB (literal)");
7840 case ARM::t2LDRH_OFFSET_imm:
7841 case ARM::t2LDRH_PRE_imm:
7842 case ARM::t2LDRH_POST_imm:
7843 case ARM::t2STRH_OFFSET_imm:
7844 case ARM::t2STRH_PRE_imm:
7845 case ARM::t2STRH_POST_imm: {
7846 if (Inst.
getOpcode() == ARM::t2LDRH_POST_imm ||
7847 Inst.
getOpcode() == ARM::t2STRH_POST_imm ||
7848 Inst.
getOpcode() == ARM::t2LDRH_PRE_imm ||
7849 Inst.
getOpcode() == ARM::t2STRH_PRE_imm) {
7851 if (Imm > 255 || Imm < -255)
7852 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7853 "operand must be in range [-255, 255]");
7854 }
else if (Inst.
getOpcode() == ARM::t2LDRH_OFFSET_imm ||
7855 Inst.
getOpcode() == ARM::t2STRH_OFFSET_imm) {
7857 if (Imm > 0 || Imm < -255)
7858 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7859 "operand must be in range [0, 255] with a negative sign");
7863 "if operand is PC, should call the LDRH (literal)");
7868 case ARM::t2LDRSB_OFFSET_imm:
7869 case ARM::t2LDRSB_PRE_imm:
7870 case ARM::t2LDRSB_POST_imm: {
7871 if (Inst.
getOpcode() == ARM::t2LDRSB_POST_imm ||
7872 Inst.
getOpcode() == ARM::t2LDRSB_PRE_imm) {
7874 if (Imm > 255 || Imm < -255)
7875 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7876 "operand must be in range [-255, 255]");
7877 }
else if (Inst.
getOpcode() == ARM::t2LDRSB_OFFSET_imm) {
7879 if (Imm > 0 || Imm < -255)
7880 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7881 "operand must be in range [0, 255] with a negative sign");
7884 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7885 "if operand is PC, should call the LDRH (literal)");
7890 case ARM::t2LDRSH_OFFSET_imm:
7891 case ARM::t2LDRSH_PRE_imm:
7892 case ARM::t2LDRSH_POST_imm: {
7893 if (Inst.
getOpcode() == ARM::t2LDRSH_POST_imm ||
7894 Inst.
getOpcode() == ARM::t2LDRSH_PRE_imm) {
7896 if (Imm > 255 || Imm < -255)
7897 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7898 "operand must be in range [-255, 255]");
7899 }
else if (Inst.
getOpcode() == ARM::t2LDRSH_OFFSET_imm) {
7901 if (Imm > 0 || Imm < -255)
7902 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7903 "operand must be in range [0, 255] with a negative sign");
7907 "if operand is PC, should call the LDRH (literal)");
7912 case ARM::LDR_PRE_IMM:
7913 case ARM::LDR_PRE_REG:
7914 case ARM::t2LDR_PRE:
7915 case ARM::LDR_POST_IMM:
7916 case ARM::LDR_POST_REG:
7917 case ARM::t2LDR_POST:
7919 case ARM::t2LDRH_PRE:
7920 case ARM::LDRH_POST:
7921 case ARM::t2LDRH_POST:
7922 case ARM::LDRSH_PRE:
7923 case ARM::t2LDRSH_PRE:
7924 case ARM::LDRSH_POST:
7925 case ARM::t2LDRSH_POST:
7926 case ARM::LDRB_PRE_IMM:
7927 case ARM::LDRB_PRE_REG:
7928 case ARM::t2LDRB_PRE:
7929 case ARM::LDRB_POST_IMM:
7930 case ARM::LDRB_POST_REG:
7931 case ARM::t2LDRB_POST:
7932 case ARM::LDRSB_PRE:
7933 case ARM::t2LDRSB_PRE:
7934 case ARM::LDRSB_POST:
7935 case ARM::t2LDRSB_POST: {
7942 "destination register and base register can't be identical");
7946 case ARM::MVE_VLDRBU8_rq:
7947 case ARM::MVE_VLDRBU16_rq:
7948 case ARM::MVE_VLDRBS16_rq:
7949 case ARM::MVE_VLDRBU32_rq:
7950 case ARM::MVE_VLDRBS32_rq:
7951 case ARM::MVE_VLDRHU16_rq:
7952 case ARM::MVE_VLDRHU16_rq_u:
7953 case ARM::MVE_VLDRHU32_rq:
7954 case ARM::MVE_VLDRHU32_rq_u:
7955 case ARM::MVE_VLDRHS32_rq:
7956 case ARM::MVE_VLDRHS32_rq_u:
7957 case ARM::MVE_VLDRWU32_rq:
7958 case ARM::MVE_VLDRWU32_rq_u:
7959 case ARM::MVE_VLDRDU64_rq:
7960 case ARM::MVE_VLDRDU64_rq_u:
7961 case ARM::MVE_VLDRWU32_qi:
7962 case ARM::MVE_VLDRWU32_qi_pre:
7963 case ARM::MVE_VLDRDU64_qi:
7964 case ARM::MVE_VLDRDU64_qi_pre: {
7966 unsigned QdIdx = 0, QmIdx = 2;
7967 bool QmIsPointer =
false;
7969 case ARM::MVE_VLDRWU32_qi:
7970 case ARM::MVE_VLDRDU64_qi:
7974 case ARM::MVE_VLDRWU32_qi_pre:
7975 case ARM::MVE_VLDRDU64_qi_pre:
7986 Twine(
"destination vector register and vector ") +
7987 (QmIsPointer ?
"pointer" :
"offset") +
7988 " register can't be identical");
8000 if (Widthm1 >= 32 - LSB)
8001 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8002 "bitfield width must be in range [1,32-lsb]");
8014 bool HasWritebackToken =
8015 (
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
8017 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
8018 .getToken() ==
"!");
8020 bool ListContainsBase;
8024 "registers must be in range r0-r7");
8026 if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
8029 "writeback operator '!' expected");
8032 if (ListContainsBase && HasWritebackToken)
8033 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8034 "writeback operator '!' not allowed when base register "
8035 "in register list");
8037 if (validatetLDMRegList(Inst,
Operands, MnemonicOpsEndInd, 3))
8041 case ARM::LDMIA_UPD:
8042 case ARM::LDMDB_UPD:
8043 case ARM::LDMIB_UPD:
8044 case ARM::LDMDA_UPD:
8051 "writeback register not allowed in register list");
8055 if (validatetLDMRegList(Inst,
Operands, MnemonicOpsEndInd, 3))
8060 if (validatetSTMRegList(Inst,
Operands, MnemonicOpsEndInd, 3))
8063 case ARM::t2LDMIA_UPD:
8064 case ARM::t2LDMDB_UPD:
8065 case ARM::t2STMIA_UPD:
8066 case ARM::t2STMDB_UPD:
8069 "writeback register not allowed in register list");
8071 if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
8072 if (validatetLDMRegList(Inst,
Operands, MnemonicOpsEndInd, 3))
8075 if (validatetSTMRegList(Inst,
Operands, MnemonicOpsEndInd, 3))
8080 case ARM::sysLDMIA_UPD:
8081 case ARM::sysLDMDA_UPD:
8082 case ARM::sysLDMDB_UPD:
8083 case ARM::sysLDMIB_UPD:
8085 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8086 "writeback register only allowed on system LDM "
8087 "if PC in register-list");
8089 case ARM::sysSTMIA_UPD:
8090 case ARM::sysSTMDA_UPD:
8091 case ARM::sysSTMDB_UPD:
8092 case ARM::sysSTMIB_UPD:
8094 "system STM cannot have writeback register");
8099 bool ListContainsBase;
8103 "registers must be in range r0-r7 or pc");
8104 if (validatetLDMRegList(Inst,
Operands, MnemonicOpsEndInd, 2, !isMClass()))
8109 bool ListContainsBase;
8113 "registers must be in range r0-r7 or lr");
8114 if (validatetSTMRegList(Inst,
Operands, MnemonicOpsEndInd, 2))
8118 case ARM::tSTMIA_UPD: {
8119 bool ListContainsBase, InvalidLowList;
8121 0, ListContainsBase);
8122 if (InvalidLowList && !isThumbTwo())
8123 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8124 "registers must be in range r0-r7");
8128 if (InvalidLowList && ListContainsBase)
8130 "writeback operator '!' not allowed when base register "
8131 "in register list");
8133 if (validatetSTMRegList(Inst,
Operands, MnemonicOpsEndInd, 4))
8140 if (!isThumbTwo() &&
8142 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8143 "source register must be the same as destination");
8153 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8154 "source register must be sp if destination is sp");
8159 if (!(
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]))
8160 .isSignedOffset<11, 1>())
8162 "branch target out of range");
8165 int op = (
Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8166 : MnemonicOpsEndInd + 1;
8167 ARMOperand &Operand =
static_cast<ARMOperand &
>(*
Operands[
op]);
8169 if (!isa<MCBinaryExpr>(Operand.getImm()) &&
8170 !Operand.isSignedOffset<24, 1>())
8171 return Error(
Operands[
op]->getStartLoc(),
"branch target out of range");
8176 if (!
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd])
8177 .isSignedOffset<8, 1>())
8179 "branch target out of range");
8182 int Op = (
Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8183 : MnemonicOpsEndInd + 1;
8184 if (!
static_cast<ARMOperand &
>(*
Operands[
Op]).isSignedOffset<20, 1>())
8185 return Error(
Operands[
Op]->getStartLoc(),
"branch target out of range");
8190 if (!
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
8191 .isUnsignedOffset<6, 1>())
8192 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8193 "branch target out of range");
8199 case ARM::t2MOVTi16:
8207 int i = (
Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8208 : MnemonicOpsEndInd + 1;
8209 ARMOperand &
Op =
static_cast<ARMOperand &
>(*
Operands[i]);
8212 const MCExpr *E = dyn_cast<MCExpr>(
Op.getImm());
8214 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
8219 "immediate expression for mov requires :lower16: or :upper16");
8225 return Error(
Op.getStartLoc(),
8226 "Immediate expression for Thumb adds requires :lower0_7:,"
8227 " :lower8_15:, :upper0_7: or :upper8_15:");
8233 return Error(
Op.getStartLoc(),
8234 "Immediate expression for Thumb movs requires :lower0_7:,"
8235 " :lower8_15:, :upper0_7: or :upper8_15:");
8244 if (Imm8 == 0x10 && Pred !=
ARMCC::AL && hasRAS())
8245 return Error(
Operands[1]->getStartLoc(),
"instruction 'esb' is not "
8246 "predicable, but condition "
8249 return Error(
Operands[1]->getStartLoc(),
"instruction 'csdb' is not "
8250 "predicable, but condition "
8258 if (!
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd])
8259 .isUnsignedOffset<4, 1>() ||
8262 "branch location out of range or not a multiple of 2");
8265 if (Opcode == ARM::t2BFi) {
8266 if (!
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
8267 .isSignedOffset<16, 1>())
8269 "branch target out of range or not a multiple of 2");
8270 }
else if (Opcode == ARM::t2BFLi) {
8271 if (!
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
8272 .isSignedOffset<18, 1>())
8274 "branch target out of range or not a multiple of 2");
8279 if (!
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd])
8280 .isUnsignedOffset<4, 1>() ||
8283 "branch location out of range or not a multiple of 2");
8285 if (!
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
8286 .isSignedOffset<16, 1>())
8287 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8288 "branch target out of range or not a multiple of 2");
8291 "branch location and else branch target should either both be "
8292 "immediates or both labels");
8296 if (Diff != 4 && Diff != 2)
8299 "else branch target must be 2 or 4 greater than the branch location");
8306 !ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
8309 "invalid register in register list. Valid registers are "
8310 "r0-r12, lr/r14 and APSR.");
8327 "instruction 'ssbb' is not predicable, but condition code "
8331 "instruction 'pssbb' is not predicable, but condition code "
8335 case ARM::VMOVRRS: {
8340 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8341 "source operands must be sequential");
8344 case ARM::VMOVSRR: {
8350 "destination operands must be sequential");
8354 case ARM::VSTMDIA: {
8356 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1]);
8357 auto &RegList =
Op.getRegList();
8358 if (RegList.size() < 1 || RegList.size() > 16)
8359 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8360 "list of registers must be at least 1 and at most 16");
8363 case ARM::MVE_VQDMULLs32bh:
8364 case ARM::MVE_VQDMULLs32th:
8365 case ARM::MVE_VCMULf32:
8366 case ARM::MVE_VMULLBs32:
8367 case ARM::MVE_VMULLTs32:
8368 case ARM::MVE_VMULLBu32:
8369 case ARM::MVE_VMULLTu32: {
8371 Operands[MnemonicOpsEndInd + 1]->getReg()) {
8373 "Qd register and Qn register can't be identical");
8378 "Qd register and Qm register can't be identical");
8382 case ARM::MVE_VREV64_8:
8383 case ARM::MVE_VREV64_16:
8384 case ARM::MVE_VREV64_32:
8385 case ARM::MVE_VQDMULL_qr_s32bh:
8386 case ARM::MVE_VQDMULL_qr_s32th: {
8390 "Qd register and Qn register can't be identical");
8394 case ARM::MVE_VCADDi32:
8395 case ARM::MVE_VCADDf32:
8396 case ARM::MVE_VHCADDs32: {
8400 "Qd register and Qm register can't be identical");
8404 case ARM::MVE_VMOV_rr_q: {
8407 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8408 "Q-registers must be the same");
8409 if (
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 3])
8410 .getVectorIndex() !=
8411 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 5])
8414 return Error(
Operands[MnemonicOpsEndInd + 3]->getStartLoc(),
8415 "Q-register indexes must be 2 and 0 or 3 and 1");
8418 case ARM::MVE_VMOV_q_rr: {
8422 "Q-registers must be the same");
8423 if (
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
8424 .getVectorIndex() !=
8425 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 3])
8428 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8429 "Q-register indexes must be 2 and 0 or 3 and 1");
8432 case ARM::MVE_SQRSHR:
8433 case ARM::MVE_UQRSHL: {
8437 "Rda register and Rm register can't be identical");
8458 case ARM::t2SMLALBB:
8459 case ARM::t2SMLALBT:
8461 case ARM::t2SMLALDX:
8462 case ARM::t2SMLALTB:
8463 case ARM::t2SMLALTT:
8465 case ARM::t2SMLSLDX:
8466 case ARM::t2SMULL: {
8471 "unpredictable instruction, RdHi and RdLo must be different");
8479 case ARM::CDE_CX1DA:
8483 case ARM::CDE_CX2DA:
8487 case ARM::CDE_CX3DA:
8488 case ARM::CDE_VCX1_vec:
8489 case ARM::CDE_VCX1_fpsp:
8490 case ARM::CDE_VCX1_fpdp:
8491 case ARM::CDE_VCX1A_vec:
8492 case ARM::CDE_VCX1A_fpsp:
8493 case ARM::CDE_VCX1A_fpdp:
8494 case ARM::CDE_VCX2_vec:
8495 case ARM::CDE_VCX2_fpsp:
8496 case ARM::CDE_VCX2_fpdp:
8497 case ARM::CDE_VCX2A_vec:
8498 case ARM::CDE_VCX2A_fpsp:
8499 case ARM::CDE_VCX2A_fpdp:
8500 case ARM::CDE_VCX3_vec:
8501 case ARM::CDE_VCX3_fpsp:
8502 case ARM::CDE_VCX3_fpdp:
8503 case ARM::CDE_VCX3A_vec:
8504 case ARM::CDE_VCX3A_fpsp:
8505 case ARM::CDE_VCX3A_fpdp: {
8507 "CDE operand 1 must be a coprocessor ID");
8511 "coprocessor must be configured as CDE");
8512 else if (Coproc >= 8)
8514 "coprocessor must be in the range [p0, p7]");
8520 case ARM::t2LDC2L_OFFSET:
8521 case ARM::t2LDC2L_OPTION:
8522 case ARM::t2LDC2L_POST:
8523 case ARM::t2LDC2L_PRE:
8524 case ARM::t2LDC2_OFFSET:
8525 case ARM::t2LDC2_OPTION:
8526 case ARM::t2LDC2_POST:
8527 case ARM::t2LDC2_PRE:
8528 case ARM::t2LDCL_OFFSET:
8529 case ARM::t2LDCL_OPTION:
8530 case ARM::t2LDCL_POST:
8531 case ARM::t2LDCL_PRE:
8532 case ARM::t2LDC_OFFSET:
8533 case ARM::t2LDC_OPTION:
8534 case ARM::t2LDC_POST:
8535 case ARM::t2LDC_PRE:
8544 case ARM::t2STC2L_OFFSET:
8545 case ARM::t2STC2L_OPTION:
8546 case ARM::t2STC2L_POST:
8547 case ARM::t2STC2L_PRE:
8548 case ARM::t2STC2_OFFSET:
8549 case ARM::t2STC2_OPTION:
8550 case ARM::t2STC2_POST:
8551 case ARM::t2STC2_PRE:
8552 case ARM::t2STCL_OFFSET:
8553 case ARM::t2STCL_OPTION:
8554 case ARM::t2STCL_POST:
8555 case ARM::t2STCL_PRE:
8556 case ARM::t2STC_OFFSET:
8557 case ARM::t2STC_OPTION:
8558 case ARM::t2STC_POST:
8559 case ARM::t2STC_PRE: {
8564 if (Opcode == ARM::t2MRRC || Opcode == ARM::t2MRRC2)
8566 else if (Opcode == ARM::t2MRC || Opcode == ARM::t2MRC2)
8569 "Operand must be a coprocessor ID");
8574 "coprocessor must be configured as GCP");
8586 case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VST1LNd8_UPD;
8587 case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VST1LNd16_UPD;
8588 case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VST1LNd32_UPD;
8589 case ARM::VST1LNdWB_register_Asm_8: Spacing = 1;
return ARM::VST1LNd8_UPD;
8590 case ARM::VST1LNdWB_register_Asm_16: Spacing = 1;
return ARM::VST1LNd16_UPD;
8591 case ARM::VST1LNdWB_register_Asm_32: Spacing = 1;
return ARM::VST1LNd32_UPD;
8592 case ARM::VST1LNdAsm_8: Spacing = 1;
return ARM::VST1LNd8;
8593 case ARM::VST1LNdAsm_16: Spacing = 1;
return ARM::VST1LNd16;
8594 case ARM::VST1LNdAsm_32: Spacing = 1;
return ARM::VST1LNd32;
8597 case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VST2LNd8_UPD;
8598 case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VST2LNd16_UPD;
8599 case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VST2LNd32_UPD;
8600 case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2;
return ARM::VST2LNq16_UPD;
8601 case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VST2LNq32_UPD;
8603 case ARM::VST2LNdWB_register_Asm_8: Spacing = 1;
return ARM::VST2LNd8_UPD;
8604 case ARM::VST2LNdWB_register_Asm_16: Spacing = 1;
return ARM::VST2LNd16_UPD;
8605 case ARM::VST2LNdWB_register_Asm_32: Spacing = 1;
return ARM::VST2LNd32_UPD;
8606 case ARM::VST2LNqWB_register_Asm_16: Spacing = 2;
return ARM::VST2LNq16_UPD;
8607 case ARM::VST2LNqWB_register_Asm_32: Spacing = 2;
return ARM::VST2LNq32_UPD;
8609 case ARM::VST2LNdAsm_8: Spacing = 1;
return ARM::VST2LNd8;
8610 case ARM::VST2LNdAsm_16: Spacing = 1;
return ARM::VST2LNd16;
8611 case ARM::VST2LNdAsm_32: Spacing = 1;
return ARM::VST2LNd32;
8612 case ARM::VST2LNqAsm_16: Spacing = 2;
return ARM::VST2LNq16;
8613 case ARM::VST2LNqAsm_32: Spacing = 2;
return ARM::VST2LNq32;
8616 case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VST3LNd8_UPD;
8617 case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VST3LNd16_UPD;
8618 case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VST3LNd32_UPD;
8619 case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1;
return ARM::VST3LNq16_UPD;
8620 case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VST3LNq32_UPD;
8621 case ARM::VST3LNdWB_register_Asm_8: Spacing = 1;
return ARM::VST3LNd8_UPD;
8622 case ARM::VST3LNdWB_register_Asm_16: Spacing = 1;
return ARM::VST3LNd16_UPD;
8623 case ARM::VST3LNdWB_register_Asm_32: Spacing = 1;
return ARM::VST3LNd32_UPD;
8624 case ARM::VST3LNqWB_register_Asm_16: Spacing = 2;
return ARM::VST3LNq16_UPD;
8625 case ARM::VST3LNqWB_register_Asm_32: Spacing = 2;
return ARM::VST3LNq32_UPD;
8626 case ARM::VST3LNdAsm_8: Spacing = 1;
return ARM::VST3LNd8;
8627 case ARM::VST3LNdAsm_16: Spacing = 1;
return ARM::VST3LNd16;
8628 case ARM::VST3LNdAsm_32: Spacing = 1;
return ARM::VST3LNd32;
8629 case ARM::VST3LNqAsm_16: Spacing = 2;
return ARM::VST3LNq16;
8630 case ARM::VST3LNqAsm_32: Spacing = 2;
return ARM::VST3LNq32;
8633 case ARM::VST3dWB_fixed_Asm_8: Spacing = 1;
return ARM::VST3d8_UPD;
8634 case ARM::VST3dWB_fixed_Asm_16: Spacing = 1;
return ARM::VST3d16_UPD;
8635 case ARM::VST3dWB_fixed_Asm_32: Spacing = 1;
return ARM::VST3d32_UPD;
8636 case ARM::VST3qWB_fixed_Asm_8: Spacing = 2;
return ARM::VST3q8_UPD;
8637 case ARM::VST3qWB_fixed_Asm_16: Spacing = 2;
return ARM::VST3q16_UPD;
8638 case ARM::VST3qWB_fixed_Asm_32: Spacing = 2;
return ARM::VST3q32_UPD;
8639 case ARM::VST3dWB_register_Asm_8: Spacing = 1;
return ARM::VST3d8_UPD;
8640 case ARM::VST3dWB_register_Asm_16: Spacing = 1;
return ARM::VST3d16_UPD;
8641 case ARM::VST3dWB_register_Asm_32: Spacing = 1;
return ARM::VST3d32_UPD;
8642 case ARM::VST3qWB_register_Asm_8: Spacing = 2;
return ARM::VST3q8_UPD;
8643 case ARM::VST3qWB_register_Asm_16: Spacing = 2;
return ARM::VST3q16_UPD;
8644 case ARM::VST3qWB_register_Asm_32: Spacing = 2;
return ARM::VST3q32_UPD;
8645 case ARM::VST3dAsm_8: Spacing = 1;
return ARM::VST3d8;
8646 case ARM::VST3dAsm_16: Spacing = 1;
return ARM::VST3d16;
8647 case ARM::VST3dAsm_32: Spacing = 1;
return ARM::VST3d32;
8648 case ARM::VST3qAsm_8: Spacing = 2;
return ARM::VST3q8;
8649 case ARM::VST3qAsm_16: Spacing = 2;
return ARM::VST3q16;
8650 case ARM::VST3qAsm_32: Spacing = 2;
return ARM::VST3q32;
8653 case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VST4LNd8_UPD;
8654 case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VST4LNd16_UPD;
8655 case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VST4LNd32_UPD;
8656 case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1;
return ARM::VST4LNq16_UPD;
8657 case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VST4LNq32_UPD;
8658 case ARM::VST4LNdWB_register_Asm_8: Spacing = 1;
return ARM::VST4LNd8_UPD;
8659 case ARM::VST4LNdWB_register_Asm_16: Spacing = 1;
return ARM::VST4LNd16_UPD;
8660 case ARM::VST4LNdWB_register_Asm_32: Spacing = 1;
return ARM::VST4LNd32_UPD;
8661 case ARM::VST4LNqWB_register_Asm_16: Spacing = 2;
return ARM::VST4LNq16_UPD;
8662 case ARM::VST4LNqWB_register_Asm_32: Spacing = 2;
return ARM::VST4LNq32_UPD;
8663 case ARM::VST4LNdAsm_8: Spacing = 1;
return ARM::VST4LNd8;
8664 case ARM::VST4LNdAsm_16: Spacing = 1;
return ARM::VST4LNd16;
8665 case ARM::VST4LNdAsm_32: Spacing = 1;
return ARM::VST4LNd32;
8666 case ARM::VST4LNqAsm_16: Spacing = 2;
return ARM::VST4LNq16;
8667 case ARM::VST4LNqAsm_32: Spacing = 2;
return ARM::VST4LNq32;
8670 case ARM::VST4dWB_fixed_Asm_8: Spacing = 1;
return ARM::VST4d8_UPD;
8671 case ARM::VST4dWB_fixed_Asm_16: Spacing = 1;
return ARM::VST4d16_UPD;
8672 case ARM::VST4dWB_fixed_Asm_32: Spacing = 1;
return ARM::VST4d32_UPD;
8673 case ARM::VST4qWB_fixed_Asm_8: Spacing = 2;
return ARM::VST4q8_UPD;
8674 case ARM::VST4qWB_fixed_Asm_16: Spacing = 2;
return ARM::VST4q16_UPD;
8675 case ARM::VST4qWB_fixed_Asm_32: Spacing = 2;
return ARM::VST4q32_UPD;
8676 case ARM::VST4dWB_register_Asm_8: Spacing = 1;
return ARM::VST4d8_UPD;
8677 case ARM::VST4dWB_register_Asm_16: Spacing = 1;
return ARM::VST4d16_UPD;
8678 case ARM::VST4dWB_register_Asm_32: Spacing = 1;
return ARM::VST4d32_UPD;
8679 case ARM::VST4qWB_register_Asm_8: Spacing = 2;
return ARM::VST4q8_UPD;
8680 case ARM::VST4qWB_register_Asm_16: Spacing = 2;
return ARM::VST4q16_UPD;
8681 case ARM::VST4qWB_register_Asm_32: Spacing = 2;
return ARM::VST4q32_UPD;
8682 case ARM::VST4dAsm_8: Spacing = 1;
return ARM::VST4d8;
8683 case ARM::VST4dAsm_16: Spacing = 1;
return ARM::VST4d16;
8684 case ARM::VST4dAsm_32: Spacing = 1;
return ARM::VST4d32;
8685 case ARM::VST4qAsm_8: Spacing = 2;
return ARM::VST4q8;
8686 case ARM::VST4qAsm_16: Spacing = 2;
return ARM::VST4q16;
8687 case ARM::VST4qAsm_32: Spacing = 2;
return ARM::VST4q32;
8695 case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD1LNd8_UPD;
8696 case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD1LNd16_UPD;
8697 case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD1LNd32_UPD;
8698 case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1;
return ARM::VLD1LNd8_UPD;
8699 case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1;
return ARM::VLD1LNd16_UPD;
8700 case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1;
return ARM::VLD1LNd32_UPD;
8701 case ARM::VLD1LNdAsm_8: Spacing = 1;
return ARM::VLD1LNd8;
8702 case ARM::VLD1LNdAsm_16: Spacing = 1;
return ARM::VLD1LNd16;
8703 case ARM::VLD1LNdAsm_32: Spacing = 1;
return ARM::VLD1LNd32;
8706 case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD2LNd8_UPD;
8707 case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD2LNd16_UPD;
8708 case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD2LNd32_UPD;
8709 case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD2LNq16_UPD;
8710 case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD2LNq32_UPD;
8711 case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1;
return ARM::VLD2LNd8_UPD;
8712 case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1;
return ARM::VLD2LNd16_UPD;
8713 case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1;
return ARM::VLD2LNd32_UPD;
8714 case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2;
return ARM::VLD2LNq16_UPD;
8715 case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2;
return ARM::VLD2LNq32_UPD;
8716 case ARM::VLD2LNdAsm_8: Spacing = 1;
return ARM::VLD2LNd8;
8717 case ARM::VLD2LNdAsm_16: Spacing = 1;
return ARM::VLD2LNd16;
8718 case ARM::VLD2LNdAsm_32: Spacing = 1;
return ARM::VLD2LNd32;
8719 case ARM::VLD2LNqAsm_16: Spacing = 2;
return ARM::VLD2LNq16;
8720 case ARM::VLD2LNqAsm_32: Spacing = 2;
return ARM::VLD2LNq32;
8723 case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD3DUPd8_UPD;
8724 case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD3DUPd16_UPD;
8725 case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD3DUPd32_UPD;
8726 case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD3DUPq8_UPD;
8727 case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2;
return ARM::VLD3DUPq16_UPD;
8728 case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD3DUPq32_UPD;
8729 case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1;
return ARM::VLD3DUPd8_UPD;
8730 case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1;
return ARM::VLD3DUPd16_UPD;
8731 case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1;
return ARM::VLD3DUPd32_UPD;
8732 case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2;
return ARM::VLD3DUPq8_UPD;
8733 case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2;
return ARM::VLD3DUPq16_UPD;
8734 case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2;
return ARM::VLD3DUPq32_UPD;
8735 case ARM::VLD3DUPdAsm_8: Spacing = 1;
return ARM::VLD3DUPd8;
8736 case ARM::VLD3DUPdAsm_16: Spacing = 1;
return ARM::VLD3DUPd16;
8737 case ARM::VLD3DUPdAsm_32: Spacing = 1;
return ARM::VLD3DUPd32;
8738 case ARM::VLD3DUPqAsm_8: Spacing = 2;
return ARM::VLD3DUPq8;
8739 case ARM::VLD3DUPqAsm_16: Spacing = 2;
return ARM::VLD3DUPq16;
8740 case ARM::VLD3DUPqAsm_32: Spacing = 2;
return ARM::VLD3DUPq32;
8743 case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD3LNd8_UPD;
8744 case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD3LNd16_UPD;
8745 case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD3LNd32_UPD;
8746 case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD3LNq16_UPD;
8747 case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD3LNq32_UPD;
8748 case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1;
return ARM::VLD3LNd8_UPD;
8749 case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1;
return ARM::VLD3LNd16_UPD;
8750 case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1;
return ARM::VLD3LNd32_UPD;
8751 case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2;
return ARM::VLD3LNq16_UPD;
8752 case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2;
return ARM::VLD3LNq32_UPD;
8753 case ARM::VLD3LNdAsm_8: Spacing = 1;
return ARM::VLD3LNd8;
8754 case ARM::VLD3LNdAsm_16: Spacing = 1;
return ARM::VLD3LNd16;
8755 case ARM::VLD3LNdAsm_32: Spacing = 1;
return ARM::VLD3LNd32;
8756 case ARM::VLD3LNqAsm_16: Spacing = 2;
return ARM::VLD3LNq16;
8757 case ARM::VLD3LNqAsm_32: Spacing = 2;
return ARM::VLD3LNq32;
8760 case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD3d8_UPD;
8761 case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD3d16_UPD;
8762 case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD3d32_UPD;
8763 case ARM::VLD3qWB_fixed_Asm_8: Spacing = 2;
return ARM::VLD3q8_UPD;
8764 case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2;
return ARM::VLD3q16_UPD;
8765 case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD3q32_UPD;
8766 case ARM::VLD3dWB_register_Asm_8: Spacing = 1;
return ARM::VLD3d8_UPD;
8767 case ARM::VLD3dWB_register_Asm_16: Spacing = 1;
return ARM::VLD3d16_UPD;
8768 case ARM::VLD3dWB_register_Asm_32: Spacing = 1;
return ARM::VLD3d32_UPD;
8769 case ARM::VLD3qWB_register_Asm_8: Spacing = 2;
return ARM::VLD3q8_UPD;
8770 case ARM::VLD3qWB_register_Asm_16: Spacing = 2;
return ARM::VLD3q16_UPD;
8771 case ARM::VLD3qWB_register_Asm_32: Spacing = 2;
return ARM::VLD3q32_UPD;
8772 case ARM::VLD3dAsm_8: Spacing = 1;
return ARM::VLD3d8;
8773 case ARM::VLD3dAsm_16: Spacing = 1;
return ARM::VLD3d16;
8774 case ARM::VLD3dAsm_32: Spacing = 1;
return ARM::VLD3d32;
8775 case ARM::VLD3qAsm_8: Spacing = 2;
return ARM::VLD3q8;
8776 case ARM::VLD3qAsm_16: Spacing = 2;
return ARM::VLD3q16;
8777 case ARM::VLD3qAsm_32: Spacing = 2;
return ARM::VLD3q32;
8780 case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD4LNd8_UPD;
8781 case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD4LNd16_UPD;
8782 case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD4LNd32_UPD;
8783 case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2;
return ARM::VLD4LNq16_UPD;
8784 case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD4LNq32_UPD;
8785 case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1;
return ARM::VLD4LNd8_UPD;
8786 case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1;
return ARM::VLD4LNd16_UPD;
8787 case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1;
return ARM::VLD4LNd32_UPD;
8788 case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2;
return ARM::VLD4LNq16_UPD;
8789 case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2;
return ARM::VLD4LNq32_UPD;
8790 case ARM::VLD4LNdAsm_8: Spacing = 1;
return ARM::VLD4LNd8;
8791 case ARM::VLD4LNdAsm_16: Spacing = 1;
return ARM::VLD4LNd16;
8792 case ARM::VLD4LNdAsm_32: Spacing = 1;
return ARM::VLD4LNd32;
8793 case ARM::VLD4LNqAsm_16: Spacing = 2;
return ARM::VLD4LNq16;
8794 case ARM::VLD4LNqAsm_32: Spacing = 2;
return ARM::VLD4LNq32;
8797 case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD4DUPd8_UPD;
8798 case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD4DUPd16_UPD;
8799 case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD4DUPd32_UPD;
8800 case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD4DUPq8_UPD;
8801 case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD4DUPq16_UPD;
8802 case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD4DUPq32_UPD;
8803 case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1;
return ARM::VLD4DUPd8_UPD;
8804 case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1;
return ARM::VLD4DUPd16_UPD;
8805 case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1;
return ARM::VLD4DUPd32_UPD;
8806 case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2;
return ARM::VLD4DUPq8_UPD;
8807 case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2;
return ARM::VLD4DUPq16_UPD;
8808 case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2;
return ARM::VLD4DUPq32_UPD;
8809 case ARM::VLD4DUPdAsm_8: Spacing = 1;
return ARM::VLD4DUPd8;
8810 case ARM::VLD4DUPdAsm_16: Spacing = 1;
return ARM::VLD4DUPd16;
8811 case ARM::VLD4DUPdAsm_32: Spacing = 1;
return ARM::VLD4DUPd32;
8812 case ARM::VLD4DUPqAsm_8: Spacing = 2;
return ARM::VLD4DUPq8;
8813 case ARM::VLD4DUPqAsm_16: Spacing = 2;
return ARM::VLD4DUPq16;
8814 case ARM::VLD4DUPqAsm_32: Spacing = 2;
return ARM::VLD4DUPq32;
8817 case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD4d8_UPD;
8818 case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD4d16_UPD;
8819 case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD4d32_UPD;
8820 case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2;
return ARM::VLD4q8_UPD;
8821 case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2;
return ARM::VLD4q16_UPD;
8822 case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD4q32_UPD;
8823 case ARM::VLD4dWB_register_Asm_8: Spacing = 1;
return ARM::VLD4d8_UPD;
8824 case ARM::VLD4dWB_register_Asm_16: Spacing = 1;
return ARM::VLD4d16_UPD;
8825 case ARM::VLD4dWB_register_Asm_32: Spacing = 1;
return ARM::VLD4d32_UPD;
8826 case ARM::VLD4qWB_register_Asm_8: Spacing = 2;
return ARM::VLD4q8_UPD;
8827 case ARM::VLD4qWB_register_Asm_16: Spacing = 2;
return ARM::VLD4q16_UPD;
8828 case ARM::VLD4qWB_register_Asm_32: Spacing = 2;
return ARM::VLD4q32_UPD;
8829 case ARM::VLD4dAsm_8: Spacing = 1;
return ARM::VLD4d8;
8830 case ARM::VLD4dAsm_16: Spacing = 1;
return ARM::VLD4d16;
8831 case ARM::VLD4dAsm_32: Spacing = 1;
return ARM::VLD4d32;
8832 case ARM::VLD4qAsm_8: Spacing = 2;
return ARM::VLD4q8;
8833 case ARM::VLD4qAsm_16: Spacing = 2;
return ARM::VLD4q16;
8834 case ARM::VLD4qAsm_32: Spacing = 2;
return ARM::VLD4q32;
8838bool ARMAsmParser::processInstruction(
MCInst &Inst,
8840 unsigned MnemonicOpsEndInd,
8844 bool HasWideQualifier =
false;
8846 ARMOperand &ARMOp =
static_cast<ARMOperand&
>(*Op);
8847 if (ARMOp.isToken() && ARMOp.getToken() ==
".w") {
8848 HasWideQualifier =
true;
8859 MnemonicOpsEndInd + 2) {
8860 ARMOperand &
Op =
static_cast<ARMOperand &
>(
8863 auto &RegList =
Op.getRegList();
8866 if (RegList.size() == 32) {
8867 const unsigned Opcode =
8868 (Inst.
getOpcode() == ARM::VLLDM) ? ARM::VLLDM_T2 : ARM::VLSTM_T2;
8882 case ARM::LDRT_POST:
8883 case ARM::LDRBT_POST: {
8884 const unsigned Opcode =
8885 (Inst.
getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
8886 : ARM::LDRBT_POST_IMM;
8902 case ARM::LDRSHTii: {
8907 else if (Inst.
getOpcode() == ARM::LDRHTii)
8909 else if (Inst.
getOpcode() == ARM::LDRSHTii)
8920 case ARM::STRT_POST:
8921 case ARM::STRBT_POST: {
8922 const unsigned Opcode =
8923 (Inst.
getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
8924 : ARM::STRBT_POST_IMM;
8951 llvm::rotr<uint32_t>(Enc & 0xFF, (Enc & 0xF00) >> 7)));
8956 MCSymbol *Dot = getContext().createTempSymbol();
8975 case ARM::t2LDR_PRE_imm:
8976 case ARM::t2LDR_POST_imm: {
8989 case ARM::t2STR_PRE_imm:
8990 case ARM::t2STR_POST_imm: {
9003 case ARM::t2LDRB_OFFSET_imm: {
9013 case ARM::t2LDRB_PRE_imm:
9014 case ARM::t2LDRB_POST_imm: {
9018 : ARM::t2LDRB_POST);
9028 case ARM::t2STRB_OFFSET_imm: {
9038 case ARM::t2STRB_PRE_imm:
9039 case ARM::t2STRB_POST_imm: {
9043 : ARM::t2STRB_POST);
9053 case ARM::t2LDRH_OFFSET_imm: {
9063 case ARM::t2LDRH_PRE_imm:
9064 case ARM::t2LDRH_POST_imm: {
9068 : ARM::t2LDRH_POST);
9078 case ARM::t2STRH_OFFSET_imm: {
9088 case ARM::t2STRH_PRE_imm:
9089 case ARM::t2STRH_POST_imm: {
9093 : ARM::t2STRH_POST);
9103 case ARM::t2LDRSB_OFFSET_imm: {
9113 case ARM::t2LDRSB_PRE_imm:
9114 case ARM::t2LDRSB_POST_imm: {
9118 : ARM::t2LDRSB_POST);
9128 case ARM::t2LDRSH_OFFSET_imm: {
9138 case ARM::t2LDRSH_PRE_imm:
9139 case ARM::t2LDRSH_POST_imm: {
9143 : ARM::t2LDRSH_POST);
9153 case ARM::t2LDRpcrel:
9162 case ARM::t2LDRBpcrel:
9165 case ARM::t2LDRHpcrel:
9168 case ARM::t2LDRSBpcrel:
9171 case ARM::t2LDRSHpcrel:
9174 case ARM::LDRConstPool:
9175 case ARM::tLDRConstPool:
9176 case ARM::t2LDRConstPool: {
9181 if (Inst.
getOpcode() == ARM::LDRConstPool)
9183 else if (Inst.
getOpcode() == ARM::tLDRConstPool)
9185 else if (Inst.
getOpcode() == ARM::t2LDRConstPool)
9187 const ARMOperand &PoolOperand =
9188 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1]);
9189 const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
9191 if (isa<MCConstantExpr>(SubExprVal) &&
9195 (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
9197 bool MovHasS =
true;
9198 if (Inst.
getOpcode() == ARM::LDRConstPool) {
9208 else if (hasV6T2Ops() &&
9221 else if (hasThumb2() &&
9226 else if (hasV8MBaseline() &&
9247 getTargetStreamer().addConstantPoolEntry(SubExprVal,
9248 PoolOperand.getStartLoc());
9259 case ARM::VST1LNdWB_register_Asm_8:
9260 case ARM::VST1LNdWB_register_Asm_16:
9261 case ARM::VST1LNdWB_register_Asm_32: {
9279 case ARM::VST2LNdWB_register_Asm_8:
9280 case ARM::VST2LNdWB_register_Asm_16:
9281 case ARM::VST2LNdWB_register_Asm_32:
9282 case ARM::VST2LNqWB_register_Asm_16:
9283 case ARM::VST2LNqWB_register_Asm_32: {
9303 case ARM::VST3LNdWB_register_Asm_8:
9304 case ARM::VST3LNdWB_register_Asm_16:
9305 case ARM::VST3LNdWB_register_Asm_32:
9306 case ARM::VST3LNqWB_register_Asm_16:
9307 case ARM::VST3LNqWB_register_Asm_32: {
9329 case ARM::VST4LNdWB_register_Asm_8:
9330 case ARM::VST4LNdWB_register_Asm_16:
9331 case ARM::VST4LNdWB_register_Asm_32:
9332 case ARM::VST4LNqWB_register_Asm_16:
9333 case ARM::VST4LNqWB_register_Asm_32: {
9357 case ARM::VST1LNdWB_fixed_Asm_8:
9358 case ARM::VST1LNdWB_fixed_Asm_16:
9359 case ARM::VST1LNdWB_fixed_Asm_32: {
9377 case ARM::VST2LNdWB_fixed_Asm_8:
9378 case ARM::VST2LNdWB_fixed_Asm_16:
9379 case ARM::VST2LNdWB_fixed_Asm_32:
9380 case ARM::VST2LNqWB_fixed_Asm_16:
9381 case ARM::VST2LNqWB_fixed_Asm_32: {
9401 case ARM::VST3LNdWB_fixed_Asm_8:
9402 case ARM::VST3LNdWB_fixed_Asm_16:
9403 case ARM::VST3LNdWB_fixed_Asm_32:
9404 case ARM::VST3LNqWB_fixed_Asm_16:
9405 case ARM::VST3LNqWB_fixed_Asm_32: {
9427 case ARM::VST4LNdWB_fixed_Asm_8:
9428 case ARM::VST4LNdWB_fixed_Asm_16:
9429 case ARM::VST4LNdWB_fixed_Asm_32:
9430 case ARM::VST4LNqWB_fixed_Asm_16:
9431 case ARM::VST4LNqWB_fixed_Asm_32: {
9455 case ARM::VST1LNdAsm_8:
9456 case ARM::VST1LNdAsm_16:
9457 case ARM::VST1LNdAsm_32: {
9473 case ARM::VST2LNdAsm_8:
9474 case ARM::VST2LNdAsm_16:
9475 case ARM::VST2LNdAsm_32:
9476 case ARM::VST2LNqAsm_16:
9477 case ARM::VST2LNqAsm_32: {
9495 case ARM::VST3LNdAsm_8:
9496 case ARM::VST3LNdAsm_16:
9497 case ARM::VST3LNdAsm_32:
9498 case ARM::VST3LNqAsm_16:
9499 case ARM::VST3LNqAsm_32: {
9519 case ARM::VST4LNdAsm_8:
9520 case ARM::VST4LNdAsm_16:
9521 case ARM::VST4LNdAsm_32:
9522 case ARM::VST4LNqAsm_16:
9523 case ARM::VST4LNqAsm_32: {
9546 case ARM::VLD1LNdWB_register_Asm_8:
9547 case ARM::VLD1LNdWB_register_Asm_16:
9548 case ARM::VLD1LNdWB_register_Asm_32: {
9567 case ARM::VLD2LNdWB_register_Asm_8:
9568 case ARM::VLD2LNdWB_register_Asm_16:
9569 case ARM::VLD2LNdWB_register_Asm_32:
9570 case ARM::VLD2LNqWB_register_Asm_16:
9571 case ARM::VLD2LNqWB_register_Asm_32: {
9594 case ARM::VLD3LNdWB_register_Asm_8:
9595 case ARM::VLD3LNdWB_register_Asm_16:
9596 case ARM::VLD3LNdWB_register_Asm_32:
9597 case ARM::VLD3LNqWB_register_Asm_16:
9598 case ARM::VLD3LNqWB_register_Asm_32: {
9625 case ARM::VLD4LNdWB_register_Asm_8:
9626 case ARM::VLD4LNdWB_register_Asm_16:
9627 case ARM::VLD4LNdWB_register_Asm_32:
9628 case ARM::VLD4LNqWB_register_Asm_16:
9629 case ARM::VLD4LNqWB_register_Asm_32: {
9660 case ARM::VLD1LNdWB_fixed_Asm_8:
9661 case ARM::VLD1LNdWB_fixed_Asm_16:
9662 case ARM::VLD1LNdWB_fixed_Asm_32: {
9681 case ARM::VLD2LNdWB_fixed_Asm_8:
9682 case ARM::VLD2LNdWB_fixed_Asm_16:
9683 case ARM::VLD2LNdWB_fixed_Asm_32:
9684 case ARM::VLD2LNqWB_fixed_Asm_16:
9685 case ARM::VLD2LNqWB_fixed_Asm_32: {
9708 case ARM::VLD3LNdWB_fixed_Asm_8:
9709 case ARM::VLD3LNdWB_fixed_Asm_16:
9710 case ARM::VLD3LNdWB_fixed_Asm_32:
9711 case ARM::VLD3LNqWB_fixed_Asm_16:
9712 case ARM::VLD3LNqWB_fixed_Asm_32: {
9739 case ARM::VLD4LNdWB_fixed_Asm_8:
9740 case ARM::VLD4LNdWB_fixed_Asm_16:
9741 case ARM::VLD4LNdWB_fixed_Asm_32:
9742 case ARM::VLD4LNqWB_fixed_Asm_16:
9743 case ARM::VLD4LNqWB_fixed_Asm_32: {
9774 case ARM::VLD1LNdAsm_8:
9775 case ARM::VLD1LNdAsm_16:
9776 case ARM::VLD1LNdAsm_32: {
9793 case ARM::VLD2LNdAsm_8:
9794 case ARM::VLD2LNdAsm_16:
9795 case ARM::VLD2LNdAsm_32:
9796 case ARM::VLD2LNqAsm_16:
9797 case ARM::VLD2LNqAsm_32: {
9818 case ARM::VLD3LNdAsm_8:
9819 case ARM::VLD3LNdAsm_16:
9820 case ARM::VLD3LNdAsm_32:
9821 case ARM::VLD3LNqAsm_16:
9822 case ARM::VLD3LNqAsm_32: {
9847 case ARM::VLD4LNdAsm_8:
9848 case ARM::VLD4LNdAsm_16:
9849 case ARM::VLD4LNdAsm_32:
9850 case ARM::VLD4LNqAsm_16:
9851 case ARM::VLD4LNqAsm_32: {
9881 case ARM::VLD3DUPdAsm_8:
9882 case ARM::VLD3DUPdAsm_16:
9883 case ARM::VLD3DUPdAsm_32:
9884 case ARM::VLD3DUPqAsm_8:
9885 case ARM::VLD3DUPqAsm_16:
9886 case ARM::VLD3DUPqAsm_32: {
9903 case ARM::VLD3DUPdWB_fixed_Asm_8:
9904 case ARM::VLD3DUPdWB_fixed_Asm_16:
9905 case ARM::VLD3DUPdWB_fixed_Asm_32:
9906 case ARM::VLD3DUPqWB_fixed_Asm_8:
9907 case ARM::VLD3DUPqWB_fixed_Asm_16:
9908 case ARM::VLD3DUPqWB_fixed_Asm_32: {
9927 case ARM::VLD3DUPdWB_register_Asm_8:
9928 case ARM::VLD3DUPdWB_register_Asm_16:
9929 case ARM::VLD3DUPdWB_register_Asm_32:
9930 case ARM::VLD3DUPqWB_register_Asm_8:
9931 case ARM::VLD3DUPqWB_register_Asm_16:
9932 case ARM::VLD3DUPqWB_register_Asm_32: {
9952 case ARM::VLD3dAsm_8:
9953 case ARM::VLD3dAsm_16:
9954 case ARM::VLD3dAsm_32:
9955 case ARM::VLD3qAsm_8:
9956 case ARM::VLD3qAsm_16:
9957 case ARM::VLD3qAsm_32: {
9974 case ARM::VLD3dWB_fixed_Asm_8:
9975 case ARM::VLD3dWB_fixed_Asm_16:
9976 case ARM::VLD3dWB_fixed_Asm_32:
9977 case ARM::VLD3qWB_fixed_Asm_8:
9978 case ARM::VLD3qWB_fixed_Asm_16:
9979 case ARM::VLD3qWB_fixed_Asm_32: {
9998 case ARM::VLD3dWB_register_Asm_8:
9999 case ARM::VLD3dWB_register_Asm_16:
10000 case ARM::VLD3dWB_register_Asm_32:
10001 case ARM::VLD3qWB_register_Asm_8:
10002 case ARM::VLD3qWB_register_Asm_16:
10003 case ARM::VLD3qWB_register_Asm_32: {
10023 case ARM::VLD4DUPdAsm_8:
10024 case ARM::VLD4DUPdAsm_16:
10025 case ARM::VLD4DUPdAsm_32:
10026 case ARM::VLD4DUPqAsm_8:
10027 case ARM::VLD4DUPqAsm_16:
10028 case ARM::VLD4DUPqAsm_32: {
10047 case ARM::VLD4DUPdWB_fixed_Asm_8:
10048 case ARM::VLD4DUPdWB_fixed_Asm_16:
10049 case ARM::VLD4DUPdWB_fixed_Asm_32:
10050 case ARM::VLD4DUPqWB_fixed_Asm_8:
10051 case ARM::VLD4DUPqWB_fixed_Asm_16:
10052 case ARM::VLD4DUPqWB_fixed_Asm_32: {
10073 case ARM::VLD4DUPdWB_register_Asm_8:
10074 case ARM::VLD4DUPdWB_register_Asm_16:
10075 case ARM::VLD4DUPdWB_register_Asm_32:
10076 case ARM::VLD4DUPqWB_register_Asm_8:
10077 case ARM::VLD4DUPqWB_register_Asm_16:
10078 case ARM::VLD4DUPqWB_register_Asm_32: {
10100 case ARM::VLD4dAsm_8:
10101 case ARM::VLD4dAsm_16:
10102 case ARM::VLD4dAsm_32:
10103 case ARM::VLD4qAsm_8:
10104 case ARM::VLD4qAsm_16:
10105 case ARM::VLD4qAsm_32: {
10124 case ARM::VLD4dWB_fixed_Asm_8:
10125 case ARM::VLD4dWB_fixed_Asm_16:
10126 case ARM::VLD4dWB_fixed_Asm_32:
10127 case ARM::VLD4qWB_fixed_Asm_8:
10128 case ARM::VLD4qWB_fixed_Asm_16:
10129 case ARM::VLD4qWB_fixed_Asm_32: {
10150 case ARM::VLD4dWB_register_Asm_8:
10151 case ARM::VLD4dWB_register_Asm_16:
10152 case ARM::VLD4dWB_register_Asm_32:
10153 case ARM::VLD4qWB_register_Asm_8:
10154 case ARM::VLD4qWB_register_Asm_16:
10155 case ARM::VLD4qWB_register_Asm_32: {
10177 case ARM::VST3dAsm_8:
10178 case ARM::VST3dAsm_16:
10179 case ARM::VST3dAsm_32:
10180 case ARM::VST3qAsm_8:
10181 case ARM::VST3qAsm_16:
10182 case ARM::VST3qAsm_32: {
10199 case ARM::VST3dWB_fixed_Asm_8:
10200 case ARM::VST3dWB_fixed_Asm_16:
10201 case ARM::VST3dWB_fixed_Asm_32:
10202 case ARM::VST3qWB_fixed_Asm_8:
10203 case ARM::VST3qWB_fixed_Asm_16:
10204 case ARM::VST3qWB_fixed_Asm_32: {
10223 case ARM::VST3dWB_register_Asm_8:
10224 case ARM::VST3dWB_register_Asm_16:
10225 case ARM::VST3dWB_register_Asm_32:
10226 case ARM::VST3qWB_register_Asm_8:
10227 case ARM::VST3qWB_register_Asm_16:
10228 case ARM::VST3qWB_register_Asm_32: {
10248 case ARM::VST4dAsm_8:
10249 case ARM::VST4dAsm_16:
10250 case ARM::VST4dAsm_32:
10251 case ARM::VST4qAsm_8:
10252 case ARM::VST4qAsm_16:
10253 case ARM::VST4qAsm_32: {
10272 case ARM::VST4dWB_fixed_Asm_8:
10273 case ARM::VST4dWB_fixed_Asm_16:
10274 case ARM::VST4dWB_fixed_Asm_32:
10275 case ARM::VST4qWB_fixed_Asm_8:
10276 case ARM::VST4qWB_fixed_Asm_16:
10277 case ARM::VST4qWB_fixed_Asm_32: {
10298 case ARM::VST4dWB_register_Asm_8:
10299 case ARM::VST4dWB_register_Asm_16:
10300 case ARM::VST4dWB_register_Asm_32:
10301 case ARM::VST4qWB_register_Asm_8:
10302 case ARM::VST4qWB_register_Asm_16:
10303 case ARM::VST4qWB_register_Asm_32: {
10331 !HasWideQualifier) {
10335 case ARM::t2LSLri: NewOpc = ARM::tLSLri;
break;
10336 case ARM::t2LSRri: NewOpc = ARM::tLSRri;
break;
10337 case ARM::t2ASRri: NewOpc = ARM::tASRri;
break;
10355 case ARM::t2MOVSsr: {
10359 bool isNarrow =
false;
10364 inITBlock() == (Inst.
getOpcode() == ARM::t2MOVsr) &&
10371 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr;
break;
10372 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr;
break;
10373 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr;
break;
10374 case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr;
break;
10380 Inst.
getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
10387 Inst.
getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
10392 case ARM::t2MOVSsi: {
10396 bool isNarrow =
false;
10399 inITBlock() == (Inst.
getOpcode() == ARM::t2MOVsi) &&
10406 bool isMov =
false;
10417 newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr;
10421 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri;
break;
10422 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri;
break;
10423 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri;
break;
10424 case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow =
false;
break;
10425 case ARM_AM::rrx: isNarrow =
false; newOpc = ARM::t2RRX;
break;
10428 if (Amount == 32) Amount = 0;
10431 if (isNarrow && !isMov)
10433 Inst.
getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
10435 if (newOpc != ARM::t2RRX && !isMov)
10441 Inst.
getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
10485 unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
10494 if (Opc == ARM::MOVsi)
10515 case ARM::t2LDMIA_UPD: {
10531 case ARM::t2STMDB_UPD: {
10547 case ARM::LDMIA_UPD:
10550 if (
static_cast<ARMOperand &
>(*
Operands[0]).getToken() ==
"pop" &&
10565 case ARM::STMDB_UPD:
10568 if (
static_cast<ARMOperand &
>(*
Operands[0]).getToken() ==
"push" &&
10581 case ARM::t2ADDri12:
10582 case ARM::t2SUBri12:
10583 case ARM::t2ADDspImm12:
10584 case ARM::t2SUBspImm12: {
10588 if ((Token !=
"add" && Token !=
"sub") ||
10592 case ARM::t2ADDri12:
10595 case ARM::t2SUBri12:
10598 case ARM::t2ADDspImm12:
10601 case ARM::t2SUBspImm12:
10616 Operands.size() == MnemonicOpsEndInd + 3) {
10627 Operands.size() == MnemonicOpsEndInd + 3) {
10633 case ARM::t2SUBri: {
10647 ARM::tADDi8 : ARM::tSUBi8);
10657 case ARM::t2ADDspImm:
10658 case ARM::t2SUBspImm: {
10663 if (V & 3 || V > ((1 << 7) - 1) << 2)
10676 case ARM::t2ADDrr: {
10740 case ARM::tLDMIA: {
10746 bool hasWritebackToken =
10747 (
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
10749 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
10750 .getToken() ==
"!");
10751 bool listContainsBase;
10753 (!listContainsBase && !hasWritebackToken) ||
10754 (listContainsBase && hasWritebackToken)) {
10757 Inst.
setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
10760 if (hasWritebackToken)
10767 case ARM::tSTMIA_UPD: {
10772 bool listContainsBase;
10782 bool listContainsBase;
10796 bool listContainsBase;
10813 !HasWideQualifier) {
10834 !HasWideQualifier) {
10841 if (
Op == ARM::tMOVr) {
10859 !HasWideQualifier) {
10863 case ARM::t2SXTH: NewOpc = ARM::tSXTH;
break;
10864 case ARM::t2SXTB: NewOpc = ARM::tSXTB;
break;
10865 case ARM::t2UXTH: NewOpc = ARM::tUXTH;
break;
10866 case ARM::t2UXTB: NewOpc = ARM::tUXTB;
break;
10904 case ARM::ADDrsi: {
10910 case ARM::ANDrsi: newOpc = ARM::ANDrr;
break;
10911 case ARM::ORRrsi: newOpc = ARM::ORRrr;
break;
10912 case ARM::EORrsi: newOpc = ARM::EORrr;
break;
10913 case ARM::BICrsi: newOpc = ARM::BICrr;
break;
10914 case ARM::SUBrsi: newOpc = ARM::SUBrr;
break;
10915 case ARM::ADDrsi: newOpc = ARM::ADDrr;
break;
10938 assert(!inITBlock() &&
"nested IT blocks?!");
10954 !HasWideQualifier) {
10958 case ARM::t2LSLrr: NewOpc = ARM::tLSLrr;
break;
10959 case ARM::t2LSRrr: NewOpc = ARM::tLSRrr;
break;
10960 case ARM::t2ASRrr: NewOpc = ARM::tASRrr;
break;
10961 case ARM::t2SBCrr: NewOpc = ARM::tSBC;
break;
10962 case ARM::t2RORrr: NewOpc = ARM::tROR;
break;
10963 case ARM::t2BICrr: NewOpc = ARM::tBIC;
break;
10990 !HasWideQualifier) {
10994 case ARM::t2ADCrr: NewOpc = ARM::tADC;
break;
10995 case ARM::t2ANDrr: NewOpc = ARM::tAND;
break;
10996 case ARM::t2EORrr: NewOpc = ARM::tEOR;
break;
10997 case ARM::t2ORRrr: NewOpc = ARM::tORR;
break;
11016 case ARM::MVE_VPST:
11017 case ARM::MVE_VPTv16i8:
11018 case ARM::MVE_VPTv8i16:
11019 case ARM::MVE_VPTv4i32:
11020 case ARM::MVE_VPTv16u8:
11021 case ARM::MVE_VPTv8u16:
11022 case ARM::MVE_VPTv4u32:
11023 case ARM::MVE_VPTv16s8:
11024 case ARM::MVE_VPTv8s16:
11025 case ARM::MVE_VPTv4s32:
11026 case ARM::MVE_VPTv4f32:
11027 case ARM::MVE_VPTv8f16:
11028 case ARM::MVE_VPTv16i8r:
11029 case ARM::MVE_VPTv8i16r:
11030 case ARM::MVE_VPTv4i32r:
11031 case ARM::MVE_VPTv16u8r:
11032 case ARM::MVE_VPTv8u16r:
11033 case ARM::MVE_VPTv4u32r:
11034 case ARM::MVE_VPTv16s8r:
11035 case ARM::MVE_VPTv8s16r:
11036 case ARM::MVE_VPTv4s32r:
11037 case ARM::MVE_VPTv4f32r:
11038 case ARM::MVE_VPTv8f16r: {
11039 assert(!inVPTBlock() &&
"Nested VPT blocks are not allowed");
11041 VPTState.Mask = MO.
getImm();
11042 VPTState.CurPosition = 0;
11050ARMAsmParser::checkEarlyTargetMatchPredicate(
MCInst &Inst,
11058 static_cast<ARMOperand &
>(*
Operands[0]).getToken() ==
"nop" &&
11059 ((
isThumb() && !isThumbOne()) || hasV6MOps())) {
11060 return Match_MnemonicFail;
11065 return Match_Success;
11069unsigned ARMAsmParser::checkTargetMatchPredicate(
MCInst &Inst) {
11076 "optionally flag setting instruction missing optional def operand");
11078 "operand count mismatch!");
11079 bool IsCPSR =
false;
11081 for (
unsigned OpNo = 0; OpNo < MCID.
NumOperands; ++OpNo) {
11082 if (MCID.
operands()[OpNo].isOptionalDef() &&
11089 if (isThumbOne() && !IsCPSR)
11090 return Match_RequiresFlagSetting;
11093 if (isThumbTwo() && !IsCPSR && !inITBlock())
11094 return Match_RequiresITBlock;
11095 if (isThumbTwo() && IsCPSR && inITBlock())
11096 return Match_RequiresNotITBlock;
11098 if (Opc == ARM::tLSLri && Inst.
getOperand(3).
getImm() == 0 && inITBlock())
11099 return Match_RequiresNotITBlock;
11100 }
else if (isThumbOne()) {
11103 if (Opc == ARM::tADDhirr && !hasV6MOps() &&
11106 return Match_RequiresThumb2;
11108 else if (Opc == ARM::tMOVr && !hasV6Ops() &&
11111 return Match_RequiresV6;
11117 if (Opc == ARM::t2MOVr && !hasV8Ops())
11122 return Match_RequiresV8;
11127 return Match_RequiresV8;
11133 case ARM::VMRS_FPCXTS:
11134 case ARM::VMRS_FPCXTNS:
11135 case ARM::VMSR_FPCXTS:
11136 case ARM::VMSR_FPCXTNS:
11137 case ARM::VMRS_FPSCR_NZCVQC:
11138 case ARM::VMSR_FPSCR_NZCVQC:
11140 case ARM::VMRS_VPR:
11142 case ARM::VMSR_VPR:
11148 return Match_InvalidOperand;
11154 return Match_RequiresV8;
11162 return Match_InvalidTiedOperand;
11169 if (MCID.
operands()[
I].RegClass == ARM::rGPRRegClassID) {
11184 unsigned Reg =
Op.getReg();
11185 if ((Reg == ARM::SP) && !hasV8Ops())
11186 return Match_RequiresV8;
11187 else if (Reg == ARM::PC)
11188 return Match_InvalidOperand;
11191 return Match_Success;
11204bool ARMAsmParser::isITBlockTerminator(
MCInst &Inst)
const {
11223 bool MatchingInlineAsm,
11224 bool &EmitInITBlock,
11227 if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
11228 return MatchInstructionImpl(
Operands, Inst, &NearMisses, MatchingInlineAsm);
11232 if (inImplicitITBlock()) {
11233 extendImplicitITBlock(ITState.Cond);
11234 if (MatchInstructionImpl(
Operands, Inst,
nullptr, MatchingInlineAsm) ==
11244 if (InstCond == ITCond) {
11245 EmitInITBlock =
true;
11246 return Match_Success;
11248 invertCurrentITCondition();
11249 EmitInITBlock =
true;
11250 return Match_Success;
11254 rewindImplicitITPosition();
11258 flushPendingInstructions(Out);
11259 unsigned PlainMatchResult =
11260 MatchInstructionImpl(
Operands, Inst, &NearMisses, MatchingInlineAsm);
11261 if (PlainMatchResult == Match_Success) {
11270 EmitInITBlock =
false;
11271 return Match_Success;
11274 EmitInITBlock =
false;
11275 return Match_Success;
11278 EmitInITBlock =
false;
11279 return Match_Success;
11286 startImplicitITBlock();
11287 if (MatchInstructionImpl(
Operands, Inst,
nullptr, MatchingInlineAsm) ==
11294 EmitInITBlock =
true;
11295 return Match_Success;
11298 discardImplicitITBlock();
11302 EmitInITBlock =
false;
11303 return PlainMatchResult;
11307 unsigned VariantID = 0);
11310bool ARMAsmParser::MatchAndEmitInstruction(
SMLoc IDLoc,
unsigned &Opcode,
11313 bool MatchingInlineAsm) {
11315 unsigned MatchResult;
11316 bool PendConditionalInstruction =
false;
11319 MatchResult = MatchInstruction(
Operands, Inst, NearMisses, MatchingInlineAsm,
11320 PendConditionalInstruction, Out);
11325 switch (MatchResult) {
11326 case Match_Success:
11333 if (validateInstruction(Inst,
Operands, MnemonicOpsEndInd)) {
11336 forwardITPosition();
11337 forwardVPTPosition();
11346 while (processInstruction(Inst,
Operands, MnemonicOpsEndInd, Out))
11355 forwardITPosition();
11356 forwardVPTPosition();
11364 if (PendConditionalInstruction) {
11365 PendingConditionalInsts.push_back(Inst);
11366 if (isITBlockFull() || isITBlockTerminator(Inst))
11367 flushPendingInstructions(Out);
11372 case Match_NearMisses:
11373 ReportNearMisses(NearMisses, IDLoc,
Operands);
11375 case Match_MnemonicFail: {
11376 FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
11378 ((ARMOperand &)*
Operands[0]).getToken(), FBS);
11379 return Error(IDLoc,
"invalid instruction" + Suggestion,
11380 ((ARMOperand &)*
Operands[0]).getLocRange());
11388bool ARMAsmParser::ParseDirective(
AsmToken DirectiveID) {
11394 if (IDVal ==
".word")
11395 parseLiteralValues(4, DirectiveID.
getLoc());
11396 else if (IDVal ==
".short" || IDVal ==
".hword")
11397 parseLiteralValues(2, DirectiveID.
getLoc());
11398 else if (IDVal ==
".thumb")
11399 parseDirectiveThumb(DirectiveID.
getLoc());
11400 else if (IDVal ==
".arm")
11401 parseDirectiveARM(DirectiveID.
getLoc());
11402 else if (IDVal ==
".thumb_func")
11403 parseDirectiveThumbFunc(DirectiveID.
getLoc());
11404 else if (IDVal ==
".code")
11405 parseDirectiveCode(DirectiveID.
getLoc());
11406 else if (IDVal ==
".syntax")
11407 parseDirectiveSyntax(DirectiveID.
getLoc());
11408 else if (IDVal ==
".unreq")
11409 parseDirectiveUnreq(DirectiveID.
getLoc());
11410 else if (IDVal ==
".fnend")
11411 parseDirectiveFnEnd(DirectiveID.
getLoc());
11412 else if (IDVal ==
".cantunwind")
11413 parseDirectiveCantUnwind(DirectiveID.
getLoc());
11414 else if (IDVal ==
".personality")
11415 parseDirectivePersonality(DirectiveID.
getLoc());
11416 else if (IDVal ==
".handlerdata")
11417 parseDirectiveHandlerData(DirectiveID.
getLoc());
11418 else if (IDVal ==
".setfp")
11419 parseDirectiveSetFP(DirectiveID.
getLoc());
11420 else if (IDVal ==
".pad")
11421 parseDirectivePad(DirectiveID.
getLoc());
11422 else if (IDVal ==
".save")
11423 parseDirectiveRegSave(DirectiveID.
getLoc(),
false);
11424 else if (IDVal ==
".vsave")
11425 parseDirectiveRegSave(DirectiveID.
getLoc(),
true);
11426 else if (IDVal ==
".ltorg" || IDVal ==
".pool")
11427 parseDirectiveLtorg(DirectiveID.
getLoc());
11428 else if (IDVal ==
".even")
11429 parseDirectiveEven(DirectiveID.
getLoc());
11430 else if (IDVal ==
".personalityindex")
11431 parseDirectivePersonalityIndex(DirectiveID.
getLoc());
11432 else if (IDVal ==
".unwind_raw")
11433 parseDirectiveUnwindRaw(DirectiveID.
getLoc());
11434 else if (IDVal ==
".movsp")
11435 parseDirectiveMovSP(DirectiveID.
getLoc());
11436 else if (IDVal ==
".arch_extension")
11437 parseDirectiveArchExtension(DirectiveID.
getLoc());
11438 else if (IDVal ==
".align")
11439 return parseDirectiveAlign(DirectiveID.
getLoc());
11440 else if (IDVal ==
".thumb_set")
11441 parseDirectiveThumbSet(DirectiveID.
getLoc());
11442 else if (IDVal ==
".inst")
11443 parseDirectiveInst(DirectiveID.
getLoc());
11444 else if (IDVal ==
".inst.n")
11445 parseDirectiveInst(DirectiveID.
getLoc(),
'n');
11446 else if (IDVal ==
".inst.w")
11447 parseDirectiveInst(DirectiveID.
getLoc(),
'w');
11448 else if (!IsMachO && !IsCOFF) {
11449 if (IDVal ==
".arch")
11450 parseDirectiveArch(DirectiveID.
getLoc());
11451 else if (IDVal ==
".cpu")
11452 parseDirectiveCPU(DirectiveID.
getLoc());
11453 else if (IDVal ==
".eabi_attribute")
11454 parseDirectiveEabiAttr(DirectiveID.
getLoc());
11455 else if (IDVal ==
".fpu")
11456 parseDirectiveFPU(DirectiveID.
getLoc());
11457 else if (IDVal ==
".fnstart")
11458 parseDirectiveFnStart(DirectiveID.
getLoc());
11459 else if (IDVal ==
".object_arch")
11460 parseDirectiveObjectArch(DirectiveID.
getLoc());
11461 else if (IDVal ==
".tlsdescseq")
11462 parseDirectiveTLSDescSeq(DirectiveID.
getLoc());
11465 }
else if (IsCOFF) {
11466 if (IDVal ==
".seh_stackalloc")
11467 parseDirectiveSEHAllocStack(DirectiveID.
getLoc(),
false);
11468 else if (IDVal ==
".seh_stackalloc_w")
11469 parseDirectiveSEHAllocStack(DirectiveID.
getLoc(),
true);
11470 else if (IDVal ==
".seh_save_regs")
11471 parseDirectiveSEHSaveRegs(DirectiveID.
getLoc(),
false);
11472 else if (IDVal ==
".seh_save_regs_w")
11473 parseDirectiveSEHSaveRegs(DirectiveID.
getLoc(),
true);
11474 else if (IDVal ==
".seh_save_sp")
11475 parseDirectiveSEHSaveSP(DirectiveID.
getLoc());
11476 else if (IDVal ==
".seh_save_fregs")
11477 parseDirectiveSEHSaveFRegs(DirectiveID.
getLoc());
11478 else if (IDVal ==
".seh_save_lr")
11479 parseDirectiveSEHSaveLR(DirectiveID.
getLoc());
11480 else if (IDVal ==
".seh_endprologue")
11481 parseDirectiveSEHPrologEnd(DirectiveID.
getLoc(),
false);
11482 else if (IDVal ==
".seh_endprologue_fragment")
11483 parseDirectiveSEHPrologEnd(DirectiveID.
getLoc(),
true);
11484 else if (IDVal ==
".seh_nop")
11485 parseDirectiveSEHNop(DirectiveID.
getLoc(),
false);
11486 else if (IDVal ==
".seh_nop_w")
11487 parseDirectiveSEHNop(DirectiveID.
getLoc(),
true);
11488 else if (IDVal ==
".seh_startepilogue")
11489 parseDirectiveSEHEpilogStart(DirectiveID.
getLoc(),
false);
11490 else if (IDVal ==
".seh_startepilogue_cond")
11491 parseDirectiveSEHEpilogStart(DirectiveID.
getLoc(),
true);
11492 else if (IDVal ==
".seh_endepilogue")
11493 parseDirectiveSEHEpilogEnd(DirectiveID.
getLoc());
11494 else if (IDVal ==
".seh_custom")
11495 parseDirectiveSEHCustom(DirectiveID.
getLoc());
11507bool ARMAsmParser::parseLiteralValues(
unsigned Size,
SMLoc L) {
11508 auto parseOne = [&]() ->
bool {
11510 if (getParser().parseExpression(
Value))
11512 getParser().getStreamer().emitValue(
Value,
Size, L);
11515 return (parseMany(parseOne));
11520bool ARMAsmParser::parseDirectiveThumb(
SMLoc L) {
11521 if (parseEOL() ||
check(!hasThumb(), L,
"target does not support Thumb mode"))
11527 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code16);
11528 getParser().getStreamer().emitCodeAlignment(
Align(2), &getSTI(), 0);
11534bool ARMAsmParser::parseDirectiveARM(
SMLoc L) {
11535 if (parseEOL() ||
check(!hasARM(), L,
"target does not support ARM mode"))
11540 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code32);
11541 getParser().getStreamer().emitCodeAlignment(
Align(4), &getSTI(), 0);
11576void ARMAsmParser::doBeforeLabelEmit(
MCSymbol *Symbol,
SMLoc IDLoc) {
11579 flushPendingInstructions(getStreamer());
11582void ARMAsmParser::onLabelParsed(
MCSymbol *Symbol) {
11583 if (NextSymbolIsThumb) {
11584 getParser().getStreamer().emitThumbFunc(Symbol);
11585 NextSymbolIsThumb =
false;
11591bool ARMAsmParser::parseDirectiveThumbFunc(
SMLoc L) {
11593 const auto Format = getContext().getObjectFileType();
11602 MCSymbol *
Func = getParser().getContext().getOrCreateSymbol(
11604 getParser().getStreamer().emitThumbFunc(Func);
11619 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code16);
11621 NextSymbolIsThumb =
true;
11627bool ARMAsmParser::parseDirectiveSyntax(
SMLoc L) {
11631 Error(L,
"unexpected token in .syntax directive");
11637 if (
check(Mode ==
"divided" || Mode ==
"DIVIDED", L,
11638 "'.syntax divided' arm assembly not supported") ||
11639 check(Mode !=
"unified" && Mode !=
"UNIFIED", L,
11640 "unrecognized syntax mode in .syntax directive") ||
11651bool ARMAsmParser::parseDirectiveCode(
SMLoc L) {
11655 return Error(L,
"unexpected token in .code directive");
11657 if (Val != 16 && Val != 32) {
11658 Error(L,
"invalid operand to .code directive");
11668 return Error(L,
"target does not support Thumb mode");
11672 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code16);
11675 return Error(L,
"target does not support ARM mode");
11679 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code32);
11691 SMLoc SRegLoc, ERegLoc;
11692 if (
check(parseRegister(Reg, SRegLoc, ERegLoc), SRegLoc,
11693 "register name expected") ||
11697 if (RegisterReqs.
insert(std::make_pair(
Name, Reg)).first->second != Reg)
11698 return Error(SRegLoc,
11699 "redefinition of '" +
Name +
"' does not match original.");
11706bool ARMAsmParser::parseDirectiveUnreq(
SMLoc L) {
11709 return Error(L,
"unexpected input in .unreq directive.");
11718void ARMAsmParser::FixModeAfterArchChange(
bool WasThumb,
SMLoc Loc) {
11720 if (WasThumb && hasThumb()) {
11723 }
else if (!WasThumb && hasARM()) {
11734 (WasThumb ?
"thumb" :
"arm") +
" mode, switching to " +
11735 (!WasThumb ?
"thumb" :
"arm") +
" mode");
11742bool ARMAsmParser::parseDirectiveArch(
SMLoc L) {
11743 StringRef Arch = getParser().parseStringToEndOfStatement().
trim();
11746 if (
ID == ARM::ArchKind::INVALID)
11747 return Error(L,
"Unknown arch name");
11754 setAvailableFeatures(ComputeAvailableFeatures(STI.
getFeatureBits()));
11755 FixModeAfterArchChange(WasThumb, L);
11757 getTargetStreamer().emitArch(
ID);
11764bool ARMAsmParser::parseDirectiveEabiAttr(
SMLoc L) {
11774 Error(TagLoc,
"attribute name not recognised: " +
Name);
11787 if (
check(!CE, TagLoc,
"expected numeric constant"))
11790 Tag =
CE->getValue();
11797 bool IsStringValue =
false;
11799 int64_t IntegerValue = 0;
11800 bool IsIntegerValue =
false;
11803 IsStringValue =
true;
11805 IsStringValue =
true;
11806 IsIntegerValue =
true;
11807 }
else if (
Tag < 32 ||
Tag % 2 == 0)
11808 IsIntegerValue =
true;
11809 else if (
Tag % 2 == 1)
11810 IsStringValue =
true;
11814 if (IsIntegerValue) {
11815 const MCExpr *ValueExpr;
11822 return Error(ValueExprLoc,
"expected numeric constant");
11823 IntegerValue =
CE->getValue();
11831 std::string EscapedValue;
11832 if (IsStringValue) {
11840 StringValue = EscapedValue;
11850 if (IsIntegerValue && IsStringValue) {
11852 getTargetStreamer().emitIntTextAttribute(
Tag, IntegerValue, StringValue);
11853 }
else if (IsIntegerValue)
11854 getTargetStreamer().emitAttribute(
Tag, IntegerValue);
11855 else if (IsStringValue)
11856 getTargetStreamer().emitTextAttribute(
Tag, StringValue);
11862bool ARMAsmParser::parseDirectiveCPU(
SMLoc L) {
11863 StringRef CPU = getParser().parseStringToEndOfStatement().
trim();
11868 if (!getSTI().isCPUStringValid(CPU))
11869 return Error(L,
"Unknown CPU name");
11874 setAvailableFeatures(ComputeAvailableFeatures(STI.
getFeatureBits()));
11875 FixModeAfterArchChange(WasThumb, L);
11882bool ARMAsmParser::parseDirectiveFPU(
SMLoc L) {
11883 SMLoc FPUNameLoc = getTok().getLoc();
11884 StringRef FPU = getParser().parseStringToEndOfStatement().
trim();
11887 std::vector<StringRef> Features;
11889 return Error(FPUNameLoc,
"Unknown FPU name");
11892 for (
auto Feature : Features)
11894 setAvailableFeatures(ComputeAvailableFeatures(STI.
getFeatureBits()));
11896 getTargetStreamer().emitFPU(
ID);
11902bool ARMAsmParser::parseDirectiveFnStart(
SMLoc L) {
11906 if (UC.hasFnStart()) {
11907 Error(L,
".fnstart starts before the end of previous one");
11908 UC.emitFnStartLocNotes();
11915 getTargetStreamer().emitFnStart();
11917 UC.recordFnStart(L);
11923bool ARMAsmParser::parseDirectiveFnEnd(
SMLoc L) {
11927 if (!UC.hasFnStart())
11928 return Error(L,
".fnstart must precede .fnend directive");
11931 getTargetStreamer().emitFnEnd();
11939bool ARMAsmParser::parseDirectiveCantUnwind(
SMLoc L) {
11943 UC.recordCantUnwind(L);
11945 if (
check(!UC.hasFnStart(), L,
".fnstart must precede .cantunwind directive"))
11948 if (UC.hasHandlerData()) {
11949 Error(L,
".cantunwind can't be used with .handlerdata directive");
11950 UC.emitHandlerDataLocNotes();
11953 if (UC.hasPersonality()) {
11954 Error(L,
".cantunwind can't be used with .personality directive");
11955 UC.emitPersonalityLocNotes();
11959 getTargetStreamer().emitCantUnwind();
11965bool ARMAsmParser::parseDirectivePersonality(
SMLoc L) {
11967 bool HasExistingPersonality = UC.hasPersonality();
11971 return Error(L,
"unexpected input in .personality directive.");
11978 UC.recordPersonality(L);
11981 if (!UC.hasFnStart())
11982 return Error(L,
".fnstart must precede .personality directive");
11983 if (UC.cantUnwind()) {
11984 Error(L,
".personality can't be used with .cantunwind directive");
11985 UC.emitCantUnwindLocNotes();
11988 if (UC.hasHandlerData()) {
11989 Error(L,
".personality must precede .handlerdata directive");
11990 UC.emitHandlerDataLocNotes();
11993 if (HasExistingPersonality) {
11994 Error(L,
"multiple personality directives");
11995 UC.emitPersonalityLocNotes();
11999 MCSymbol *PR = getParser().getContext().getOrCreateSymbol(
Name);
12000 getTargetStreamer().emitPersonality(PR);
12006bool ARMAsmParser::parseDirectiveHandlerData(
SMLoc L) {
12010 UC.recordHandlerData(L);
12012 if (!UC.hasFnStart())
12013 return Error(L,
".fnstart must precede .personality directive");
12014 if (UC.cantUnwind()) {
12015 Error(L,
".handlerdata can't be used with .cantunwind directive");
12016 UC.emitCantUnwindLocNotes();
12020 getTargetStreamer().emitHandlerData();
12026bool ARMAsmParser::parseDirectiveSetFP(
SMLoc L) {
12029 if (
check(!UC.hasFnStart(), L,
".fnstart must precede .setfp directive") ||
12030 check(UC.hasHandlerData(), L,
12031 ".setfp must precede .handlerdata directive"))
12036 int FPReg = tryParseRegister();
12038 if (
check(FPReg == -1, FPRegLoc,
"frame pointer register expected") ||
12044 int SPReg = tryParseRegister();
12045 if (
check(SPReg == -1, SPRegLoc,
"stack pointer register expected") ||
12046 check(SPReg != ARM::SP && SPReg != UC.getFPReg(), SPRegLoc,
12047 "register should be either $sp or the latest fp register"))
12051 UC.saveFPReg(FPReg);
12061 const MCExpr *OffsetExpr;
12064 if (getParser().parseExpression(OffsetExpr, EndLoc))
12065 return Error(ExLoc,
"malformed setfp offset");
12067 if (
check(!CE, ExLoc,
"setfp offset must be an immediate"))
12075 getTargetStreamer().emitSetFP(
static_cast<unsigned>(FPReg),
12076 static_cast<unsigned>(SPReg),
Offset);
12082bool ARMAsmParser::parseDirectivePad(
SMLoc L) {
12085 if (!UC.hasFnStart())
12086 return Error(L,
".fnstart must precede .pad directive");
12087 if (UC.hasHandlerData())
12088 return Error(L,
".pad must precede .handlerdata directive");
12096 const MCExpr *OffsetExpr;
12099 if (getParser().parseExpression(OffsetExpr, EndLoc))
12100 return Error(ExLoc,
"malformed pad offset");
12103 return Error(ExLoc,
"pad offset must be an immediate");
12108 getTargetStreamer().emitPad(
CE->getValue());
12115bool ARMAsmParser::parseDirectiveRegSave(
SMLoc L,
bool IsVector) {
12117 if (!UC.hasFnStart())
12118 return Error(L,
".fnstart must precede .save or .vsave directives");
12119 if (UC.hasHandlerData())
12120 return Error(L,
".save or .vsave must precede .handlerdata directive");
12126 if (parseRegisterList(
Operands,
true,
true) || parseEOL())
12128 ARMOperand &
Op = (ARMOperand &)*
Operands[0];
12129 if (!IsVector && !
Op.isRegList())
12130 return Error(L,
".save expects GPR registers");
12131 if (IsVector && !
Op.isDPRRegList())
12132 return Error(L,
".vsave expects DPR registers");
12134 getTargetStreamer().emitRegSave(
Op.getRegList(), IsVector);
12142bool ARMAsmParser::parseDirectiveInst(
SMLoc Loc,
char Suffix) {
12158 return Error(Loc,
"width suffixes are invalid in ARM mode");
12161 auto parseOne = [&]() ->
bool {
12163 if (getParser().parseExpression(Expr))
12167 return Error(Loc,
"expected constant expression");
12170 char CurSuffix = Suffix;
12173 if (
Value->getValue() > 0xffff)
12174 return Error(Loc,
"inst.n operand is too big, use inst.w instead");
12177 if (
Value->getValue() > 0xffffffff)
12179 " operand is too big");
12183 if (
Value->getValue() < 0xe800)
12185 else if (
Value->getValue() >= 0xe8000000)
12188 return Error(Loc,
"cannot determine Thumb instruction size, "
12189 "use inst.n/inst.w instead");
12195 getTargetStreamer().emitInst(
Value->getValue(), CurSuffix);
12196 forwardITPosition();
12197 forwardVPTPosition();
12202 return Error(Loc,
"expected expression following directive");
12203 if (parseMany(parseOne))
12210bool ARMAsmParser::parseDirectiveLtorg(
SMLoc L) {
12213 getTargetStreamer().emitCurrentConstantPool();
12217bool ARMAsmParser::parseDirectiveEven(
SMLoc L) {
12224 getStreamer().initSections(
false, getSTI());
12225 Section = getStreamer().getCurrentSectionOnly();
12228 assert(Section &&
"must have section to emit alignment");
12230 getStreamer().emitCodeAlignment(
Align(2), &getSTI());
12232 getStreamer().emitValueToAlignment(
Align(2));
12239bool ARMAsmParser::parseDirectivePersonalityIndex(
SMLoc L) {
12241 bool HasExistingPersonality = UC.hasPersonality();
12243 const MCExpr *IndexExpression;
12249 UC.recordPersonalityIndex(L);
12251 if (!UC.hasFnStart()) {
12252 return Error(L,
".fnstart must precede .personalityindex directive");
12254 if (UC.cantUnwind()) {
12255 Error(L,
".personalityindex cannot be used with .cantunwind");
12256 UC.emitCantUnwindLocNotes();
12259 if (UC.hasHandlerData()) {
12260 Error(L,
".personalityindex must precede .handlerdata directive");
12261 UC.emitHandlerDataLocNotes();
12264 if (HasExistingPersonality) {
12265 Error(L,
"multiple personality directives");
12266 UC.emitPersonalityLocNotes();
12272 return Error(IndexLoc,
"index must be a constant number");
12274 return Error(IndexLoc,
12275 "personality routine index should be in range [0-3]");
12277 getTargetStreamer().emitPersonalityIndex(
CE->getValue());
12283bool ARMAsmParser::parseDirectiveUnwindRaw(
SMLoc L) {
12286 const MCExpr *OffsetExpr;
12287 SMLoc OffsetLoc = getLexer().getLoc();
12289 if (!UC.hasFnStart())
12290 return Error(L,
".fnstart must precede .unwind_raw directives");
12291 if (getParser().parseExpression(OffsetExpr))
12292 return Error(OffsetLoc,
"expected expression");
12296 return Error(OffsetLoc,
"offset must be a constant");
12305 auto parseOne = [&]() ->
bool {
12306 const MCExpr *OE =
nullptr;
12307 SMLoc OpcodeLoc = getLexer().getLoc();
12310 OpcodeLoc,
"expected opcode expression"))
12314 return Error(OpcodeLoc,
"opcode value must be a constant");
12315 const int64_t Opcode =
OC->getValue();
12316 if (Opcode & ~0xff)
12317 return Error(OpcodeLoc,
"invalid opcode");
12323 SMLoc OpcodeLoc = getLexer().getLoc();
12325 return Error(OpcodeLoc,
"expected opcode expression");
12326 if (parseMany(parseOne))
12329 getTargetStreamer().emitUnwindRaw(
StackOffset, Opcodes);
12335bool ARMAsmParser::parseDirectiveTLSDescSeq(
SMLoc L) {
12339 return TokError(
"expected variable after '.tlsdescseq' directive");
12349 getTargetStreamer().annotateTLSDescriptorSequence(SRE);
12355bool ARMAsmParser::parseDirectiveMovSP(
SMLoc L) {
12357 if (!UC.hasFnStart())
12358 return Error(L,
".fnstart must precede .movsp directives");
12359 if (UC.getFPReg() != ARM::SP)
12360 return Error(L,
"unexpected .movsp directive");
12363 int SPReg = tryParseRegister();
12365 return Error(SPRegLoc,
"register expected");
12366 if (SPReg == ARM::SP || SPReg == ARM::PC)
12367 return Error(SPRegLoc,
"sp and pc are not permitted in .movsp directive");
12374 const MCExpr *OffsetExpr;
12378 return Error(OffsetLoc,
"malformed offset expression");
12382 return Error(OffsetLoc,
"offset must be an immediate constant");
12390 getTargetStreamer().emitMovSP(SPReg,
Offset);
12391 UC.saveFPReg(SPReg);
12398bool ARMAsmParser::parseDirectiveObjectArch(
SMLoc L) {
12401 return Error(getLexer().getLoc(),
"unexpected token");
12409 if (
ID == ARM::ArchKind::INVALID)
12410 return Error(ArchLoc,
"unknown architecture '" + Arch +
"'");
12414 getTargetStreamer().emitObjectArch(
ID);
12420bool ARMAsmParser::parseDirectiveAlign(
SMLoc L) {
12426 assert(Section &&
"must have section to emit alignment");
12428 getStreamer().emitCodeAlignment(
Align(4), &getSTI(), 0);
12430 getStreamer().emitValueToAlignment(
Align(4), 0, 1, 0);
12438bool ARMAsmParser::parseDirectiveThumbSet(
SMLoc L) {
12443 "expected identifier after '.thumb_set'") ||
12453 getTargetStreamer().emitThumbSet(
Sym,
Value);
12460bool ARMAsmParser::parseDirectiveSEHAllocStack(
SMLoc L,
bool Wide) {
12462 if (parseImmExpr(
Size))
12464 getTargetStreamer().emitARMWinCFIAllocStack(
Size, Wide);
12471bool ARMAsmParser::parseDirectiveSEHSaveRegs(
SMLoc L,
bool Wide) {
12474 if (parseRegisterList(
Operands) || parseEOL())
12476 ARMOperand &
Op = (ARMOperand &)*
Operands[0];
12477 if (!
Op.isRegList())
12478 return Error(L,
".seh_save_regs{_w} expects GPR registers");
12481 for (
size_t i = 0; i < RegList.
size(); ++i) {
12482 unsigned Reg =
MRI->getEncodingValue(RegList[i]);
12486 return Error(L,
".seh_save_regs{_w} can't include SP");
12487 assert(Reg < 16U &&
"Register out of range");
12488 unsigned Bit = (1u <<
Reg);
12491 if (!Wide && (Mask & 0x1f00) != 0)
12493 ".seh_save_regs cannot save R8-R12, needs .seh_save_regs_w");
12494 getTargetStreamer().emitARMWinCFISaveRegMask(Mask, Wide);
12500bool ARMAsmParser::parseDirectiveSEHSaveSP(
SMLoc L) {
12501 int Reg = tryParseRegister();
12502 if (Reg == -1 || !
MRI->getRegClass(ARM::GPRRegClassID).contains(Reg))
12503 return Error(L,
"expected GPR");
12504 unsigned Index =
MRI->getEncodingValue(Reg);
12506 return Error(L,
"invalid register for .seh_save_sp");
12507 getTargetStreamer().emitARMWinCFISaveSP(
Index);
12513bool ARMAsmParser::parseDirectiveSEHSaveFRegs(
SMLoc L) {
12516 if (parseRegisterList(
Operands) || parseEOL())
12518 ARMOperand &
Op = (ARMOperand &)*
Operands[0];
12519 if (!
Op.isDPRRegList())
12520 return Error(L,
".seh_save_fregs expects DPR registers");
12523 for (
size_t i = 0; i < RegList.
size(); ++i) {
12524 unsigned Reg =
MRI->getEncodingValue(RegList[i]);
12525 assert(Reg < 32U &&
"Register out of range");
12526 unsigned Bit = (1u <<
Reg);
12531 return Error(L,
".seh_save_fregs missing registers");
12533 unsigned First = 0;
12534 while ((Mask & 1) == 0) {
12538 if (((Mask + 1) & Mask) != 0)
12540 ".seh_save_fregs must take a contiguous range of registers");
12542 while ((Mask & 2) != 0) {
12546 if (First < 16 && Last >= 16)
12547 return Error(L,
".seh_save_fregs must be all d0-d15 or d16-d31");
12548 getTargetStreamer().emitARMWinCFISaveFRegs(
First,
Last);
12554bool ARMAsmParser::parseDirectiveSEHSaveLR(
SMLoc L) {
12556 if (parseImmExpr(
Offset))
12558 getTargetStreamer().emitARMWinCFISaveLR(
Offset);
12565bool ARMAsmParser::parseDirectiveSEHPrologEnd(
SMLoc L,
bool Fragment) {
12566 getTargetStreamer().emitARMWinCFIPrologEnd(Fragment);
12573bool ARMAsmParser::parseDirectiveSEHNop(
SMLoc L,
bool Wide) {
12574 getTargetStreamer().emitARMWinCFINop(Wide);
12581bool ARMAsmParser::parseDirectiveSEHEpilogStart(
SMLoc L,
bool Condition) {
12588 return Error(S,
".seh_startepilogue_cond missing condition");
12591 return Error(S,
"invalid condition");
12595 getTargetStreamer().emitARMWinCFIEpilogStart(
CC);
12601bool ARMAsmParser::parseDirectiveSEHEpilogEnd(
SMLoc L) {
12602 getTargetStreamer().emitARMWinCFIEpilogEnd();
12608bool ARMAsmParser::parseDirectiveSEHCustom(
SMLoc L) {
12609 unsigned Opcode = 0;
12612 if (parseImmExpr(Byte))
12614 if (Byte > 0xff || Byte < 0)
12615 return Error(L,
"Invalid byte value in .seh_custom");
12616 if (Opcode > 0x00ffffff)
12617 return Error(L,
"Too many bytes in .seh_custom");
12620 Opcode = (Opcode << 8) | Byte;
12622 getTargetStreamer().emitARMWinCFICustom(Opcode);
12634#define GET_REGISTER_MATCHER
12635#define GET_SUBTARGET_FEATURE_NAME
12636#define GET_MATCHER_IMPLEMENTATION
12637#define GET_MNEMONIC_SPELL_CHECKER
12638#include "ARMGenAsmMatcher.inc"
12644ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
12645 switch (MatchError) {
12648 return hasV8Ops() ?
"operand must be a register in range [r0, r14]"
12649 :
"operand must be a register in range [r0, r12] or r14";
12652 return hasD32() ?
"operand must be a register in range [d0, d31]"
12653 :
"operand must be a register in range [d0, d15]";
12654 case Match_DPR_RegList:
12655 return hasD32() ?
"operand must be a list of registers in range [d0, d31]"
12656 :
"operand must be a list of registers in range [d0, d15]";
12660 return getMatchKindDiag(MatchError);
12683 std::multimap<unsigned, unsigned> OperandMissesSeen;
12685 bool ReportedTooFewOperands =
false;
12692 switch (
I.getKind()) {
12695 ((ARMOperand &)*
Operands[
I.getOperandIndex()]).getStartLoc();
12696 const char *OperandDiag =
12697 getCustomOperandDiag((ARMMatchResultTy)
I.getOperandError());
12704 unsigned DupCheckMatchClass = OperandDiag ?
I.getOperandClass() : ~0
U;
12705 auto PrevReports = OperandMissesSeen.equal_range(
I.getOperandIndex());
12706 if (std::any_of(PrevReports.first, PrevReports.second,
12707 [DupCheckMatchClass](
12708 const std::pair<unsigned, unsigned> Pair) {
12709 if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
12710 return Pair.second == DupCheckMatchClass;
12712 return isSubclass((MatchClassKind)DupCheckMatchClass,
12713 (MatchClassKind)Pair.second);
12716 OperandMissesSeen.insert(
12717 std::make_pair(
I.getOperandIndex(), DupCheckMatchClass));
12719 NearMissMessage Message;
12720 Message.Loc = OperandLoc;
12722 Message.Message = OperandDiag;
12723 }
else if (
I.getOperandClass() == InvalidMatchClass) {
12724 Message.Message =
"too many operands for instruction";
12726 Message.Message =
"invalid operand for instruction";
12728 dbgs() <<
"Missing diagnostic string for operand class "
12729 << getMatchClassName((MatchClassKind)
I.getOperandClass())
12730 <<
I.getOperandClass() <<
", error " <<
I.getOperandError()
12731 <<
", opcode " << MII.getName(
I.getOpcode()) <<
"\n");
12739 if (FeatureMissesSeen.
count(MissingFeatures))
12741 FeatureMissesSeen.
insert(MissingFeatures);
12745 if (MissingFeatures.
test(Feature_IsARMBit) && !hasARM())
12749 if (
isThumb() && MissingFeatures.
test(Feature_IsARMBit) &&
12750 MissingFeatures.
count() > 1)
12752 if (!
isThumb() && MissingFeatures.
test(Feature_IsThumbBit) &&
12753 MissingFeatures.
count() > 1)
12755 if (!
isThumb() && MissingFeatures.
test(Feature_IsThumb2Bit) &&
12757 Feature_IsThumbBit})).
any())
12759 if (isMClass() && MissingFeatures.
test(Feature_HasNEONBit))
12762 NearMissMessage Message;
12763 Message.Loc = IDLoc;
12766 OS <<
"instruction requires:";
12767 for (
unsigned i = 0, e = MissingFeatures.
size(); i != e; ++i)
12768 if (MissingFeatures.
test(i))
12776 NearMissMessage Message;
12777 Message.Loc = IDLoc;
12778 switch (
I.getPredicateError()) {
12779 case Match_RequiresNotITBlock:
12780 Message.Message =
"flag setting instruction only valid outside IT block";
12782 case Match_RequiresITBlock:
12783 Message.Message =
"instruction only valid inside IT block";
12785 case Match_RequiresV6:
12786 Message.Message =
"instruction variant requires ARMv6 or later";
12788 case Match_RequiresThumb2:
12789 Message.Message =
"instruction variant requires Thumb2";
12791 case Match_RequiresV8:
12792 Message.Message =
"instruction variant requires ARMv8 or later";
12794 case Match_RequiresFlagSetting:
12795 Message.Message =
"no flag-preserving variant of this instruction available";
12797 case Match_InvalidTiedOperand: {
12798 ARMOperand &
Op =
static_cast<ARMOperand &
>(*
Operands[0]);
12799 if (
Op.isToken() &&
Op.getToken() ==
"mul") {
12800 Message.Message =
"destination register must match a source register";
12801 Message.Loc =
Operands[MnemonicOpsEndInd]->getStartLoc();
12807 case Match_InvalidOperand:
12808 Message.Message =
"invalid operand for instruction";
12818 if (!ReportedTooFewOperands) {
12819 SMLoc EndLoc = ((ARMOperand &)*
Operands.back()).getEndLoc();
12821 EndLoc,
StringRef(
"too few operands for instruction")});
12822 ReportedTooFewOperands =
true;
12837 FilterNearMisses(NearMisses, Messages, IDLoc,
Operands);
12839 if (Messages.
size() == 0) {
12842 Error(IDLoc,
"invalid instruction");
12843 }
else if (Messages.
size() == 1) {
12845 Error(Messages[0].Loc, Messages[0].Message);
12849 Error(IDLoc,
"invalid instruction, any one of the following would fix this:");
12850 for (
auto &M : Messages) {
12860 static const struct {
12865 {
ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC}},
12867 {Feature_HasV8Bit},
12868 {ARM::FeatureAES, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12870 {Feature_HasV8Bit},
12871 {ARM::FeatureSHA2, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12873 {Feature_HasV8Bit},
12874 {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12876 {Feature_HasV8_1MMainlineBit},
12877 {ARM::HasMVEFloatOps}},
12879 {Feature_HasV8Bit},
12880 {ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12882 {Feature_HasV7Bit, Feature_IsNotMClassBit},
12883 {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM}},
12885 {Feature_HasV7Bit, Feature_IsNotMClassBit},
12888 {Feature_HasV8Bit},
12889 {ARM::FeatureNEON, ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12890 {
ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone}},
12892 {
ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization}},
12894 {Feature_HasV8_2aBit},
12895 {ARM::FeatureFPARMv8, ARM::FeatureFullFP16}},
12896 {
ARM::AEK_RAS, {Feature_HasV8Bit}, {ARM::FeatureRAS}},
12897 {
ARM::AEK_LOB, {Feature_HasV8_1MMainlineBit}, {ARM::FeatureLOB}},
12898 {
ARM::AEK_PACBTI, {Feature_HasV8_1MMainlineBit}, {ARM::FeaturePACBTI}},
12906 bool EnableFeature = !
Name.consume_front_insensitive(
"no");
12909 return Error(ExtLoc,
"unknown architectural extension: " +
Name);
12916 return Error(ExtLoc,
"unsupported architectural extension: " +
Name);
12919 return Error(ExtLoc,
"architectural extension '" +
Name +
12921 "allowed for the current base architecture");
12924 if (EnableFeature) {
12930 setAvailableFeatures(Features);
12938bool ARMAsmParser::parseDirectiveArchExtension(
SMLoc L) {
12943 return Error(getLexer().getLoc(),
"expected architecture extension name");
12952 if (
Name ==
"nocrypto") {
12953 enableArchExtFeature(
"nosha2", ExtLoc);
12954 enableArchExtFeature(
"noaes", ExtLoc);
12957 if (enableArchExtFeature(
Name, ExtLoc))
12960 return Error(ExtLoc,
"unknown architectural extension: " +
Name);
12967 ARMOperand &
Op =
static_cast<ARMOperand &
>(AsmOp);
12976 if (
CE->getValue() == 0)
12977 return Match_Success;
12982 if (
CE->getValue() == 8)
12983 return Match_Success;
12988 if (
CE->getValue() == 16)
12989 return Match_Success;
12993 const MCExpr *SOExpr =
Op.getImm();
12995 if (!SOExpr->evaluateAsAbsolute(
Value))
12996 return Match_Success;
12997 assert((
Value >= std::numeric_limits<int32_t>::min() &&
12998 Value <= std::numeric_limits<uint32_t>::max()) &&
12999 "expression value must be representable in 32 bits");
13003 if (hasV8Ops() &&
Op.isReg() &&
Op.getReg() == ARM::SP)
13004 return Match_Success;
13011 case MCK_VecListDPair:
13012 if (
Op.isQReg() && !hasMVE()) {
13013 auto DPair = getDRegFromQReg(
Op.getReg());
13014 DPair =
MRI->getMatchingSuperReg(
13015 DPair, ARM::dsub_0, &ARMMCRegisterClasses[ARM::DPairRegClassID]);
13016 Op.setVecListDPair(DPair);
13017 return Match_Success;
13019 return Match_InvalidOperand;
13023 case MCK_VecListOneD:
13024 if (
Op.isDReg() && !hasMVE()) {
13025 Op.setVecListOneD(
Op.getReg());
13026 return Match_Success;
13028 return Match_InvalidOperand;
13030 return Match_InvalidOperand;
13033bool ARMAsmParser::isMnemonicVPTPredicable(
StringRef Mnemonic,
13038 if (MS.isVPTPredicableCDEInstr(Mnemonic) ||
13039 (Mnemonic.
starts_with(
"vldrh") && Mnemonic !=
"vldrhi") ||
13041 !(ExtraToken ==
".f16" || ExtraToken ==
".32" || ExtraToken ==
".16" ||
13042 ExtraToken ==
".8")) ||
13043 (Mnemonic.
starts_with(
"vrint") && Mnemonic !=
"vrintr") ||
13044 (Mnemonic.
starts_with(
"vstrh") && Mnemonic !=
"vstrhi"))
13047 const char *predicable_prefixes[] = {
13048 "vabav",
"vabd",
"vabs",
"vadc",
"vadd",
13049 "vaddlv",
"vaddv",
"vand",
"vbic",
"vbrsr",
13050 "vcadd",
"vcls",
"vclz",
"vcmla",
"vcmp",
13051 "vcmul",
"vctp",
"vcvt",
"vddup",
"vdup",
13052 "vdwdup",
"veor",
"vfma",
"vfmas",
"vfms",
13053 "vhadd",
"vhcadd",
"vhsub",
"vidup",
"viwdup",
13054 "vldrb",
"vldrd",
"vldrw",
"vmax",
"vmaxa",
13055 "vmaxav",
"vmaxnm",
"vmaxnma",
"vmaxnmav",
"vmaxnmv",
13056 "vmaxv",
"vmin",
"vminav",
"vminnm",
"vminnmav",
13057 "vminnmv",
"vminv",
"vmla",
"vmladav",
"vmlaldav",
13058 "vmlalv",
"vmlas",
"vmlav",
"vmlsdav",
"vmlsldav",
13059 "vmovlb",
"vmovlt",
"vmovnb",
"vmovnt",
"vmul",
13060 "vmvn",
"vneg",
"vorn",
"vorr",
"vpnot",
13061 "vpsel",
"vqabs",
"vqadd",
"vqdmladh",
"vqdmlah",
13062 "vqdmlash",
"vqdmlsdh",
"vqdmulh",
"vqdmull",
"vqmovn",
13063 "vqmovun",
"vqneg",
"vqrdmladh",
"vqrdmlah",
"vqrdmlash",
13064 "vqrdmlsdh",
"vqrdmulh",
"vqrshl",
"vqrshrn",
"vqrshrun",
13065 "vqshl",
"vqshrn",
"vqshrun",
"vqsub",
"vrev16",
13066 "vrev32",
"vrev64",
"vrhadd",
"vrmlaldavh",
"vrmlalvh",
13067 "vrmlsldavh",
"vrmulh",
"vrshl",
"vrshr",
"vrshrn",
13068 "vsbc",
"vshl",
"vshlc",
"vshll",
"vshr",
13069 "vshrn",
"vsli",
"vsri",
"vstrb",
"vstrd",
13072 return std::any_of(
13073 std::begin(predicable_prefixes), std::end(predicable_prefixes),
13074 [&Mnemonic](
const char *prefix) {
return Mnemonic.
starts_with(prefix); });
13077std::unique_ptr<ARMOperand> ARMAsmParser::defaultCondCodeOp() {
13081std::unique_ptr<ARMOperand> ARMAsmParser::defaultCCOutOp() {
13082 return ARMOperand::CreateCCOut(0,
SMLoc());
13085std::unique_ptr<ARMOperand> ARMAsmParser::defaultVPTPredOp() {
unsigned const MachineRegisterInfo * MRI
This file defines the StringMap class.
static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static unsigned getNextRegister(unsigned Reg)
static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS, unsigned VariantID=0)
static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing)
static bool instIsBreakpoint(const MCInst &Inst)
unsigned findCCOutInd(const OperandVector &Operands, unsigned MnemonicOpsEndInd)
static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo, unsigned Reg, unsigned HiReg, bool &containsReg)
static bool isDataTypeToken(StringRef Tok)
}
static MCRegister MatchRegisterName(StringRef Name)
static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing)
unsigned getRegListInd(const OperandVector &Operands, unsigned MnemonicOpsEndInd)
static const char * getSubtargetFeatureName(uint64_t Val)
static bool isVectorPredicable(const MCInstrDesc &MCID)
static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp)
MatchCoprocessorOperandName - Try to parse an coprocessor related instruction with a symbolic operand...
static void applyMnemonicAliases(StringRef &Mnemonic, const FeatureBitset &Features, unsigned VariantID)
void removeCCOut(OperandVector &Operands, unsigned &MnemonicOpsEndInd)
static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT)
static bool insertNoDuplicates(SmallVectorImpl< std::pair< unsigned, unsigned > > &Regs, unsigned Enc, unsigned Reg)
static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID)
static bool isThumbI8Relocation(MCParsedAsmOperand &MCOp)
bool operandsContainWide(OperandVector &Operands, unsigned MnemonicOpsEndInd)
static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg)
void removeCondCode(OperandVector &Operands, unsigned &MnemonicOpsEndInd)
LLVM_EXTERNAL_VISIBILITY void LLVMInitializeARMAsmParser()
Force static initialization.
static unsigned getMnemonicOpsEndInd(const OperandVector &Operands)
static bool isARMMCExpr(MCParsedAsmOperand &MCOp)
unsigned findCondCodeInd(const OperandVector &Operands, unsigned MnemonicOpsEndInd)
void removeVPTCondCode(OperandVector &Operands, unsigned &MnemonicOpsEndInd)
static bool isThumb(const MCSubtargetInfo &STI)
static uint64_t scale(uint64_t Num, uint32_t N, uint32_t D)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static Register getFPReg(const CSKYSubtarget &STI)
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
#define LLVM_EXTERNAL_VISIBILITY
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static cl::opt< bool > AddBuildAttributes("hexagon-add-build-attributes")
mir Rename Register Operands
static MSP430CC::CondCodes getCondCode(unsigned Cond)
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI Pre allocate WWM Registers
static cl::opt< std::set< SPIRV::Extension::Extension >, false, SPIRVExtensionsParser > Extensions("spirv-ext", cl::desc("Specify list of enabled SPIR-V extensions"))
This file implements the SmallBitVector class.
This file defines the SmallSet class.
This file defines the SmallVector class.
StringSet - A set-like wrapper for the StringMap.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=ARM::NoRegAltName)
VariantKind getKind() const
getOpcode - Get the kind of this expression.
static const ARMMCExpr * create(VariantKind Kind, const MCExpr *Expr, MCContext &Ctx)
Target independent representation for an assembler token.
int64_t getIntVal() const
bool isNot(TokenKind K) const
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on string...
StringRef getStringContents() const
Get the contents of a string token (without quotes).
bool is(TokenKind K) const
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string.
This class represents an Operation in the Expression.
Implements a dense probed hash-table based set.
Base class for user error types.
Lightweight error class with error context and mandatory checking.
Container class for subtarget features.
constexpr bool test(unsigned I) const
constexpr size_t size() const
Generic assembler lexer interface, for use by target specific assembly lexers.
const AsmToken peekTok(bool ShouldSkipSpace=true)
Look ahead at the next token to be lexed.
virtual void Initialize(MCAsmParser &Parser)
Initialize the extension for parsing using the given Parser.
MCStreamer & getStreamer()
MCAsmParser & getParser()
Generic assembler parser interface, for use by target specific assembly parsers.
bool parseToken(AsmToken::TokenKind T, const Twine &Msg="unexpected token")
virtual bool parseEscapedString(std::string &Data)=0
Parse the current token as a string which may include escaped characters and return the string conten...
virtual MCStreamer & getStreamer()=0
Return the output streamer for the assembler.
virtual void Note(SMLoc L, const Twine &Msg, SMRange Range=std::nullopt)=0
Emit a note at the location L, with the message Msg.
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
const AsmToken & getTok() const
Get the current AsmToken from the stream.
virtual bool parseIdentifier(StringRef &Res)=0
Parse an identifier or string (as a quoted identifier) and set Res to the identifier contents.
bool parseOptionalToken(AsmToken::TokenKind T)
Attempt to parse and consume token, returning true on success.
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
virtual bool Warning(SMLoc L, const Twine &Msg, SMRange Range=std::nullopt)=0
Emit a warning at the location L, with the message Msg.
bool Error(SMLoc L, const Twine &Msg, SMRange Range=std::nullopt)
Return an error at the location L, with the message Msg.
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
static const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Context object for machine code objects.
const MCRegisterInfo * getRegisterInfo() const
Base class for the full range of assembler expressions which are needed for parsing.
@ Constant
Constant expressions.
Instances of this class represent a single low-level machine instruction.
void dump_pretty(raw_ostream &OS, const MCInstPrinter *Printer=nullptr, StringRef Separator=" ", const MCRegisterInfo *RegInfo=nullptr) const
Dump the MCInst as prettily as possible using the additional MC structures, if given.
unsigned getNumOperands() const
unsigned getOpcode() const
iterator insert(iterator I, const MCOperand &Op)
void addOperand(const MCOperand Op)
void setOpcode(unsigned Op)
const MCOperand & getOperand(unsigned i) const
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool isIndirectBranch() const
Return true if this is an indirect branch, such as a branch through a register.
int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
bool hasDefOfPhysReg(const MCInst &MI, unsigned Reg, const MCRegisterInfo &RI) const
Return true if this instruction defines the specified physical register, either explicitly or implici...
bool hasOptionalDef() const
Set if this instruction has an optional definition, e.g.
unsigned short NumOperands
bool isBranch() const
Returns true if this is a conditional, unconditional, or indirect branch.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
bool isPredicable() const
Return true if this instruction has a predicate operand that controls execution.
bool isCall() const
Return true if the instruction is a call.
bool isTerminator() const
Returns true if this instruction part of the terminator for a basic block.
bool isReturn() const
Return true if the instruction is a return.
Interface to description of machine instruction set.
This holds information about one operand of a machine instruction, indicating the register class for ...
Instances of this class represent operands of the MCInst class.
static MCOperand createReg(unsigned Reg)
static MCOperand createExpr(const MCExpr *Val)
static MCOperand createImm(int64_t Val)
unsigned getReg() const
Returns the register number.
const MCExpr * getExpr() const
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand.
virtual SMLoc getStartLoc() const =0
getStartLoc - Get the location of the first token of this operand.
virtual bool isReg() const =0
isReg - Is this a register operand?
virtual bool isMem() const =0
isMem - Is this a memory operand?
virtual MCRegister getReg() const =0
virtual void print(raw_ostream &OS) const =0
print - Print a debug representation of the operand to the given stream.
virtual bool isToken() const =0
isToken - Is this a token operand?
virtual bool isImm() const =0
isImm - Is this an immediate operand?
virtual SMLoc getEndLoc() const =0
getEndLoc - Get the location of the last token of this operand.
MCRegisterClass - Base class of TargetRegisterClass.
unsigned getID() const
getID() - Return the register class ID number.
unsigned getNumRegs() const
getNumRegs - Return the number of registers in this class.
unsigned getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
Instances of this class represent a uniqued identifier for a section in the current translation unit.
Streaming machine code generation interface.
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
virtual void emitLabel(MCSymbol *Symbol, SMLoc Loc=SMLoc())
Emit a label for Symbol into the current section.
MCTargetStreamer * getTargetStreamer()
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const FeatureBitset & getFeatureBits() const
FeatureBitset ApplyFeatureFlag(StringRef FS)
Apply a feature flag and return the re-computed feature bits, including all feature bits implied by t...
FeatureBitset SetFeatureBitsTransitively(const FeatureBitset &FB)
Set/clear additional feature bits, including all other bits they imply.
void setDefaultFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS)
Set the features to the default for the given CPU and TuneCPU, with ano appended feature string.
FeatureBitset ToggleFeature(uint64_t FB)
Toggle a feature and return the re-computed feature bits.
FeatureBitset ClearFeatureBitsTransitively(const FeatureBitset &FB)
Represent a reference to a symbol from inside an expression.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
MCTargetAsmParser - Generic interface to target specific assembly parsers.
virtual void onLabelParsed(MCSymbol *Symbol)
MCSubtargetInfo & copySTI()
Create a copy of STI and return a non-const reference to it.
@ FIRST_TARGET_MATCH_RESULT_TY
virtual bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc)=0
virtual bool ParseDirective(AsmToken DirectiveID)
ParseDirective - Parse a target specific assembler directive This method is deprecated,...
virtual unsigned checkEarlyTargetMatchPredicate(MCInst &Inst, const OperandVector &Operands)
Validate the instruction match against any complex target predicates before rendering any operands to...
virtual ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc)=0
tryParseRegister - parse one register if possible
virtual void flushPendingInstructions(MCStreamer &Out)
Ensure that all previously parsed instructions have been emitted to the output streamer,...
void setAvailableFeatures(const FeatureBitset &Value)
virtual MCSymbolRefExpr::VariantKind getVariantKindForName(StringRef Name) const
const MCSubtargetInfo & getSTI() const
virtual void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc)
virtual unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, unsigned Kind)
Allow a target to add special case operand matching for things that tblgen doesn't/can't handle effec...
virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, OperandVector &Operands)=0
ParseInstruction - Parse one assembly instruction.
virtual unsigned checkTargetMatchPredicate(MCInst &Inst)
checkTargetMatchPredicate - Validate the instruction match against any complex target predicates not ...
virtual bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, OperandVector &Operands, MCStreamer &Out, uint64_t &ErrorInfo, bool MatchingInlineAsm)=0
MatchAndEmitInstruction - Recognize a series of operands of a parsed instruction as an actual MCInst ...
Target specific streamer interface.
Ternary parse status returned by various parse* methods.
constexpr bool isFailure() const
static constexpr StatusTy Failure
constexpr bool isSuccess() const
static constexpr StatusTy Success
static constexpr StatusTy NoMatch
Represents a location in source code.
static SMLoc getFromPointer(const char *Ptr)
constexpr const char * getPointer() const
Represents a range in source code.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
typename SuperClass::const_iterator const_iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
StringMap - This is an unconventional map that is specialized for handling keys that are "strings",...
iterator find(StringRef Key)
size_type count(StringRef Key) const
count - Return 1 if the element is in the map, 0 otherwise.
bool insert(MapEntryTy *KeyValue)
insert - Insert the specified key/value pair into the map.
StringRef - Represent a constant reference to a string, i.e.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
constexpr size_t size() const
size - Get the string size.
StringRef trim(char Char) const
Return string with consecutive Char characters starting from the left and right removed.
std::string lower() const
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
static constexpr size_t npos
bool equals_insensitive(StringRef RHS) const
Check for string equality, ignoring case.
StringSet - A wrapper for StringMap that provides set-like functionality.
std::pair< typename Base::iterator, bool > insert(StringRef key)
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
Triple - Helper class for working with autoconf configuration names.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
LLVM Value Representation.
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
This class implements an extremely fast bulk output stream that can only output to a stream.
A raw_ostream that writes to an SmallVector or SmallString.
This class provides various memory handling functions that manipulate MemoryBlock instances.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const TagNameMap & getARMAttributeTags()
static CondCodes getOppositeCondition(CondCodes CC)
unsigned getSORegOffset(unsigned Op)
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
unsigned encodeNEONi16splat(unsigned Value)
float getFPImmFloat(unsigned Imm)
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
unsigned getAM2Opc(AddrOpc Opc, unsigned Imm12, ShiftOpc SO, unsigned IdxMode=0)
unsigned getAM5Opc(AddrOpc Opc, unsigned char Offset)
getAM5Opc - This function encodes the addrmode5 opc field.
ShiftOpc getSORegShOp(unsigned Op)
bool isNEONi16splat(unsigned Value)
Checks if Value is a correct immediate for instructions like VBIC/VORR.
unsigned getAM5FP16Opc(AddrOpc Opc, unsigned char Offset)
getAM5FP16Opc - This function encodes the addrmode5fp16 opc field.
unsigned getAM3Opc(AddrOpc Opc, unsigned char Offset, unsigned IdxMode=0)
getAM3Opc - This function encodes the addrmode3 opc field.
bool isNEONi32splat(unsigned Value)
Checks if Value is a correct immediate for instructions like VBIC/VORR.
unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
unsigned encodeNEONi32splat(unsigned Value)
Encode NEON 32 bits Splat immediate for instructions like VBIC/VORR.
const StringRef getShiftOpcStr(ShiftOpc Op)
static const char * IFlagsToString(unsigned val)
bool getFPUFeatures(FPUKind FPUKind, std::vector< StringRef > &Features)
StringRef getArchName(ArchKind AK)
uint64_t parseArchExt(StringRef ArchExt)
ArchKind parseArch(StringRef Arch)
bool isVpred(OperandType op)
FPUKind parseFPU(StringRef FPU)
bool isCDECoproc(size_t Coproc, const MCSubtargetInfo &STI)
@ D16
Only 16 D registers.
constexpr bool any(E Val)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
std::optional< unsigned > attrTypeFromString(StringRef tag, TagNameMap tagNameMap)
Flag
These should be considered private to the implementation of the MCInstrDesc class.
bool parseAssignmentExpression(StringRef Name, bool allow_redef, MCAsmParser &Parser, MCSymbol *&Symbol, const MCExpr *&Value)
Parse a value expression and return whether it can be assigned to a symbol with the given name.
@ CE
Windows NT (Windows on ARM)
Reg
All possible values of the reg field in the ModR/M byte.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
NodeAddr< FuncNode * > Func
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
static const char * ARMVPTPredToString(ARMVCC::VPTCodes CC)
int popcount(T Value) noexcept
Count the number of set bits in a value.
Target & getTheThumbBETarget()
static unsigned ARMCondCodeFromString(StringRef CC)
const ARMInstrTable ARMDescs
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
static bool isARMLowRegister(unsigned Reg)
isARMLowRegister - Returns true if the register is a low register (r0-r7).
auto reverse(ContainerTy &&C)
@ Never
Never set the bit.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool is_sorted(R &&Range, Compare C)
Wrapper function around std::is_sorted to check if elements in a range R are sorted with respect to a...
bool IsCPSRDead< MCInst >(const MCInst *Instr)
static bool isValidCoprocessorNumber(unsigned Num, const FeatureBitset &featureBits)
isValidCoprocessorNumber - decide whether an explicit coprocessor number is legal in generic instruct...
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ MCAF_Code16
.code16 (X86) / .code 16 (ARM)
@ MCAF_Code32
.code32 (X86) / .code 32 (ARM)
DWARFExpression::Operation Op
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
static unsigned ARMVectorCondCodeFromString(StringRef CC)
static const char * ARMCondCodeToString(ARMCC::CondCodes CC)
Target & getTheARMLETarget()
Target & getTheARMBETarget()
Target & getTheThumbLETarget()
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
const FeatureBitset Features
MCOperandInfo OperandInfo[3026]
MCPhysReg ImplicitOps[130]
This struct is a compact representation of a valid (non-zero power of two) alignment.
Holds functions to get, set or test bitfields.
RegisterMCAsmParser - Helper template for registering a target specific assembly parser,...