71#define DEBUG_TYPE "asm-parser"
78enum class ImplicitItModeTy {
Always,
Never, ARMOnly, ThumbOnly };
81 "arm-implicit-it",
cl::init(ImplicitItModeTy::ARMOnly),
82 cl::desc(
"Allow conditional instructions outdside of an IT block"),
84 "Accept in both ISAs, emit implicit ITs in Thumb"),
86 "Warn in ARM, reject in Thumb"),
88 "Accept in ARM, reject in Thumb"),
89 clEnumValN(ImplicitItModeTy::ThumbOnly,
"thumb",
90 "Warn in ARM, emit implicit ITs in Thumb")));
95enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
/// Extract the predicate bit for the instruction at the given 1-based
/// position of an IT block from its 5-bit ITSTATE-style mask.
static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
  const unsigned Shift = 5 - Position;
  return (Mask >> Shift) & 1u;
}
113 Locs PersonalityLocs;
114 Locs PersonalityIndexLocs;
115 Locs HandlerDataLocs;
121 bool hasFnStart()
const {
return !FnStartLocs.empty(); }
122 bool cantUnwind()
const {
return !CantUnwindLocs.empty(); }
123 bool hasHandlerData()
const {
return !HandlerDataLocs.empty(); }
125 bool hasPersonality()
const {
126 return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
129 void recordFnStart(
SMLoc L) { FnStartLocs.push_back(L); }
130 void recordCantUnwind(
SMLoc L) { CantUnwindLocs.push_back(L); }
131 void recordPersonality(
SMLoc L) { PersonalityLocs.push_back(L); }
132 void recordHandlerData(
SMLoc L) { HandlerDataLocs.push_back(L); }
133 void recordPersonalityIndex(
SMLoc L) { PersonalityIndexLocs.push_back(L); }
135 void saveFPReg(
int Reg) { FPReg = Reg; }
136 int getFPReg()
const {
return FPReg; }
138 void emitFnStartLocNotes()
const {
139 for (
const SMLoc &Loc : FnStartLocs)
140 Parser.
Note(Loc,
".fnstart was specified here");
143 void emitCantUnwindLocNotes()
const {
144 for (
const SMLoc &Loc : CantUnwindLocs)
145 Parser.
Note(Loc,
".cantunwind was specified here");
148 void emitHandlerDataLocNotes()
const {
149 for (
const SMLoc &Loc : HandlerDataLocs)
150 Parser.
Note(Loc,
".handlerdata was specified here");
153 void emitPersonalityLocNotes()
const {
155 PE = PersonalityLocs.end(),
156 PII = PersonalityIndexLocs.begin(),
157 PIE = PersonalityIndexLocs.end();
158 PI != PE || PII != PIE;) {
159 if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
160 Parser.
Note(*PI++,
".personality was specified here");
161 else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
162 Parser.
Note(*PII++,
".personalityindex was specified here");
165 "at the same location");
170 FnStartLocs = Locs();
171 CantUnwindLocs = Locs();
172 PersonalityLocs = Locs();
173 HandlerDataLocs = Locs();
174 PersonalityIndexLocs = Locs();
180class ARMMnemonicSets {
191 return CDE.
count(Mnemonic);
196 bool isVPTPredicableCDEInstr(
StringRef Mnemonic) {
199 return CDEWithVPTSuffix.
count(Mnemonic);
204 bool isITPredicableCDEInstr(
StringRef Mnemonic) {
214 bool isCDEDualRegInstr(
StringRef Mnemonic) {
217 return Mnemonic ==
"cx1d" || Mnemonic ==
"cx1da" ||
218 Mnemonic ==
"cx2d" || Mnemonic ==
"cx2da" ||
219 Mnemonic ==
"cx3d" || Mnemonic ==
"cx3da";
224 for (
StringRef Mnemonic: {
"cx1",
"cx1a",
"cx1d",
"cx1da",
225 "cx2",
"cx2a",
"cx2d",
"cx2da",
226 "cx3",
"cx3a",
"cx3d",
"cx3da", })
227 CDE.insert(Mnemonic);
229 {
"vcx1",
"vcx1a",
"vcx2",
"vcx2a",
"vcx3",
"vcx3a"}) {
230 CDE.insert(Mnemonic);
231 CDEWithVPTSuffix.insert(Mnemonic);
232 CDEWithVPTSuffix.insert(std::string(Mnemonic) +
"t");
233 CDEWithVPTSuffix.insert(std::string(Mnemonic) +
"e");
244 "do not have a target streamer");
252 bool NextSymbolIsThumb;
254 bool useImplicitITThumb()
const {
255 return ImplicitItMode == ImplicitItModeTy::Always ||
256 ImplicitItMode == ImplicitItModeTy::ThumbOnly;
259 bool useImplicitITARM()
const {
260 return ImplicitItMode == ImplicitItModeTy::Always ||
261 ImplicitItMode == ImplicitItModeTy::ARMOnly;
276 unsigned CurPosition;
292 if (!inImplicitITBlock()) {
306 for (
const MCInst &Inst : PendingConditionalInsts) {
309 PendingConditionalInsts.clear();
313 ITState.CurPosition = ~0U;
316 bool inITBlock() {
return ITState.CurPosition != ~0U; }
317 bool inExplicitITBlock() {
return inITBlock() && ITState.IsExplicit; }
318 bool inImplicitITBlock() {
return inITBlock() && !ITState.IsExplicit; }
320 bool lastInITBlock() {
324 void forwardITPosition() {
325 if (!inITBlock())
return;
330 if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
331 ITState.CurPosition = ~0U;
335 void rewindImplicitITPosition() {
336 assert(inImplicitITBlock());
337 assert(ITState.CurPosition > 1);
338 ITState.CurPosition--;
340 unsigned NewMask = 0;
341 NewMask |= ITState.Mask & (0xC << TZ);
342 NewMask |= 0x2 << TZ;
343 ITState.Mask = NewMask;
348 void discardImplicitITBlock() {
349 assert(inImplicitITBlock());
350 assert(ITState.CurPosition == 1);
351 ITState.CurPosition = ~0U;
356 unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
362 void invertCurrentITCondition() {
363 if (ITState.CurPosition == 1) {
366 ITState.Mask ^= 1 << (5 - ITState.CurPosition);
371 bool isITBlockFull() {
372 return inITBlock() && (ITState.Mask & 1);
378 assert(inImplicitITBlock());
383 unsigned NewMask = 0;
385 NewMask |= ITState.Mask & (0xE << TZ);
387 NewMask |= (
Cond != ITState.Cond) << TZ;
389 NewMask |= 1 << (TZ - 1);
390 ITState.Mask = NewMask;
394 void startImplicitITBlock() {
398 ITState.CurPosition = 1;
399 ITState.IsExplicit =
false;
410 ITState.CurPosition = 0;
411 ITState.IsExplicit =
true;
416 unsigned CurPosition;
418 bool inVPTBlock() {
return VPTState.CurPosition != ~0U; }
419 void forwardVPTPosition() {
420 if (!inVPTBlock())
return;
422 if (++VPTState.CurPosition == 5 - TZ)
423 VPTState.CurPosition = ~0U;
439 unsigned MnemonicOpsEndInd,
unsigned ListIndex,
440 bool IsARPop =
false);
442 unsigned MnemonicOpsEndInd,
unsigned ListIndex);
447 std::optional<ARM_AM::ShiftOpc> tryParseShiftToken();
448 bool parseRegisterList(
OperandVector &,
bool EnforceOrder =
true,
449 bool AllowRAAC =
false,
450 bool AllowOutOfBoundReg =
false);
453 bool parseImmExpr(int64_t &Out);
456 unsigned &ShiftAmount);
457 bool parseLiteralValues(
unsigned Size,
SMLoc L);
458 bool parseDirectiveThumb(
SMLoc L);
459 bool parseDirectiveARM(
SMLoc L);
460 bool parseDirectiveThumbFunc(
SMLoc L);
461 bool parseDirectiveCode(
SMLoc L);
462 bool parseDirectiveSyntax(
SMLoc L);
464 bool parseDirectiveUnreq(
SMLoc L);
465 bool parseDirectiveArch(
SMLoc L);
466 bool parseDirectiveEabiAttr(
SMLoc L);
467 bool parseDirectiveCPU(
SMLoc L);
468 bool parseDirectiveFPU(
SMLoc L);
469 bool parseDirectiveFnStart(
SMLoc L);
470 bool parseDirectiveFnEnd(
SMLoc L);
471 bool parseDirectiveCantUnwind(
SMLoc L);
472 bool parseDirectivePersonality(
SMLoc L);
473 bool parseDirectiveHandlerData(
SMLoc L);
474 bool parseDirectiveSetFP(
SMLoc L);
475 bool parseDirectivePad(
SMLoc L);
476 bool parseDirectiveRegSave(
SMLoc L,
bool IsVector);
477 bool parseDirectiveInst(
SMLoc L,
char Suffix =
'\0');
478 bool parseDirectiveLtorg(
SMLoc L);
479 bool parseDirectiveEven(
SMLoc L);
480 bool parseDirectivePersonalityIndex(
SMLoc L);
481 bool parseDirectiveUnwindRaw(
SMLoc L);
482 bool parseDirectiveTLSDescSeq(
SMLoc L);
483 bool parseDirectiveMovSP(
SMLoc L);
484 bool parseDirectiveObjectArch(
SMLoc L);
485 bool parseDirectiveArchExtension(
SMLoc L);
486 bool parseDirectiveAlign(
SMLoc L);
487 bool parseDirectiveThumbSet(
SMLoc L);
489 bool parseDirectiveSEHAllocStack(
SMLoc L,
bool Wide);
490 bool parseDirectiveSEHSaveRegs(
SMLoc L,
bool Wide);
491 bool parseDirectiveSEHSaveSP(
SMLoc L);
492 bool parseDirectiveSEHSaveFRegs(
SMLoc L);
493 bool parseDirectiveSEHSaveLR(
SMLoc L);
494 bool parseDirectiveSEHPrologEnd(
SMLoc L,
bool Fragment);
495 bool parseDirectiveSEHNop(
SMLoc L,
bool Wide);
496 bool parseDirectiveSEHEpilogStart(
SMLoc L,
bool Condition);
497 bool parseDirectiveSEHEpilogEnd(
SMLoc L);
498 bool parseDirectiveSEHCustom(
SMLoc L);
500 std::unique_ptr<ARMOperand> defaultCondCodeOp();
501 std::unique_ptr<ARMOperand> defaultCCOutOp();
502 std::unique_ptr<ARMOperand> defaultVPTPredOp();
508 bool &CarrySetting,
unsigned &ProcessorIMod,
511 StringRef FullInst,
bool &CanAcceptCarrySet,
512 bool &CanAcceptPredicationCode,
513 bool &CanAcceptVPTPredicationCode);
516 void tryConvertingToTwoOperandForm(
StringRef Mnemonic,
519 unsigned MnemonicOpsEndInd);
522 unsigned MnemonicOpsEndInd);
529 bool isThumbOne()
const {
533 bool isThumbTwo()
const {
537 bool hasThumb()
const {
541 bool hasThumb2()
const {
545 bool hasV6Ops()
const {
549 bool hasV6T2Ops()
const {
553 bool hasV6MOps()
const {
557 bool hasV7Ops()
const {
561 bool hasV8Ops()
const {
565 bool hasV8MBaseline()
const {
569 bool hasV8MMainline()
const {
572 bool hasV8_1MMainline()
const {
575 bool hasMVEFloat()
const {
578 bool hasCDE()
const {
581 bool has8MSecExt()
const {
585 bool hasARM()
const {
589 bool hasDSP()
const {
593 bool hasD32()
const {
597 bool hasV8_1aOps()
const {
601 bool hasRAS()
const {
607 auto FB = ComputeAvailableFeatures(STI.
ToggleFeature(ARM::ModeThumb));
611 void FixModeAfterArchChange(
bool WasThumb,
SMLoc Loc);
613 bool isMClass()
const {
620#define GET_ASSEMBLER_HEADER
621#include "ARMGenAsmMatcher.inc"
661 unsigned MnemonicOpsEndInd);
664 bool shouldOmitVectorPredicateOperand(
StringRef Mnemonic,
666 unsigned MnemonicOpsEndInd);
667 bool isITBlockTerminator(
MCInst &Inst)
const;
670 unsigned MnemonicOpsEndInd);
672 bool ARMMode,
bool Writeback,
673 unsigned MnemonicOpsEndInd);
676 enum ARMMatchResultTy {
678 Match_RequiresNotITBlock,
680 Match_RequiresThumb2,
682 Match_RequiresFlagSetting,
683#define GET_OPERAND_DIAGNOSTIC_TYPES
684#include "ARMGenAsmMatcher.inc"
701 getTargetStreamer().emitTargetAttributes(STI);
704 ITState.CurPosition = ~0
U;
706 VPTState.CurPosition = ~0
U;
708 NextSymbolIsThumb =
false;
714 SMLoc &EndLoc)
override;
720 unsigned Kind)
override;
729 bool MatchingInlineAsm)
override;
732 bool MatchingInlineAsm,
bool &EmitInITBlock,
735 struct NearMissMessage {
740 const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
755 const MCInstrDesc &getInstrDesc(
unsigned int Opcode)
const {
756 return MII.get(Opcode);
762 unsigned getDRegFromQReg(
unsigned QReg)
const {
763 return MRI->getSubReg(QReg, ARM::dsub_0);
782 k_InstSyncBarrierOpt,
783 k_TraceSyncBarrierOpt,
792 k_RegisterListWithAPSR,
795 k_FPSRegisterListWithVPR,
796 k_FPDRegisterListWithVPR,
798 k_VectorListAllLanes,
805 k_ConstantPoolImmediate,
806 k_BitfieldDescriptor,
810 SMLoc StartLoc, EndLoc, AlignmentLoc;
813 ARMAsmParser *Parser;
827 struct CoprocOptionOp {
869 struct VectorListOp {
876 struct VectorIndexOp {
890 unsigned OffsetRegNum;
895 unsigned isNegative : 1;
898 struct PostIdxRegOp {
905 struct ShifterImmOp {
910 struct RegShiftedRegOp {
917 struct RegShiftedImmOp {
941 struct CoprocOptionOp CoprocOption;
942 struct MBOptOp MBOpt;
943 struct ISBOptOp ISBOpt;
944 struct TSBOptOp TSBOpt;
945 struct ITMaskOp ITMask;
947 struct MMaskOp MMask;
948 struct BankedRegOp BankedReg;
951 struct VectorListOp VectorList;
952 struct VectorIndexOp VectorIndex;
955 struct PostIdxRegOp PostIdxReg;
956 struct ShifterImmOp ShifterImm;
957 struct RegShiftedRegOp RegShiftedReg;
958 struct RegShiftedImmOp RegShiftedImm;
959 struct RotImmOp RotImm;
960 struct ModImmOp ModImm;
965 ARMOperand(KindTy K, ARMAsmParser &Parser) :
Kind(
K), Parser(&Parser) {}
978 SMLoc getAlignmentLoc()
const {
979 assert(Kind == k_Memory &&
"Invalid access!");
984 assert(Kind == k_CondCode &&
"Invalid access!");
989 assert(isVPTPred() &&
"Invalid access!");
993 unsigned getCoproc()
const {
994 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) &&
"Invalid access!");
999 assert(Kind == k_Token &&
"Invalid access!");
1004 assert((Kind == k_Register || Kind == k_CCOut) &&
"Invalid access!");
1009 assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
1010 Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
1011 Kind == k_FPSRegisterListWithVPR ||
1012 Kind == k_FPDRegisterListWithVPR) &&
1017 const MCExpr *getImm()
const {
1022 const MCExpr *getConstantPoolImm()
const {
1023 assert(isConstantPoolImm() &&
"Invalid access!");
1027 unsigned getVectorIndex()
const {
1028 assert(Kind == k_VectorIndex &&
"Invalid access!");
1029 return VectorIndex.Val;
1033 assert(Kind == k_MemBarrierOpt &&
"Invalid access!");
1038 assert(Kind == k_InstSyncBarrierOpt &&
"Invalid access!");
1043 assert(Kind == k_TraceSyncBarrierOpt &&
"Invalid access!");
1048 assert(Kind == k_ProcIFlags &&
"Invalid access!");
1052 unsigned getMSRMask()
const {
1053 assert(Kind == k_MSRMask &&
"Invalid access!");
1057 unsigned getBankedReg()
const {
1058 assert(Kind == k_BankedReg &&
"Invalid access!");
1059 return BankedReg.Val;
1062 bool isCoprocNum()
const {
return Kind == k_CoprocNum; }
1063 bool isCoprocReg()
const {
return Kind == k_CoprocReg; }
1064 bool isCoprocOption()
const {
return Kind == k_CoprocOption; }
1065 bool isCondCode()
const {
return Kind == k_CondCode; }
1066 bool isVPTPred()
const {
return Kind == k_VPTPred; }
1067 bool isCCOut()
const {
return Kind == k_CCOut; }
1068 bool isITMask()
const {
return Kind == k_ITCondMask; }
1069 bool isITCondCode()
const {
return Kind == k_CondCode; }
1070 bool isImm()
const override {
1071 return Kind == k_Immediate;
1074 bool isARMBranchTarget()
const {
1075 if (!
isImm())
return false;
1077 if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1078 return CE->getValue() % 4 == 0;
1083 bool isThumbBranchTarget()
const {
1084 if (!
isImm())
return false;
1086 if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1087 return CE->getValue() % 2 == 0;
1093 template<
unsigned w
idth,
unsigned scale>
1094 bool isUnsignedOffset()
const {
1095 if (!
isImm())
return false;
1096 if (isa<MCSymbolRefExpr>(
Imm.Val))
return true;
1098 int64_t Val =
CE->getValue();
1100 int64_t
Max =
Align * ((1LL << width) - 1);
1101 return ((Val %
Align) == 0) && (Val >= 0) && (Val <= Max);
1108 template<
unsigned w
idth,
unsigned scale>
1109 bool isSignedOffset()
const {
1110 if (!
isImm())
return false;
1111 if (isa<MCSymbolRefExpr>(
Imm.Val))
return true;
1113 int64_t Val =
CE->getValue();
1115 int64_t
Max =
Align * ((1LL << (width-1)) - 1);
1116 int64_t Min = -
Align * (1LL << (width-1));
1117 return ((Val %
Align) == 0) && (Val >= Min) && (Val <= Max);
1124 bool isLEOffset()
const {
1125 if (!
isImm())
return false;
1126 if (isa<MCSymbolRefExpr>(
Imm.Val))
return true;
1128 int64_t Val =
CE->getValue();
1129 return Val < 0 && Val >= -4094 && (Val & 1) == 0;
1138 bool isThumbMemPC()
const {
1141 if (isa<MCSymbolRefExpr>(
Imm.Val))
return true;
1143 if (!CE)
return false;
1144 Val =
CE->getValue();
1146 else if (isGPRMem()) {
1147 if(!
Memory.OffsetImm ||
Memory.OffsetRegNum)
return false;
1148 if(
Memory.BaseRegNum != ARM::PC)
return false;
1149 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
1150 Val =
CE->getValue();
1155 return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
1158 bool isFPImm()
const {
1159 if (!
isImm())
return false;
1161 if (!CE)
return false;
1166 template<
int64_t N,
int64_t M>
1167 bool isImmediate()
const {
1168 if (!
isImm())
return false;
1170 if (!CE)
return false;
1171 int64_t
Value =
CE->getValue();
1175 template<
int64_t N,
int64_t M>
1176 bool isImmediateS4()
const {
1177 if (!
isImm())
return false;
1179 if (!CE)
return false;
1180 int64_t
Value =
CE->getValue();
1183 template<
int64_t N,
int64_t M>
1184 bool isImmediateS2()
const {
1185 if (!
isImm())
return false;
1187 if (!CE)
return false;
1188 int64_t
Value =
CE->getValue();
1191 bool isFBits16()
const {
1192 return isImmediate<0, 17>();
1194 bool isFBits32()
const {
1195 return isImmediate<1, 33>();
1197 bool isImm8s4()
const {
1198 return isImmediateS4<-1020, 1020>();
1200 bool isImm7s4()
const {
1201 return isImmediateS4<-508, 508>();
1203 bool isImm7Shift0()
const {
1204 return isImmediate<-127, 127>();
1206 bool isImm7Shift1()
const {
1207 return isImmediateS2<-255, 255>();
1209 bool isImm7Shift2()
const {
1210 return isImmediateS4<-511, 511>();
1212 bool isImm7()
const {
1213 return isImmediate<-127, 127>();
1215 bool isImm0_1020s4()
const {
1216 return isImmediateS4<0, 1020>();
1218 bool isImm0_508s4()
const {
1219 return isImmediateS4<0, 508>();
1221 bool isImm0_508s4Neg()
const {
1222 if (!
isImm())
return false;
1224 if (!CE)
return false;
1225 int64_t
Value = -
CE->getValue();
1230 bool isImm0_4095Neg()
const {
1231 if (!
isImm())
return false;
1233 if (!CE)
return false;
1238 if ((
CE->getValue() >> 32) > 0)
return false;
1243 bool isImm0_7()
const {
1244 return isImmediate<0, 7>();
1247 bool isImm1_16()
const {
1248 return isImmediate<1, 16>();
1251 bool isImm1_32()
const {
1252 return isImmediate<1, 32>();
1255 bool isImm8_255()
const {
1256 return isImmediate<8, 255>();
1259 bool isImm0_255Expr()
const {
1267 int64_t
Value =
CE->getValue();
1268 return isUInt<8>(
Value);
1271 bool isImm256_65535Expr()
const {
1272 if (!
isImm())
return false;
1276 if (!CE)
return true;
1277 int64_t
Value =
CE->getValue();
1281 bool isImm0_65535Expr()
const {
1282 if (!
isImm())
return false;
1286 if (!CE)
return true;
1287 int64_t
Value =
CE->getValue();
1291 bool isImm24bit()
const {
1292 return isImmediate<0, 0xffffff + 1>();
1295 bool isImmThumbSR()
const {
1296 return isImmediate<1, 33>();
1299 bool isPKHLSLImm()
const {
1300 return isImmediate<0, 32>();
1303 bool isPKHASRImm()
const {
1304 return isImmediate<0, 33>();
1307 bool isAdrLabel()
const {
1310 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1314 if (!
isImm())
return false;
1316 if (!CE)
return false;
1317 int64_t
Value =
CE->getValue();
1322 bool isT2SOImm()
const {
1325 if (
isImm() && !isa<MCConstantExpr>(getImm())) {
1328 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
1332 if (!
isImm())
return false;
1334 if (!CE)
return false;
1335 int64_t
Value =
CE->getValue();
1339 bool isT2SOImmNot()
const {
1340 if (!
isImm())
return false;
1342 if (!CE)
return false;
1343 int64_t
Value =
CE->getValue();
1348 bool isT2SOImmNeg()
const {
1349 if (!
isImm())
return false;
1351 if (!CE)
return false;
1352 int64_t
Value =
CE->getValue();
1358 bool isSetEndImm()
const {
1359 if (!
isImm())
return false;
1361 if (!CE)
return false;
1362 int64_t
Value =
CE->getValue();
1366 bool isReg()
const override {
return Kind == k_Register; }
1367 bool isRegList()
const {
return Kind == k_RegisterList; }
1368 bool isRegListWithAPSR()
const {
1369 return Kind == k_RegisterListWithAPSR ||
Kind == k_RegisterList;
1371 bool isDReg()
const {
1373 ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
Reg.RegNum);
1375 bool isQReg()
const {
1377 ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
Reg.RegNum);
1379 bool isDPRRegList()
const {
return Kind == k_DPRRegisterList; }
1380 bool isSPRRegList()
const {
return Kind == k_SPRRegisterList; }
1381 bool isFPSRegListWithVPR()
const {
return Kind == k_FPSRegisterListWithVPR; }
1382 bool isFPDRegListWithVPR()
const {
return Kind == k_FPDRegisterListWithVPR; }
1383 bool isToken()
const override {
return Kind == k_Token; }
1384 bool isMemBarrierOpt()
const {
return Kind == k_MemBarrierOpt; }
1385 bool isInstSyncBarrierOpt()
const {
return Kind == k_InstSyncBarrierOpt; }
1386 bool isTraceSyncBarrierOpt()
const {
return Kind == k_TraceSyncBarrierOpt; }
1387 bool isMem()
const override {
1388 return isGPRMem() || isMVEMem();
1390 bool isMVEMem()
const {
1391 if (Kind != k_Memory)
1394 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
Memory.BaseRegNum) &&
1395 !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
Memory.BaseRegNum))
1397 if (
Memory.OffsetRegNum &&
1398 !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1403 bool isGPRMem()
const {
1404 if (Kind != k_Memory)
1407 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
Memory.BaseRegNum))
1409 if (
Memory.OffsetRegNum &&
1410 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
Memory.OffsetRegNum))
1414 bool isShifterImm()
const {
return Kind == k_ShifterImmediate; }
1415 bool isRegShiftedReg()
const {
1416 return Kind == k_ShiftedRegister &&
1417 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1418 RegShiftedReg.SrcReg) &&
1419 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1420 RegShiftedReg.ShiftReg);
1422 bool isRegShiftedImm()
const {
1423 return Kind == k_ShiftedImmediate &&
1424 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1425 RegShiftedImm.SrcReg);
1427 bool isRotImm()
const {
return Kind == k_RotateImmediate; }
1429 template<
unsigned Min,
unsigned Max>
1430 bool isPowerTwoInRange()
const {
1431 if (!
isImm())
return false;
1433 if (!CE)
return false;
1434 int64_t
Value =
CE->getValue();
1438 bool isModImm()
const {
return Kind == k_ModifiedImmediate; }
1440 bool isModImmNot()
const {
1441 if (!
isImm())
return false;
1443 if (!CE)
return false;
1444 int64_t
Value =
CE->getValue();
1448 bool isModImmNeg()
const {
1449 if (!
isImm())
return false;
1451 if (!CE)
return false;
1452 int64_t
Value =
CE->getValue();
1457 bool isThumbModImmNeg1_7()
const {
1458 if (!
isImm())
return false;
1460 if (!CE)
return false;
1461 int32_t
Value = -(int32_t)
CE->getValue();
1465 bool isThumbModImmNeg8_255()
const {
1466 if (!
isImm())
return false;
1468 if (!CE)
return false;
1469 int32_t
Value = -(int32_t)
CE->getValue();
1473 bool isConstantPoolImm()
const {
return Kind == k_ConstantPoolImmediate; }
1474 bool isBitfield()
const {
return Kind == k_BitfieldDescriptor; }
1475 bool isPostIdxRegShifted()
const {
1476 return Kind == k_PostIndexRegister &&
1477 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
1479 bool isPostIdxReg()
const {
1482 bool isMemNoOffset(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1486 return Memory.OffsetRegNum == 0 &&
Memory.OffsetImm ==
nullptr &&
1487 (alignOK ||
Memory.Alignment == Alignment);
1489 bool isMemNoOffsetT2(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1493 if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].
contains(
1498 return Memory.OffsetRegNum == 0 &&
Memory.OffsetImm ==
nullptr &&
1499 (alignOK ||
Memory.Alignment == Alignment);
1501 bool isMemNoOffsetT2NoSp(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1505 if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].
contains(
1510 return Memory.OffsetRegNum == 0 &&
Memory.OffsetImm ==
nullptr &&
1511 (alignOK ||
Memory.Alignment == Alignment);
1513 bool isMemNoOffsetT(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1517 if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].
contains(
1522 return Memory.OffsetRegNum == 0 &&
Memory.OffsetImm ==
nullptr &&
1523 (alignOK ||
Memory.Alignment == Alignment);
1525 bool isMemPCRelImm12()
const {
1526 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1529 if (
Memory.BaseRegNum != ARM::PC)
1532 if (!
Memory.OffsetImm)
return true;
1533 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1534 int64_t Val =
CE->getValue();
1535 return (Val > -4096 && Val < 4096) ||
1536 (Val == std::numeric_limits<int32_t>::min());
1541 bool isAlignedMemory()
const {
1542 return isMemNoOffset(
true);
1545 bool isAlignedMemoryNone()
const {
1546 return isMemNoOffset(
false, 0);
1549 bool isDupAlignedMemoryNone()
const {
1550 return isMemNoOffset(
false, 0);
1553 bool isAlignedMemory16()
const {
1554 if (isMemNoOffset(
false, 2))
1556 return isMemNoOffset(
false, 0);
1559 bool isDupAlignedMemory16()
const {
1560 if (isMemNoOffset(
false, 2))
1562 return isMemNoOffset(
false, 0);
1565 bool isAlignedMemory32()
const {
1566 if (isMemNoOffset(
false, 4))
1568 return isMemNoOffset(
false, 0);
1571 bool isDupAlignedMemory32()
const {
1572 if (isMemNoOffset(
false, 4))
1574 return isMemNoOffset(
false, 0);
1577 bool isAlignedMemory64()
const {
1578 if (isMemNoOffset(
false, 8))
1580 return isMemNoOffset(
false, 0);
1583 bool isDupAlignedMemory64()
const {
1584 if (isMemNoOffset(
false, 8))
1586 return isMemNoOffset(
false, 0);
1589 bool isAlignedMemory64or128()
const {
1590 if (isMemNoOffset(
false, 8))
1592 if (isMemNoOffset(
false, 16))
1594 return isMemNoOffset(
false, 0);
1597 bool isDupAlignedMemory64or128()
const {
1598 if (isMemNoOffset(
false, 8))
1600 if (isMemNoOffset(
false, 16))
1602 return isMemNoOffset(
false, 0);
1605 bool isAlignedMemory64or128or256()
const {
1606 if (isMemNoOffset(
false, 8))
1608 if (isMemNoOffset(
false, 16))
1610 if (isMemNoOffset(
false, 32))
1612 return isMemNoOffset(
false, 0);
1615 bool isAddrMode2()
const {
1616 if (!isGPRMem() ||
Memory.Alignment != 0)
return false;
1618 if (
Memory.OffsetRegNum)
return true;
1620 if (!
Memory.OffsetImm)
return true;
1621 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1622 int64_t Val =
CE->getValue();
1623 return Val > -4096 && Val < 4096;
1628 bool isAM2OffsetImm()
const {
1629 if (!
isImm())
return false;
1632 if (!CE)
return false;
1633 int64_t Val =
CE->getValue();
1634 return (Val == std::numeric_limits<int32_t>::min()) ||
1635 (Val > -4096 && Val < 4096);
1638 bool isAddrMode3()
const {
1642 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1644 if (!isGPRMem() ||
Memory.Alignment != 0)
return false;
1648 if (
Memory.OffsetRegNum)
return true;
1650 if (!
Memory.OffsetImm)
return true;
1651 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1652 int64_t Val =
CE->getValue();
1655 return (Val > -256 && Val < 256) ||
1656 Val == std::numeric_limits<int32_t>::min();
1661 bool isAM3Offset()
const {
1668 if (!CE)
return false;
1669 int64_t Val =
CE->getValue();
1671 return (Val > -256 && Val < 256) ||
1672 Val == std::numeric_limits<int32_t>::min();
1675 bool isAddrMode5()
const {
1679 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1681 if (!isGPRMem() ||
Memory.Alignment != 0)
return false;
1683 if (
Memory.OffsetRegNum)
return false;
1685 if (!
Memory.OffsetImm)
return true;
1686 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1687 int64_t Val =
CE->getValue();
1688 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1689 Val == std::numeric_limits<int32_t>::min();
1694 bool isAddrMode5FP16()
const {
1698 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1700 if (!isGPRMem() ||
Memory.Alignment != 0)
return false;
1702 if (
Memory.OffsetRegNum)
return false;
1704 if (!
Memory.OffsetImm)
return true;
1705 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1706 int64_t Val =
CE->getValue();
1707 return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1708 Val == std::numeric_limits<int32_t>::min();
1713 bool isMemTBB()
const {
1714 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.isNegative ||
1720 bool isMemTBH()
const {
1721 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.isNegative ||
1728 bool isMemRegOffset()
const {
1729 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.Alignment != 0)
1734 bool isT2MemRegOffset()
const {
1735 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.isNegative ||
1746 bool isMemThumbRR()
const {
1749 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.isNegative ||
1756 bool isMemThumbRIs4()
const {
1757 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
1761 if (!
Memory.OffsetImm)
return true;
1762 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1763 int64_t Val =
CE->getValue();
1764 return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1769 bool isMemThumbRIs2()
const {
1770 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
1774 if (!
Memory.OffsetImm)
return true;
1775 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1776 int64_t Val =
CE->getValue();
1777 return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1782 bool isMemThumbRIs1()
const {
1783 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
1787 if (!
Memory.OffsetImm)
return true;
1788 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1789 int64_t Val =
CE->getValue();
1790 return Val >= 0 && Val <= 31;
1795 bool isMemThumbSPI()
const {
1796 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
1800 if (!
Memory.OffsetImm)
return true;
1801 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1802 int64_t Val =
CE->getValue();
1803 return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1808 bool isMemImm8s4Offset()
const {
1812 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1814 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1817 if (!
Memory.OffsetImm)
return true;
1818 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1819 int64_t Val =
CE->getValue();
1821 return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1822 Val == std::numeric_limits<int32_t>::min();
1827 bool isMemImm7s4Offset()
const {
1831 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1833 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0 ||
1834 !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1838 if (!
Memory.OffsetImm)
return true;
1839 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1840 int64_t Val =
CE->getValue();
1842 return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
1847 bool isMemImm0_1020s4Offset()
const {
1848 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1851 if (!
Memory.OffsetImm)
return true;
1852 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1853 int64_t Val =
CE->getValue();
1854 return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1859 bool isMemImm8Offset()
const {
1860 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1863 if (
Memory.BaseRegNum == ARM::PC)
return false;
1865 if (!
Memory.OffsetImm)
return true;
1866 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1867 int64_t Val =
CE->getValue();
1868 return (Val == std::numeric_limits<int32_t>::min()) ||
1869 (Val > -256 && Val < 256);
1874 template<
unsigned Bits,
unsigned RegClassID>
1875 bool isMemImm7ShiftedOffset()
const {
1876 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0 ||
1877 !ARMMCRegisterClasses[RegClassID].contains(
Memory.BaseRegNum))
1883 if (!
Memory.OffsetImm)
return true;
1884 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1885 int64_t Val =
CE->getValue();
1889 if (Val == INT32_MIN)
1892 unsigned Divisor = 1U <<
Bits;
1895 if (Val % Divisor != 0)
1900 return (Val >= -127 && Val <= 127);
1905 template <
int shift>
bool isMemRegRQOffset()
const {
1906 if (!isMVEMem() ||
Memory.OffsetImm !=
nullptr ||
Memory.Alignment != 0)
1909 if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].
contains(
1912 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
1926 template <
int shift>
bool isMemRegQOffset()
const {
1927 if (!isMVEMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1930 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
1936 static_assert(shift < 56,
1937 "Such that we dont shift by a value higher than 62");
1938 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1939 int64_t Val =
CE->getValue();
1942 if ((Val & ((1U << shift) - 1)) != 0)
1948 int64_t
Range = (1U << (7 + shift)) - 1;
1949 return (Val == INT32_MIN) || (Val > -
Range && Val <
Range);
1954 bool isMemPosImm8Offset()
const {
1955 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1958 if (!
Memory.OffsetImm)
return true;
1959 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1960 int64_t Val =
CE->getValue();
1961 return Val >= 0 && Val < 256;
1966 bool isMemNegImm8Offset()
const {
1967 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1970 if (
Memory.BaseRegNum == ARM::PC)
return false;
1972 if (!
Memory.OffsetImm)
return false;
1973 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1974 int64_t Val =
CE->getValue();
1975 return (Val == std::numeric_limits<int32_t>::min()) ||
1976 (Val > -256 && Val < 0);
1981 bool isMemUImm12Offset()
const {
1982 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1985 if (!
Memory.OffsetImm)
return true;
1986 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1987 int64_t Val =
CE->getValue();
1988 return (Val >= 0 && Val < 4096);
1993 bool isMemImm12Offset()
const {
1998 if (
isImm() && !isa<MCConstantExpr>(getImm()))
2001 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
2004 if (!
Memory.OffsetImm)
return true;
2005 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
2006 int64_t Val =
CE->getValue();
2007 return (Val > -4096 && Val < 4096) ||
2008 (Val == std::numeric_limits<int32_t>::min());
2015 bool isConstPoolAsmImm()
const {
2018 return (isConstantPoolImm());
2021 bool isPostIdxImm8()
const {
2022 if (!
isImm())
return false;
2024 if (!CE)
return false;
2025 int64_t Val =
CE->getValue();
2026 return (Val > -256 && Val < 256) ||
2027 (Val == std::numeric_limits<int32_t>::min());
2030 bool isPostIdxImm8s4()
const {
2031 if (!
isImm())
return false;
2033 if (!CE)
return false;
2034 int64_t Val =
CE->getValue();
2035 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
2036 (Val == std::numeric_limits<int32_t>::min());
2039 bool isMSRMask()
const {
return Kind == k_MSRMask; }
2040 bool isBankedReg()
const {
return Kind == k_BankedReg; }
2041 bool isProcIFlags()
const {
return Kind == k_ProcIFlags; }
2044 bool isAnyVectorList()
const {
2045 return Kind == k_VectorList ||
Kind == k_VectorListAllLanes ||
2046 Kind == k_VectorListIndexed;
2049 bool isVectorList()
const {
return Kind == k_VectorList; }
2051 bool isSingleSpacedVectorList()
const {
2052 return Kind == k_VectorList && !VectorList.isDoubleSpaced;
2055 bool isDoubleSpacedVectorList()
const {
2056 return Kind == k_VectorList && VectorList.isDoubleSpaced;
2059 bool isVecListOneD()
const {
2061 if (isDReg() && !Parser->hasMVE())
2063 if (!isSingleSpacedVectorList())
return false;
2064 return VectorList.Count == 1;
2067 bool isVecListTwoMQ()
const {
2068 return isSingleSpacedVectorList() && VectorList.Count == 2 &&
2069 ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2073 bool isVecListDPair()
const {
2076 if (isQReg() && !Parser->hasMVE())
2078 if (!isSingleSpacedVectorList())
return false;
2079 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2083 bool isVecListThreeD()
const {
2084 if (!isSingleSpacedVectorList())
return false;
2085 return VectorList.Count == 3;
2088 bool isVecListFourD()
const {
2089 if (!isSingleSpacedVectorList())
return false;
2090 return VectorList.Count == 4;
2093 bool isVecListDPairSpaced()
const {
2094 if (Kind != k_VectorList)
return false;
2095 if (isSingleSpacedVectorList())
return false;
2096 return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
2100 bool isVecListThreeQ()
const {
2101 if (!isDoubleSpacedVectorList())
return false;
2102 return VectorList.Count == 3;
2105 bool isVecListFourQ()
const {
2106 if (!isDoubleSpacedVectorList())
return false;
2107 return VectorList.Count == 4;
2110 bool isVecListFourMQ()
const {
2111 return isSingleSpacedVectorList() && VectorList.Count == 4 &&
2112 ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2116 bool isSingleSpacedVectorAllLanes()
const {
2117 return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
2120 bool isDoubleSpacedVectorAllLanes()
const {
2121 return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
2124 bool isVecListOneDAllLanes()
const {
2125 if (!isSingleSpacedVectorAllLanes())
return false;
2126 return VectorList.Count == 1;
2129 bool isVecListDPairAllLanes()
const {
2130 if (!isSingleSpacedVectorAllLanes())
return false;
2131 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2135 bool isVecListDPairSpacedAllLanes()
const {
2136 if (!isDoubleSpacedVectorAllLanes())
return false;
2137 return VectorList.Count == 2;
2140 bool isVecListThreeDAllLanes()
const {
2141 if (!isSingleSpacedVectorAllLanes())
return false;
2142 return VectorList.Count == 3;
2145 bool isVecListThreeQAllLanes()
const {
2146 if (!isDoubleSpacedVectorAllLanes())
return false;
2147 return VectorList.Count == 3;
2150 bool isVecListFourDAllLanes()
const {
2151 if (!isSingleSpacedVectorAllLanes())
return false;
2152 return VectorList.Count == 4;
2155 bool isVecListFourQAllLanes()
const {
2156 if (!isDoubleSpacedVectorAllLanes())
return false;
2157 return VectorList.Count == 4;
2160 bool isSingleSpacedVectorIndexed()
const {
2161 return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
2164 bool isDoubleSpacedVectorIndexed()
const {
2165 return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
2168 bool isVecListOneDByteIndexed()
const {
2169 if (!isSingleSpacedVectorIndexed())
return false;
2170 return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
2173 bool isVecListOneDHWordIndexed()
const {
2174 if (!isSingleSpacedVectorIndexed())
return false;
2175 return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
2178 bool isVecListOneDWordIndexed()
const {
2179 if (!isSingleSpacedVectorIndexed())
return false;
2180 return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
2183 bool isVecListTwoDByteIndexed()
const {
2184 if (!isSingleSpacedVectorIndexed())
return false;
2185 return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
2188 bool isVecListTwoDHWordIndexed()
const {
2189 if (!isSingleSpacedVectorIndexed())
return false;
2190 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2193 bool isVecListTwoQWordIndexed()
const {
2194 if (!isDoubleSpacedVectorIndexed())
return false;
2195 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2198 bool isVecListTwoQHWordIndexed()
const {
2199 if (!isDoubleSpacedVectorIndexed())
return false;
2200 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2203 bool isVecListTwoDWordIndexed()
const {
2204 if (!isSingleSpacedVectorIndexed())
return false;
2205 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2208 bool isVecListThreeDByteIndexed()
const {
2209 if (!isSingleSpacedVectorIndexed())
return false;
2210 return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
2213 bool isVecListThreeDHWordIndexed()
const {
2214 if (!isSingleSpacedVectorIndexed())
return false;
2215 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2218 bool isVecListThreeQWordIndexed()
const {
2219 if (!isDoubleSpacedVectorIndexed())
return false;
2220 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2223 bool isVecListThreeQHWordIndexed()
const {
2224 if (!isDoubleSpacedVectorIndexed())
return false;
2225 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2228 bool isVecListThreeDWordIndexed()
const {
2229 if (!isSingleSpacedVectorIndexed())
return false;
2230 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2233 bool isVecListFourDByteIndexed()
const {
2234 if (!isSingleSpacedVectorIndexed())
return false;
2235 return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
2238 bool isVecListFourDHWordIndexed()
const {
2239 if (!isSingleSpacedVectorIndexed())
return false;
2240 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2243 bool isVecListFourQWordIndexed()
const {
2244 if (!isDoubleSpacedVectorIndexed())
return false;
2245 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2248 bool isVecListFourQHWordIndexed()
const {
2249 if (!isDoubleSpacedVectorIndexed())
return false;
2250 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2253 bool isVecListFourDWordIndexed()
const {
2254 if (!isSingleSpacedVectorIndexed())
return false;
2255 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2258 bool isVectorIndex()
const {
return Kind == k_VectorIndex; }
2260 template <
unsigned NumLanes>
2261 bool isVectorIndexInRange()
const {
2262 if (Kind != k_VectorIndex)
return false;
2263 return VectorIndex.Val < NumLanes;
2266 bool isVectorIndex8()
const {
return isVectorIndexInRange<8>(); }
2267 bool isVectorIndex16()
const {
return isVectorIndexInRange<4>(); }
2268 bool isVectorIndex32()
const {
return isVectorIndexInRange<2>(); }
2269 bool isVectorIndex64()
const {
return isVectorIndexInRange<1>(); }
2271 template<
int PermittedValue,
int OtherPermittedValue>
2272 bool isMVEPairVectorIndex()
const {
2273 if (Kind != k_VectorIndex)
return false;
2274 return VectorIndex.Val == PermittedValue ||
2275 VectorIndex.Val == OtherPermittedValue;
2278 bool isNEONi8splat()
const {
2279 if (!
isImm())
return false;
2282 if (!CE)
return false;
2283 int64_t
Value =
CE->getValue();
2290 if (isNEONByteReplicate(2))
2296 if (!CE)
return false;
2297 unsigned Value =
CE->getValue();
2301 bool isNEONi16splatNot()
const {
2306 if (!CE)
return false;
2307 unsigned Value =
CE->getValue();
2312 if (isNEONByteReplicate(4))
2318 if (!CE)
return false;
2319 unsigned Value =
CE->getValue();
2323 bool isNEONi32splatNot()
const {
2328 if (!CE)
return false;
2329 unsigned Value =
CE->getValue();
2333 static bool isValidNEONi32vmovImm(int64_t
Value) {
2336 return ((
Value & 0xffffffffffffff00) == 0) ||
2337 ((
Value & 0xffffffffffff00ff) == 0) ||
2338 ((
Value & 0xffffffffff00ffff) == 0) ||
2339 ((
Value & 0xffffffff00ffffff) == 0) ||
2340 ((
Value & 0xffffffffffff00ff) == 0xff) ||
2341 ((
Value & 0xffffffffff00ffff) == 0xffff);
2344 bool isNEONReplicate(
unsigned Width,
unsigned NumElems,
bool Inv)
const {
2345 assert((Width == 8 || Width == 16 || Width == 32) &&
2346 "Invalid element width");
2347 assert(NumElems * Width <= 64 &&
"Invalid result width");
2355 int64_t
Value =
CE->getValue();
2363 if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
2365 if (Width == 32 && !isValidNEONi32vmovImm(Elem))
2368 for (
unsigned i = 1; i < NumElems; ++i) {
2370 if ((
Value & Mask) != Elem)
2376 bool isNEONByteReplicate(
unsigned NumBytes)
const {
2377 return isNEONReplicate(8, NumBytes,
false);
2380 static void checkNeonReplicateArgs(
unsigned FromW,
unsigned ToW) {
2381 assert((FromW == 8 || FromW == 16 || FromW == 32) &&
2382 "Invalid source width");
2383 assert((ToW == 16 || ToW == 32 || ToW == 64) &&
2384 "Invalid destination width");
2385 assert(FromW < ToW &&
"ToW is not less than FromW");
2388 template<
unsigned FromW,
unsigned ToW>
2389 bool isNEONmovReplicate()
const {
2390 checkNeonReplicateArgs(FromW, ToW);
2391 if (ToW == 64 && isNEONi64splat())
2393 return isNEONReplicate(FromW, ToW / FromW,
false);
2396 template<
unsigned FromW,
unsigned ToW>
2397 bool isNEONinvReplicate()
const {
2398 checkNeonReplicateArgs(FromW, ToW);
2399 return isNEONReplicate(FromW, ToW / FromW,
true);
2402 bool isNEONi32vmov()
const {
2403 if (isNEONByteReplicate(4))
2411 return isValidNEONi32vmovImm(
CE->getValue());
2414 bool isNEONi32vmovNeg()
const {
2415 if (!
isImm())
return false;
2418 if (!CE)
return false;
2419 return isValidNEONi32vmovImm(~
CE->getValue());
2422 bool isNEONi64splat()
const {
2423 if (!
isImm())
return false;
2426 if (!CE)
return false;
2429 for (
unsigned i = 0; i < 8; ++i, Value >>= 8)
2430 if ((
Value & 0xff) != 0 && (
Value & 0xff) != 0xff)
return false;
2434 template<
int64_t Angle,
int64_t Remainder>
2435 bool isComplexRotation()
const {
2436 if (!
isImm())
return false;
2439 if (!CE)
return false;
2442 return (
Value % Angle == Remainder &&
Value <= 270);
2445 bool isMVELongShift()
const {
2446 if (!
isImm())
return false;
2449 if (!CE)
return false;
2454 bool isMveSaturateOp()
const {
2455 if (!
isImm())
return false;
2457 if (!CE)
return false;
2462 bool isITCondCodeNoAL()
const {
2463 if (!isITCondCode())
return false;
2468 bool isITCondCodeRestrictedI()
const {
2469 if (!isITCondCode())
2475 bool isITCondCodeRestrictedS()
const {
2476 if (!isITCondCode())
2483 bool isITCondCodeRestrictedU()
const {
2484 if (!isITCondCode())
2490 bool isITCondCodeRestrictedFP()
const {
2491 if (!isITCondCode())
2498 void setVecListDPair(
unsigned int DPair) {
2499 Kind = k_VectorList;
2500 VectorList.RegNum = DPair;
2501 VectorList.Count = 2;
2502 VectorList.isDoubleSpaced =
false;
2505 void setVecListOneD(
unsigned int DReg) {
2506 Kind = k_VectorList;
2507 VectorList.RegNum =
DReg;
2508 VectorList.Count = 1;
2509 VectorList.isDoubleSpaced =
false;
2516 else if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2522 void addARMBranchTargetOperands(
MCInst &Inst,
unsigned N)
const {
2523 assert(
N == 1 &&
"Invalid number of operands!");
2524 addExpr(Inst, getImm());
2527 void addThumbBranchTargetOperands(
MCInst &Inst,
unsigned N)
const {
2528 assert(
N == 1 &&
"Invalid number of operands!");
2529 addExpr(Inst, getImm());
2532 void addCondCodeOperands(
MCInst &Inst,
unsigned N)
const {
2533 assert(
N == 2 &&
"Invalid number of operands!");
2539 void addVPTPredNOperands(
MCInst &Inst,
unsigned N)
const {
2540 assert(
N == 3 &&
"Invalid number of operands!");
2542 unsigned RegNum = getVPTPred() ==
ARMVCC::None ? 0: ARM::P0;
2547 void addVPTPredROperands(
MCInst &Inst,
unsigned N)
const {
2548 assert(
N == 4 &&
"Invalid number of operands!");
2549 addVPTPredNOperands(Inst,
N-1);
2555 auto &MCID = Parser->getInstrDesc(Inst.
getOpcode());
2556 int TiedOp = MCID.getOperandConstraint(NextOpIndex,
MCOI::TIED_TO);
2558 "Inactive register in vpred_r is not tied to an output!");
2564 void addCoprocNumOperands(
MCInst &Inst,
unsigned N)
const {
2565 assert(
N == 1 &&
"Invalid number of operands!");
2569 void addCoprocRegOperands(
MCInst &Inst,
unsigned N)
const {
2570 assert(
N == 1 &&
"Invalid number of operands!");
2574 void addCoprocOptionOperands(
MCInst &Inst,
unsigned N)
const {
2575 assert(
N == 1 &&
"Invalid number of operands!");
2579 void addITMaskOperands(
MCInst &Inst,
unsigned N)
const {
2580 assert(
N == 1 &&
"Invalid number of operands!");
2584 void addITCondCodeOperands(
MCInst &Inst,
unsigned N)
const {
2585 assert(
N == 1 &&
"Invalid number of operands!");
2589 void addITCondCodeInvOperands(
MCInst &Inst,
unsigned N)
const {
2590 assert(
N == 1 &&
"Invalid number of operands!");
2594 void addCCOutOperands(
MCInst &Inst,
unsigned N)
const {
2595 assert(
N == 1 &&
"Invalid number of operands!");
2599 void addRegOperands(
MCInst &Inst,
unsigned N)
const {
2600 assert(
N == 1 &&
"Invalid number of operands!");
2604 void addRegShiftedRegOperands(
MCInst &Inst,
unsigned N)
const {
2605 assert(
N == 3 &&
"Invalid number of operands!");
2606 assert(isRegShiftedReg() &&
2607 "addRegShiftedRegOperands() on non-RegShiftedReg!");
2614 void addRegShiftedImmOperands(
MCInst &Inst,
unsigned N)
const {
2615 assert(
N == 2 &&
"Invalid number of operands!");
2616 assert(isRegShiftedImm() &&
2617 "addRegShiftedImmOperands() on non-RegShiftedImm!");
2620 unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2625 void addShifterImmOperands(
MCInst &Inst,
unsigned N)
const {
2626 assert(
N == 1 &&
"Invalid number of operands!");
2631 void addRegListOperands(
MCInst &Inst,
unsigned N)
const {
2632 assert(
N == 1 &&
"Invalid number of operands!");
2634 for (
unsigned Reg : RegList)
2638 void addRegListWithAPSROperands(
MCInst &Inst,
unsigned N)
const {
2639 assert(
N == 1 &&
"Invalid number of operands!");
2641 for (
unsigned Reg : RegList)
2645 void addDPRRegListOperands(
MCInst &Inst,
unsigned N)
const {
2646 addRegListOperands(Inst,
N);
2649 void addSPRRegListOperands(
MCInst &Inst,
unsigned N)
const {
2650 addRegListOperands(Inst,
N);
2653 void addFPSRegListWithVPROperands(
MCInst &Inst,
unsigned N)
const {
2654 addRegListOperands(Inst,
N);
2657 void addFPDRegListWithVPROperands(
MCInst &Inst,
unsigned N)
const {
2658 addRegListOperands(Inst,
N);
2661 void addRotImmOperands(
MCInst &Inst,
unsigned N)
const {
2662 assert(
N == 1 &&
"Invalid number of operands!");
2667 void addModImmOperands(
MCInst &Inst,
unsigned N)
const {
2668 assert(
N == 1 &&
"Invalid number of operands!");
2672 return addImmOperands(Inst,
N);
2677 void addModImmNotOperands(
MCInst &Inst,
unsigned N)
const {
2678 assert(
N == 1 &&
"Invalid number of operands!");
2684 void addModImmNegOperands(
MCInst &Inst,
unsigned N)
const {
2685 assert(
N == 1 &&
"Invalid number of operands!");
2691 void addThumbModImmNeg8_255Operands(
MCInst &Inst,
unsigned N)
const {
2692 assert(
N == 1 &&
"Invalid number of operands!");
2698 void addThumbModImmNeg1_7Operands(
MCInst &Inst,
unsigned N)
const {
2699 assert(
N == 1 &&
"Invalid number of operands!");
2705 void addBitfieldOperands(
MCInst &Inst,
unsigned N)
const {
2706 assert(
N == 1 &&
"Invalid number of operands!");
2712 (32 - (lsb + width)));
2716 void addImmOperands(
MCInst &Inst,
unsigned N)
const {
2717 assert(
N == 1 &&
"Invalid number of operands!");
2718 addExpr(Inst, getImm());
2721 void addFBits16Operands(
MCInst &Inst,
unsigned N)
const {
2722 assert(
N == 1 &&
"Invalid number of operands!");
2727 void addFBits32Operands(
MCInst &Inst,
unsigned N)
const {
2728 assert(
N == 1 &&
"Invalid number of operands!");
2733 void addFPImmOperands(
MCInst &Inst,
unsigned N)
const {
2734 assert(
N == 1 &&
"Invalid number of operands!");
2740 void addImm8s4Operands(
MCInst &Inst,
unsigned N)
const {
2741 assert(
N == 1 &&
"Invalid number of operands!");
2748 void addImm7s4Operands(
MCInst &Inst,
unsigned N)
const {
2749 assert(
N == 1 &&
"Invalid number of operands!");
2756 void addImm7Shift0Operands(
MCInst &Inst,
unsigned N)
const {
2757 assert(
N == 1 &&
"Invalid number of operands!");
2762 void addImm7Shift1Operands(
MCInst &Inst,
unsigned N)
const {
2763 assert(
N == 1 &&
"Invalid number of operands!");
2768 void addImm7Shift2Operands(
MCInst &Inst,
unsigned N)
const {
2769 assert(
N == 1 &&
"Invalid number of operands!");
2774 void addImm7Operands(
MCInst &Inst,
unsigned N)
const {
2775 assert(
N == 1 &&
"Invalid number of operands!");
2780 void addImm0_1020s4Operands(
MCInst &Inst,
unsigned N)
const {
2781 assert(
N == 1 &&
"Invalid number of operands!");
2788 void addImm0_508s4NegOperands(
MCInst &Inst,
unsigned N)
const {
2789 assert(
N == 1 &&
"Invalid number of operands!");
2796 void addImm0_508s4Operands(
MCInst &Inst,
unsigned N)
const {
2797 assert(
N == 1 &&
"Invalid number of operands!");
2804 void addImm1_16Operands(
MCInst &Inst,
unsigned N)
const {
2805 assert(
N == 1 &&
"Invalid number of operands!");
2812 void addImm1_32Operands(
MCInst &Inst,
unsigned N)
const {
2813 assert(
N == 1 &&
"Invalid number of operands!");
2820 void addImmThumbSROperands(
MCInst &Inst,
unsigned N)
const {
2821 assert(
N == 1 &&
"Invalid number of operands!");
2825 unsigned Imm =
CE->getValue();
2829 void addPKHASRImmOperands(
MCInst &Inst,
unsigned N)
const {
2830 assert(
N == 1 &&
"Invalid number of operands!");
2834 int Val =
CE->getValue();
2838 void addT2SOImmNotOperands(
MCInst &Inst,
unsigned N)
const {
2839 assert(
N == 1 &&
"Invalid number of operands!");
2846 void addT2SOImmNegOperands(
MCInst &Inst,
unsigned N)
const {
2847 assert(
N == 1 &&
"Invalid number of operands!");
2854 void addImm0_4095NegOperands(
MCInst &Inst,
unsigned N)
const {
2855 assert(
N == 1 &&
"Invalid number of operands!");
2862 void addUnsignedOffset_b8s2Operands(
MCInst &Inst,
unsigned N)
const {
2863 if(
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2871 void addThumbMemPCOperands(
MCInst &Inst,
unsigned N)
const {
2872 assert(
N == 1 &&
"Invalid number of operands!");
2884 assert(isGPRMem() &&
"Unknown value type!");
2885 assert(isa<MCConstantExpr>(
Memory.OffsetImm) &&
"Unknown value type!");
2886 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
2892 void addMemBarrierOptOperands(
MCInst &Inst,
unsigned N)
const {
2893 assert(
N == 1 &&
"Invalid number of operands!");
2897 void addInstSyncBarrierOptOperands(
MCInst &Inst,
unsigned N)
const {
2898 assert(
N == 1 &&
"Invalid number of operands!");
2902 void addTraceSyncBarrierOptOperands(
MCInst &Inst,
unsigned N)
const {
2903 assert(
N == 1 &&
"Invalid number of operands!");
2907 void addMemNoOffsetOperands(
MCInst &Inst,
unsigned N)
const {
2908 assert(
N == 1 &&
"Invalid number of operands!");
2912 void addMemNoOffsetT2Operands(
MCInst &Inst,
unsigned N)
const {
2913 assert(
N == 1 &&
"Invalid number of operands!");
2917 void addMemNoOffsetT2NoSpOperands(
MCInst &Inst,
unsigned N)
const {
2918 assert(
N == 1 &&
"Invalid number of operands!");
2922 void addMemNoOffsetTOperands(
MCInst &Inst,
unsigned N)
const {
2923 assert(
N == 1 &&
"Invalid number of operands!");
2927 void addMemPCRelImm12Operands(
MCInst &Inst,
unsigned N)
const {
2928 assert(
N == 1 &&
"Invalid number of operands!");
2929 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
2935 void addAdrLabelOperands(
MCInst &Inst,
unsigned N)
const {
2936 assert(
N == 1 &&
"Invalid number of operands!");
2941 if (!isa<MCConstantExpr>(getImm())) {
2947 int Val =
CE->getValue();
2951 void addAlignedMemoryOperands(
MCInst &Inst,
unsigned N)
const {
2952 assert(
N == 2 &&
"Invalid number of operands!");
2957 void addDupAlignedMemoryNoneOperands(
MCInst &Inst,
unsigned N)
const {
2958 addAlignedMemoryOperands(Inst,
N);
2961 void addAlignedMemoryNoneOperands(
MCInst &Inst,
unsigned N)
const {
2962 addAlignedMemoryOperands(Inst,
N);
2965 void addAlignedMemory16Operands(
MCInst &Inst,
unsigned N)
const {
2966 addAlignedMemoryOperands(Inst,
N);
2969 void addDupAlignedMemory16Operands(
MCInst &Inst,
unsigned N)
const {
2970 addAlignedMemoryOperands(Inst,
N);
2973 void addAlignedMemory32Operands(
MCInst &Inst,
unsigned N)
const {
2974 addAlignedMemoryOperands(Inst,
N);
2977 void addDupAlignedMemory32Operands(
MCInst &Inst,
unsigned N)
const {
2978 addAlignedMemoryOperands(Inst,
N);
2981 void addAlignedMemory64Operands(
MCInst &Inst,
unsigned N)
const {
2982 addAlignedMemoryOperands(Inst,
N);
2985 void addDupAlignedMemory64Operands(
MCInst &Inst,
unsigned N)
const {
2986 addAlignedMemoryOperands(Inst,
N);
2989 void addAlignedMemory64or128Operands(
MCInst &Inst,
unsigned N)
const {
2990 addAlignedMemoryOperands(Inst,
N);
2993 void addDupAlignedMemory64or128Operands(
MCInst &Inst,
unsigned N)
const {
2994 addAlignedMemoryOperands(Inst,
N);
2997 void addAlignedMemory64or128or256Operands(
MCInst &Inst,
unsigned N)
const {
2998 addAlignedMemoryOperands(Inst,
N);
3001 void addAddrMode2Operands(
MCInst &Inst,
unsigned N)
const {
3002 assert(
N == 3 &&
"Invalid number of operands!");
3005 if (!
Memory.OffsetRegNum) {
3008 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
3009 int32_t Val =
CE->getValue();
3012 if (Val == std::numeric_limits<int32_t>::min())
3030 void addAM2OffsetImmOperands(
MCInst &Inst,
unsigned N)
const {
3031 assert(
N == 2 &&
"Invalid number of operands!");
3033 assert(CE &&
"non-constant AM2OffsetImm operand!");
3034 int32_t Val =
CE->getValue();
3037 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3038 if (Val < 0) Val = -Val;
3044 void addAddrMode3Operands(
MCInst &Inst,
unsigned N)
const {
3045 assert(
N == 3 &&
"Invalid number of operands!");
3058 if (!
Memory.OffsetRegNum) {
3061 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
3062 int32_t Val =
CE->getValue();
3065 if (Val == std::numeric_limits<int32_t>::min())
3082 void addAM3OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3083 assert(
N == 2 &&
"Invalid number of operands!");
3084 if (Kind == k_PostIndexRegister) {
3094 int32_t Val =
CE->getValue();
3097 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3098 if (Val < 0) Val = -Val;
3104 void addAddrMode5Operands(
MCInst &Inst,
unsigned N)
const {
3105 assert(
N == 2 &&
"Invalid number of operands!");
3118 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
3120 int32_t Val =
CE->getValue() / 4;
3123 if (Val == std::numeric_limits<int32_t>::min())
3133 void addAddrMode5FP16Operands(
MCInst &Inst,
unsigned N)
const {
3134 assert(
N == 2 &&
"Invalid number of operands!");
3148 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
3149 int32_t Val =
CE->getValue() / 2;
3152 if (Val == std::numeric_limits<int32_t>::min())
3162 void addMemImm8s4OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3163 assert(
N == 2 &&
"Invalid number of operands!");
3174 addExpr(Inst,
Memory.OffsetImm);
3177 void addMemImm7s4OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3178 assert(
N == 2 &&
"Invalid number of operands!");
3189 addExpr(Inst,
Memory.OffsetImm);
3192 void addMemImm0_1020s4OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3193 assert(
N == 2 &&
"Invalid number of operands!");
3197 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
3204 void addMemImmOffsetOperands(
MCInst &Inst,
unsigned N)
const {
3205 assert(
N == 2 &&
"Invalid number of operands!");
3207 addExpr(Inst,
Memory.OffsetImm);
3210 void addMemRegRQOffsetOperands(
MCInst &Inst,
unsigned N)
const {
3211 assert(
N == 2 &&
"Invalid number of operands!");
3216 void addMemUImm12OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3217 assert(
N == 2 &&
"Invalid number of operands!");
3220 addExpr(Inst, getImm());
3227 addExpr(Inst,
Memory.OffsetImm);
3230 void addMemImm12OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3231 assert(
N == 2 &&
"Invalid number of operands!");
3234 addExpr(Inst, getImm());
3241 addExpr(Inst,
Memory.OffsetImm);
3244 void addConstPoolAsmImmOperands(
MCInst &Inst,
unsigned N)
const {
3245 assert(
N == 1 &&
"Invalid number of operands!");
3248 addExpr(Inst, getConstantPoolImm());
3251 void addMemTBBOperands(
MCInst &Inst,
unsigned N)
const {
3252 assert(
N == 2 &&
"Invalid number of operands!");
3257 void addMemTBHOperands(
MCInst &Inst,
unsigned N)
const {
3258 assert(
N == 2 &&
"Invalid number of operands!");
3263 void addMemRegOffsetOperands(
MCInst &Inst,
unsigned N)
const {
3264 assert(
N == 3 &&
"Invalid number of operands!");
3273 void addT2MemRegOffsetOperands(
MCInst &Inst,
unsigned N)
const {
3274 assert(
N == 3 &&
"Invalid number of operands!");
3280 void addMemThumbRROperands(
MCInst &Inst,
unsigned N)
const {
3281 assert(
N == 2 &&
"Invalid number of operands!");
3286 void addMemThumbRIs4Operands(
MCInst &Inst,
unsigned N)
const {
3287 assert(
N == 2 &&
"Invalid number of operands!");
3291 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
3298 void addMemThumbRIs2Operands(
MCInst &Inst,
unsigned N)
const {
3299 assert(
N == 2 &&
"Invalid number of operands!");
3303 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
3309 void addMemThumbRIs1Operands(
MCInst &Inst,
unsigned N)
const {
3310 assert(
N == 2 &&
"Invalid number of operands!");
3312 addExpr(Inst,
Memory.OffsetImm);
3315 void addMemThumbSPIOperands(
MCInst &Inst,
unsigned N)
const {
3316 assert(
N == 2 &&
"Invalid number of operands!");
3320 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
3327 void addPostIdxImm8Operands(
MCInst &Inst,
unsigned N)
const {
3328 assert(
N == 1 &&
"Invalid number of operands!");
3330 assert(CE &&
"non-constant post-idx-imm8 operand!");
3331 int Imm =
CE->getValue();
3332 bool isAdd =
Imm >= 0;
3333 if (Imm == std::numeric_limits<int32_t>::min())
Imm = 0;
3338 void addPostIdxImm8s4Operands(
MCInst &Inst,
unsigned N)
const {
3339 assert(
N == 1 &&
"Invalid number of operands!");
3341 assert(CE &&
"non-constant post-idx-imm8s4 operand!");
3342 int Imm =
CE->getValue();
3343 bool isAdd =
Imm >= 0;
3344 if (Imm == std::numeric_limits<int32_t>::min())
Imm = 0;
3350 void addPostIdxRegOperands(
MCInst &Inst,
unsigned N)
const {
3351 assert(
N == 2 &&
"Invalid number of operands!");
3356 void addPostIdxRegShiftedOperands(
MCInst &Inst,
unsigned N)
const {
3357 assert(
N == 2 &&
"Invalid number of operands!");
3363 PostIdxReg.ShiftTy);
3367 void addPowerTwoOperands(
MCInst &Inst,
unsigned N)
const {
3368 assert(
N == 1 &&
"Invalid number of operands!");
3373 void addMSRMaskOperands(
MCInst &Inst,
unsigned N)
const {
3374 assert(
N == 1 &&
"Invalid number of operands!");
3378 void addBankedRegOperands(
MCInst &Inst,
unsigned N)
const {
3379 assert(
N == 1 &&
"Invalid number of operands!");
3383 void addProcIFlagsOperands(
MCInst &Inst,
unsigned N)
const {
3384 assert(
N == 1 &&
"Invalid number of operands!");
3388 void addVecListOperands(
MCInst &Inst,
unsigned N)
const {
3389 assert(
N == 1 &&
"Invalid number of operands!");
3391 if (isAnyVectorList())
3393 else if (isDReg() && !Parser->hasMVE()) {
3395 }
else if (isQReg() && !Parser->hasMVE()) {
3396 auto DPair = Parser->getDRegFromQReg(
Reg.RegNum);
3397 DPair = Parser->getMRI()->getMatchingSuperReg(
3398 DPair, ARM::dsub_0, &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3403 "attempted to add a vector list register with wrong type!");
3407 void addMVEVecListOperands(
MCInst &Inst,
unsigned N)
const {
3408 assert(
N == 1 &&
"Invalid number of operands!");
3424 const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
3426 (VectorList.Count == 2) ? &ARMMCRegisterClasses[ARM::MQQPRRegClassID]
3427 : &ARMMCRegisterClasses[ARM::MQQQQPRRegClassID];
3430 for (
I = 0;
I <
E;
I++)
3433 assert(
I <
E &&
"Invalid vector list start register!");
3438 void addVecListIndexedOperands(
MCInst &Inst,
unsigned N)
const {
3439 assert(
N == 2 &&
"Invalid number of operands!");
3444 void addVectorIndex8Operands(
MCInst &Inst,
unsigned N)
const {
3445 assert(
N == 1 &&
"Invalid number of operands!");
3449 void addVectorIndex16Operands(
MCInst &Inst,
unsigned N)
const {
3450 assert(
N == 1 &&
"Invalid number of operands!");
3454 void addVectorIndex32Operands(
MCInst &Inst,
unsigned N)
const {
3455 assert(
N == 1 &&
"Invalid number of operands!");
3459 void addVectorIndex64Operands(
MCInst &Inst,
unsigned N)
const {
3460 assert(
N == 1 &&
"Invalid number of operands!");
3464 void addMVEVectorIndexOperands(
MCInst &Inst,
unsigned N)
const {
3465 assert(
N == 1 &&
"Invalid number of operands!");
3469 void addMVEPairVectorIndexOperands(
MCInst &Inst,
unsigned N)
const {
3470 assert(
N == 1 &&
"Invalid number of operands!");
3474 void addNEONi8splatOperands(
MCInst &Inst,
unsigned N)
const {
3475 assert(
N == 1 &&
"Invalid number of operands!");
3482 void addNEONi16splatOperands(
MCInst &Inst,
unsigned N)
const {
3483 assert(
N == 1 &&
"Invalid number of operands!");
3486 unsigned Value =
CE->getValue();
3491 void addNEONi16splatNotOperands(
MCInst &Inst,
unsigned N)
const {
3492 assert(
N == 1 &&
"Invalid number of operands!");
3495 unsigned Value =
CE->getValue();
3500 void addNEONi32splatOperands(
MCInst &Inst,
unsigned N)
const {
3501 assert(
N == 1 &&
"Invalid number of operands!");
3504 unsigned Value =
CE->getValue();
3509 void addNEONi32splatNotOperands(
MCInst &Inst,
unsigned N)
const {
3510 assert(
N == 1 &&
"Invalid number of operands!");
3513 unsigned Value =
CE->getValue();
3518 void addNEONi8ReplicateOperands(
MCInst &Inst,
bool Inv)
const {
3523 "All instructions that wants to replicate non-zero byte "
3524 "always must be replaced with VMOVv8i8 or VMOVv16i8.");
3525 unsigned Value =
CE->getValue();
3528 unsigned B =
Value & 0xff;
3533 void addNEONinvi8ReplicateOperands(
MCInst &Inst,
unsigned N)
const {
3534 assert(
N == 1 &&
"Invalid number of operands!");
3535 addNEONi8ReplicateOperands(Inst,
true);
3538 static unsigned encodeNeonVMOVImmediate(
unsigned Value) {
3541 else if (
Value > 0xffff &&
Value <= 0xffffff)
3543 else if (
Value > 0xffffff)
3548 void addNEONi32vmovOperands(
MCInst &Inst,
unsigned N)
const {
3549 assert(
N == 1 &&
"Invalid number of operands!");
3552 unsigned Value = encodeNeonVMOVImmediate(
CE->getValue());
3556 void addNEONvmovi8ReplicateOperands(
MCInst &Inst,
unsigned N)
const {
3557 assert(
N == 1 &&
"Invalid number of operands!");
3558 addNEONi8ReplicateOperands(Inst,
false);
3561 void addNEONvmovi16ReplicateOperands(
MCInst &Inst,
unsigned N)
const {
3562 assert(
N == 1 &&
"Invalid number of operands!");
3568 "All instructions that want to replicate non-zero half-word "
3569 "always must be replaced with V{MOV,MVN}v{4,8}i16.");
3571 unsigned Elem =
Value & 0xffff;
3573 Elem = (Elem >> 8) | 0x200;
3577 void addNEONi32vmovNegOperands(
MCInst &Inst,
unsigned N)
const {
3578 assert(
N == 1 &&
"Invalid number of operands!");
3581 unsigned Value = encodeNeonVMOVImmediate(~
CE->getValue());
3585 void addNEONvmovi32ReplicateOperands(
MCInst &Inst,
unsigned N)
const {
3586 assert(
N == 1 &&
"Invalid number of operands!");
3592 "All instructions that want to replicate non-zero word "
3593 "always must be replaced with V{MOV,MVN}v{2,4}i32.");
3595 unsigned Elem = encodeNeonVMOVImmediate(
Value & 0xffffffff);
3599 void addNEONi64splatOperands(
MCInst &Inst,
unsigned N)
const {
3600 assert(
N == 1 &&
"Invalid number of operands!");
3605 for (
unsigned i = 0; i < 8; ++i, Value >>= 8) {
3611 void addComplexRotationEvenOperands(
MCInst &Inst,
unsigned N)
const {
3612 assert(
N == 1 &&
"Invalid number of operands!");
3617 void addComplexRotationOddOperands(
MCInst &Inst,
unsigned N)
const {
3618 assert(
N == 1 &&
"Invalid number of operands!");
3623 void addMveSaturateOperands(
MCInst &Inst,
unsigned N)
const {
3624 assert(
N == 1 &&
"Invalid number of operands!");
3626 unsigned Imm =
CE->getValue();
3627 assert((Imm == 48 || Imm == 64) &&
"Invalid saturate operand");
// Factory: build an ARMOperand carrying an IT-block condition mask
// (kind k_ITCondMask). NOTE(review): the statements that store Mask and
// the source location into Op, and the return, are missing from this
// extract.
3633 static std::unique_ptr<ARMOperand> CreateITMask(
unsigned Mask,
SMLoc S,
3634 ARMAsmParser &Parser) {
3635 auto Op = std::make_unique<ARMOperand>(k_ITCondMask, Parser);
3642 static std::unique_ptr<ARMOperand>
3644 auto Op = std::make_unique<ARMOperand>(k_CondCode, Parser);
3652 ARMAsmParser &Parser) {
3653 auto Op = std::make_unique<ARMOperand>(k_VPTPred, Parser);
3660 static std::unique_ptr<ARMOperand> CreateCoprocNum(
unsigned CopVal,
SMLoc S,
3661 ARMAsmParser &Parser) {
3662 auto Op = std::make_unique<ARMOperand>(k_CoprocNum, Parser);
3663 Op->Cop.Val = CopVal;
3669 static std::unique_ptr<ARMOperand> CreateCoprocReg(
unsigned CopVal,
SMLoc S,
3670 ARMAsmParser &Parser) {
3671 auto Op = std::make_unique<ARMOperand>(k_CoprocReg, Parser);
3672 Op->Cop.Val = CopVal;
3678 static std::unique_ptr<ARMOperand>
3679 CreateCoprocOption(
unsigned Val,
SMLoc S,
SMLoc E, ARMAsmParser &Parser) {
3680 auto Op = std::make_unique<ARMOperand>(k_CoprocOption, Parser);
3687 static std::unique_ptr<ARMOperand> CreateCCOut(
unsigned RegNum,
SMLoc S,
3688 ARMAsmParser &Parser) {
3689 auto Op = std::make_unique<ARMOperand>(k_CCOut, Parser);
3690 Op->Reg.RegNum = RegNum;
3696 static std::unique_ptr<ARMOperand> CreateToken(
StringRef Str,
SMLoc S,
3697 ARMAsmParser &Parser) {
3698 auto Op = std::make_unique<ARMOperand>(k_Token, Parser);
3699 Op->Tok.Data = Str.data();
3700 Op->Tok.Length = Str.size();
3706 static std::unique_ptr<ARMOperand> CreateReg(
unsigned RegNum,
SMLoc S,
3707 SMLoc E, ARMAsmParser &Parser) {
3708 auto Op = std::make_unique<ARMOperand>(k_Register, Parser);
3709 Op->Reg.RegNum = RegNum;
3715 static std::unique_ptr<ARMOperand>
3717 unsigned ShiftReg,
unsigned ShiftImm,
SMLoc S,
SMLoc E,
3718 ARMAsmParser &Parser) {
3719 auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister, Parser);
3720 Op->RegShiftedReg.ShiftTy = ShTy;
3721 Op->RegShiftedReg.SrcReg = SrcReg;
3722 Op->RegShiftedReg.ShiftReg = ShiftReg;
3723 Op->RegShiftedReg.ShiftImm = ShiftImm;
3729 static std::unique_ptr<ARMOperand>
3732 ARMAsmParser &Parser) {
3733 auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate, Parser);
3734 Op->RegShiftedImm.ShiftTy = ShTy;
3735 Op->RegShiftedImm.SrcReg = SrcReg;
3736 Op->RegShiftedImm.ShiftImm = ShiftImm;
3742 static std::unique_ptr<ARMOperand> CreateShifterImm(
bool isASR,
unsigned Imm,
3744 ARMAsmParser &Parser) {
3745 auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate, Parser);
3746 Op->ShifterImm.isASR = isASR;
3747 Op->ShifterImm.Imm =
Imm;
3753 static std::unique_ptr<ARMOperand>
3754 CreateRotImm(
unsigned Imm,
SMLoc S,
SMLoc E, ARMAsmParser &Parser) {
3755 auto Op = std::make_unique<ARMOperand>(k_RotateImmediate, Parser);
3756 Op->RotImm.Imm =
Imm;
3762 static std::unique_ptr<ARMOperand> CreateModImm(
unsigned Bits,
unsigned Rot,
3764 ARMAsmParser &Parser) {
3765 auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate, Parser);
3767 Op->ModImm.Rot = Rot;
3773 static std::unique_ptr<ARMOperand>
3775 ARMAsmParser &Parser) {
3776 auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate, Parser);
3783 static std::unique_ptr<ARMOperand> CreateBitfield(
unsigned LSB,
3784 unsigned Width,
SMLoc S,
3786 ARMAsmParser &Parser) {
3787 auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor, Parser);
3788 Op->Bitfield.LSB = LSB;
3789 Op->Bitfield.Width = Width;
3795 static std::unique_ptr<ARMOperand>
3797 SMLoc StartLoc,
SMLoc EndLoc, ARMAsmParser &Parser) {
3798 assert(Regs.size() > 0 &&
"RegList contains no registers?");
3799 KindTy
Kind = k_RegisterList;
3801 if (ARMMCRegisterClasses[ARM::DPRRegClassID].
contains(
3802 Regs.front().second)) {
3803 if (Regs.back().second == ARM::VPR)
3804 Kind = k_FPDRegisterListWithVPR;
3806 Kind = k_DPRRegisterList;
3807 }
else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
contains(
3808 Regs.front().second)) {
3809 if (Regs.back().second == ARM::VPR)
3810 Kind = k_FPSRegisterListWithVPR;
3812 Kind = k_SPRRegisterList;
3815 if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
3816 Kind = k_RegisterListWithAPSR;
3820 auto Op = std::make_unique<ARMOperand>(Kind, Parser);
3821 for (
const auto &
P : Regs)
3822 Op->Registers.push_back(
P.second);
3824 Op->StartLoc = StartLoc;
3825 Op->EndLoc = EndLoc;
3829 static std::unique_ptr<ARMOperand>
3830 CreateVectorList(
unsigned RegNum,
unsigned Count,
bool isDoubleSpaced,
3832 auto Op = std::make_unique<ARMOperand>(k_VectorList, Parser);
3833 Op->VectorList.RegNum = RegNum;
3834 Op->VectorList.Count = Count;
3835 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3841 static std::unique_ptr<ARMOperand>
3842 CreateVectorListAllLanes(
unsigned RegNum,
unsigned Count,
bool isDoubleSpaced,
3844 auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes, Parser);
3845 Op->VectorList.RegNum = RegNum;
3846 Op->VectorList.Count = Count;
3847 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3853 static std::unique_ptr<ARMOperand>
3854 CreateVectorListIndexed(
unsigned RegNum,
unsigned Count,
unsigned Index,
3856 ARMAsmParser &Parser) {
3857 auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed, Parser);
3858 Op->VectorList.RegNum = RegNum;
3859 Op->VectorList.Count = Count;
3860 Op->VectorList.LaneIndex =
Index;
3861 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3867 static std::unique_ptr<ARMOperand> CreateVectorIndex(
unsigned Idx,
SMLoc S,
3869 ARMAsmParser &Parser) {
3870 auto Op = std::make_unique<ARMOperand>(k_VectorIndex, Parser);
3871 Op->VectorIndex.Val =
Idx;
3877 static std::unique_ptr<ARMOperand> CreateImm(
const MCExpr *Val,
SMLoc S,
3878 SMLoc E, ARMAsmParser &Parser) {
3879 auto Op = std::make_unique<ARMOperand>(k_Immediate, Parser);
3886 static std::unique_ptr<ARMOperand>
3887 CreateMem(
unsigned BaseRegNum,
const MCExpr *OffsetImm,
unsigned OffsetRegNum,
3889 bool isNegative,
SMLoc S,
SMLoc E, ARMAsmParser &Parser,
3891 auto Op = std::make_unique<ARMOperand>(k_Memory, Parser);
3892 Op->Memory.BaseRegNum = BaseRegNum;
3893 Op->Memory.OffsetImm = OffsetImm;
3894 Op->Memory.OffsetRegNum = OffsetRegNum;
3895 Op->Memory.ShiftType = ShiftType;
3896 Op->Memory.ShiftImm = ShiftImm;
3897 Op->Memory.Alignment = Alignment;
3898 Op->Memory.isNegative = isNegative;
3901 Op->AlignmentLoc = AlignmentLoc;
3905 static std::unique_ptr<ARMOperand>
3907 unsigned ShiftImm,
SMLoc S,
SMLoc E, ARMAsmParser &Parser) {
3908 auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister, Parser);
3909 Op->PostIdxReg.RegNum = RegNum;
3910 Op->PostIdxReg.isAdd = isAdd;
3911 Op->PostIdxReg.ShiftTy = ShiftTy;
3912 Op->PostIdxReg.ShiftImm = ShiftImm;
3918 static std::unique_ptr<ARMOperand>
3920 auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt, Parser);
3921 Op->MBOpt.Val = Opt;
3927 static std::unique_ptr<ARMOperand>
3929 ARMAsmParser &Parser) {
3930 auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt, Parser);
3931 Op->ISBOpt.Val = Opt;
3937 static std::unique_ptr<ARMOperand>
3939 ARMAsmParser &Parser) {
3940 auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt, Parser);
3941 Op->TSBOpt.Val = Opt;
3947 static std::unique_ptr<ARMOperand>
3949 auto Op = std::make_unique<ARMOperand>(k_ProcIFlags, Parser);
3956 static std::unique_ptr<ARMOperand> CreateMSRMask(
unsigned MMask,
SMLoc S,
3957 ARMAsmParser &Parser) {
3958 auto Op = std::make_unique<ARMOperand>(k_MSRMask, Parser);
3959 Op->MMask.Val = MMask;
3965 static std::unique_ptr<ARMOperand> CreateBankedReg(
unsigned Reg,
SMLoc S,
3966 ARMAsmParser &Parser) {
3967 auto Op = std::make_unique<ARMOperand>(k_BankedReg, Parser);
3968 Op->BankedReg.Val =
Reg;
// Pretty-print an IT condition mask operand. MaskStr maps each 4-bit
// mask value to its then/else ("t"/"e") pattern; index 0 is not a valid
// encoding, hence "(invalid)".
3995 case k_ITCondMask: {
3996 static const char *
const MaskStr[] = {
3997 "(invalid)",
"(tttt)",
"(ttt)",
"(ttte)",
3998 "(tt)",
"(ttet)",
"(tte)",
"(ttee)",
3999 "(t)",
"(tett)",
"(tet)",
"(tete)",
4000 "(te)",
"(teet)",
"(tee)",
"(teee)",
// The mask must fit in the low nibble used to index MaskStr.
4002 assert((ITMask.Mask & 0xf) == ITMask.Mask);
4003 OS <<
"<it-mask " << MaskStr[ITMask.Mask] <<
">";
4007 OS <<
"<coprocessor number: " << getCoproc() <<
">";
4010 OS <<
"<coprocessor register: " << getCoproc() <<
">";
4012 case k_CoprocOption:
4013 OS <<
"<coprocessor option: " << CoprocOption.Val <<
">";
4016 OS <<
"<mask: " << getMSRMask() <<
">";
4019 OS <<
"<banked reg: " << getBankedReg() <<
">";
4024 case k_MemBarrierOpt:
4025 OS <<
"<ARM_MB::" << MemBOptToString(getMemBarrierOpt(),
false) <<
">";
4027 case k_InstSyncBarrierOpt:
4028 OS <<
"<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) <<
">";
4030 case k_TraceSyncBarrierOpt:
4031 OS <<
"<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) <<
">";
4038 OS <<
" offset-imm:" << *
Memory.OffsetImm;
4040 OS <<
" offset-reg:" << (
Memory.isNegative ?
"-" :
"")
4044 OS <<
" shift-imm:" <<
Memory.ShiftImm;
4047 OS <<
" alignment:" <<
Memory.Alignment;
4050 case k_PostIndexRegister:
4051 OS <<
"post-idx register " << (PostIdxReg.isAdd ?
"" :
"-")
4052 <<
RegName(PostIdxReg.RegNum);
4055 << PostIdxReg.ShiftImm;
4058 case k_ProcIFlags: {
4059 OS <<
"<ARM_PROC::";
4060 unsigned IFlags = getProcIFlags();
4061 for (
int i=2; i >= 0; --i)
4062 if (IFlags & (1 << i))
4070 case k_ShifterImmediate:
4071 OS <<
"<shift " << (ShifterImm.isASR ?
"asr" :
"lsl")
4072 <<
" #" << ShifterImm.Imm <<
">";
4074 case k_ShiftedRegister:
4075 OS <<
"<so_reg_reg " <<
RegName(RegShiftedReg.SrcReg) <<
" "
4077 <<
RegName(RegShiftedReg.ShiftReg) <<
">";
4079 case k_ShiftedImmediate:
4080 OS <<
"<so_reg_imm " <<
RegName(RegShiftedImm.SrcReg) <<
" "
4082 << RegShiftedImm.ShiftImm <<
">";
4084 case k_RotateImmediate:
4085 OS <<
"<ror " <<
" #" << (RotImm.Imm * 8) <<
">";
4087 case k_ModifiedImmediate:
4088 OS <<
"<mod_imm #" << ModImm.Bits <<
", #"
4089 << ModImm.Rot <<
")>";
4091 case k_ConstantPoolImmediate:
4092 OS <<
"<constant_pool_imm #" << *getConstantPoolImm();
4094 case k_BitfieldDescriptor:
4095 OS <<
"<bitfield " <<
"lsb: " <<
Bitfield.LSB
4096 <<
", width: " <<
Bitfield.Width <<
">";
4098 case k_RegisterList:
4099 case k_RegisterListWithAPSR:
4100 case k_DPRRegisterList:
4101 case k_SPRRegisterList:
4102 case k_FPSRegisterListWithVPR:
4103 case k_FPDRegisterListWithVPR: {
4104 OS <<
"<register_list ";
4110 if (++
I <
E)
OS <<
", ";
4117 OS <<
"<vector_list " << VectorList.Count <<
" * "
4118 <<
RegName(VectorList.RegNum) <<
">";
4120 case k_VectorListAllLanes:
4121 OS <<
"<vector_list(all lanes) " << VectorList.Count <<
" * "
4122 <<
RegName(VectorList.RegNum) <<
">";
4124 case k_VectorListIndexed:
4125 OS <<
"<vector_list(lane " << VectorList.LaneIndex <<
") "
4126 << VectorList.Count <<
" * " <<
RegName(VectorList.RegNum) <<
">";
4129 OS <<
"'" << getToken() <<
"'";
4132 OS <<
"<vectorindex " << getVectorIndex() <<
">";
4146 ".8",
".16",
".32",
".64",
".i8",
".i16",
".i32",
".i64",
4147 ".u8",
".u16",
".u32",
".u64",
".s8",
".s16",
".s32",
".s64",
4148 ".p8",
".p16",
".f32",
".f64",
".f",
".d"};
// Compute the index of the first operand that is NOT part of the
// mnemonic itself: condition codes, CC-out, VPT predication, IT masks,
// and width/type suffix tokens (".w", ".bf16", ".p64", ".f16", ...) are
// all counted into MnemonicOpsEndInd. Operand 0 is the mnemonic token,
// so scanning starts at index 1.
// NOTE(review): several condition lines are missing from this extract;
// the surviving fragments are annotated where their intent is visible.
4153 unsigned MnemonicOpsEndInd = 1;
// Special case: "cps" can carry an immediate directly after the
// mnemonic — the (partially missing) constant-expression checks below
// decide whether that immediate belongs to the mnemonic group.
4157 static_cast<ARMOperand &
>(*
Operands[0]).getToken() ==
"cps") {
4159 static_cast<ARMOperand &
>(*
Operands[1]).getImm()->getKind() ==
4161 (dyn_cast<MCConstantExpr>(
4162 static_cast<ARMOperand &
>(*
Operands[1]).getImm())
4164 dyn_cast<MCConstantExpr>(
4165 static_cast<ARMOperand &
>(*
Operands[1]).getImm())
4167 ++MnemonicOpsEndInd;
// Once a condition code appears to the right, later cond-codes are no
// longer treated as mnemonic operands (tracked via RHSCondCode).
4171 bool RHSCondCode =
false;
4172 while (MnemonicOpsEndInd <
Operands.size()) {
4173 auto Op =
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]);
// IT masks always belong to the mnemonic group.
4175 if (
Op.isITMask()) {
4177 MnemonicOpsEndInd++;
4178 }
else if (
Op.isToken() &&
// Width/type suffix tokens are part of the mnemonic.
4182 Op.getToken() ==
".w" ||
Op.getToken() ==
".bf16" ||
4183 Op.getToken() ==
".p64" ||
Op.getToken() ==
".f16" ||
4189 MnemonicOpsEndInd++;
// CC-out, (left-hand) condition codes, VPT predicates, and ".w" also
// extend the mnemonic group.
4192 else if (
Op.isCCOut() || (
Op.isCondCode() && !RHSCondCode) ||
4193 Op.isVPTPred() || (
Op.isToken() &&
Op.getToken() ==
".w"))
4194 MnemonicOpsEndInd++;
4198 return MnemonicOpsEndInd;
4203 const AsmToken &Tok = getParser().getTok();
4206 Reg = tryParseRegister();
4213 if (parseRegister(
Reg, StartLoc, EndLoc))
4221int ARMAsmParser::tryParseRegister(
bool AllowOutOfBoundReg) {
4230 .
Case(
"r13", ARM::SP)
4231 .
Case(
"r14", ARM::LR)
4232 .
Case(
"r15", ARM::PC)
4233 .
Case(
"ip", ARM::R12)
4235 .
Case(
"a1", ARM::R0)
4236 .
Case(
"a2", ARM::R1)
4237 .
Case(
"a3", ARM::R2)
4238 .
Case(
"a4", ARM::R3)
4239 .
Case(
"v1", ARM::R4)
4240 .
Case(
"v2", ARM::R5)
4241 .
Case(
"v3", ARM::R6)
4242 .
Case(
"v4", ARM::R7)
4243 .
Case(
"v5", ARM::R8)
4244 .
Case(
"v6", ARM::R9)
4245 .
Case(
"v7", ARM::R10)
4246 .
Case(
"v8", ARM::R11)
4247 .
Case(
"sb", ARM::R9)
4248 .
Case(
"sl", ARM::R10)
4249 .
Case(
"fp", ARM::R11)
4258 if (Entry == RegisterReqs.
end())
4261 return Entry->getValue();
4265 if (!AllowOutOfBoundReg && !hasD32() && RegNum >=
ARM::D16 &&
4274std::optional<ARM_AM::ShiftOpc> ARMAsmParser::tryParseShiftToken() {
4278 return std::nullopt;
4300 auto ShiftTyOpt = tryParseShiftToken();
4301 if (ShiftTyOpt == std::nullopt)
4303 auto ShiftTy = ShiftTyOpt.value();
4310 std::unique_ptr<ARMOperand> PrevOp(
4311 (ARMOperand *)
Operands.pop_back_val().release());
4312 if (!PrevOp->isReg())
4313 return Error(PrevOp->getStartLoc(),
"shift must be of a register");
4314 int SrcReg = PrevOp->getReg();
4330 const MCExpr *ShiftExpr =
nullptr;
4331 if (getParser().parseExpression(ShiftExpr, EndLoc)) {
4332 Error(ImmLoc,
"invalid immediate shift value");
4338 Error(ImmLoc,
"invalid immediate shift value");
4344 Imm =
CE->getValue();
4348 Error(ImmLoc,
"immediate shift value out of range");
4358 ShiftReg = tryParseRegister();
4359 if (ShiftReg == -1) {
4360 Error(L,
"expected immediate or register in shift operand");
4365 "expected immediate or register in shift operand");
4371 Operands.push_back(ARMOperand::CreateShiftedRegister(
4372 ShiftTy, SrcReg, ShiftReg, Imm, S, EndLoc, *
this));
4374 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
4390 int RegNo = tryParseRegister();
4395 ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc, *
this));
4400 ExclaimTok.
getLoc(), *
this));
4413 if (getParser().parseExpression(ImmVal))
4417 return TokError(
"immediate value expected for vector index");
4426 getContext(), *
this));
4444 if (
Name.size() < 2 ||
Name[0] != CoprocOp)
4448 switch (
Name.size()) {
// Map a trailing digit character to a value ten greater than its numeric
// value. NOTE(review): given the Name[0]/Name.size() dispatch visible
// above, this presumably handles two-character coprocessor names
// "p10".."p15" after a leading '1' — confirm against the enclosing
// function, whose other cases are missing from this extract.
4471 case '0':
return 10;
4472 case '1':
return 11;
4473 case '2':
return 12;
4474 case '3':
return 13;
4475 case '4':
return 14;
4476 case '5':
return 15;
4516 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S, *
this));
4535 Operands.push_back(ARMOperand::CreateCoprocReg(
Reg, S, *
this));
4552 if (getParser().parseExpression(Expr))
4553 return Error(Loc,
"illegal expression");
4555 if (!CE ||
CE->getValue() < 0 ||
CE->getValue() > 255)
4557 "coprocessor option must be an immediate in range [0, 255]");
4558 int Val =
CE->getValue();
4566 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S,
E, *
this));
// Return the architecturally-next general-purpose register, walking
// r0 -> r1 -> ... -> r12 -> sp -> lr -> pc and wrapping pc back to r0
// (used when expanding register ranges in lists).
// NOTE(review): the body of the non-GPR branch after the contains()
// check is missing from this extract.
4577 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].
contains(
Reg))
4581 case ARM::R0:
return ARM::R1;
case ARM::R1:
return ARM::R2;
4582 case ARM::R2:
return ARM::R3;
case ARM::R3:
return ARM::R4;
4583 case ARM::R4:
return ARM::R5;
case ARM::R5:
return ARM::R6;
4584 case ARM::R6:
return ARM::R7;
case ARM::R7:
return ARM::R8;
4585 case ARM::R8:
return ARM::R9;
case ARM::R9:
return ARM::R10;
4586 case ARM::R10:
return ARM::R11;
case ARM::R11:
return ARM::R12;
4587 case ARM::R12:
return ARM::SP;
case ARM::SP:
return ARM::LR;
4588 case ARM::LR:
return ARM::PC;
case ARM::PC:
return ARM::R0;
// Helper fragment (signature head missing from this extract): append the
// (encoding, register) pair to Regs, then sweep backwards from the tail
// with a reverse-iterator pair (I = new element, J = its predecessor).
// If an existing entry has the same encoding, the duplicate is erased.
// NOTE(review): the remainder of the loop body (after the erase) is not
// visible here, so the full ordering/termination behavior cannot be
// confirmed from this extract.
4596 unsigned Enc,
unsigned Reg) {
4597 Regs.emplace_back(Enc,
Reg);
4598 for (
auto I = Regs.rbegin(), J =
I + 1,
E = Regs.rend(); J !=
E; ++
I, ++J) {
4599 if (J->first == Enc) {
4600 Regs.erase(J.base());
4612 bool AllowRAAC,
bool AllowOutOfBoundReg) {
4615 return TokError(
"Token is not a Left Curly Brace");
4622 int Reg = tryParseRegister();
4624 return Error(RegLoc,
"register expected");
4625 if (!AllowRAAC &&
Reg == ARM::RA_AUTH_CODE)
4626 return Error(RegLoc,
"pseudo-register not allowed");
4633 if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4634 Reg = getDRegFromQReg(
Reg);
4635 EReg =
MRI->getEncodingValue(
Reg);
4640 if (
Reg == ARM::RA_AUTH_CODE ||
4641 ARMMCRegisterClasses[ARM::GPRRegClassID].
contains(
Reg))
4642 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
4643 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].
contains(
Reg))
4644 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
4645 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
contains(
Reg))
4646 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
4647 else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].
contains(
Reg))
4648 RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4650 return Error(RegLoc,
"invalid register in register list");
4653 EReg =
MRI->getEncodingValue(
Reg);
4662 if (
Reg == ARM::RA_AUTH_CODE)
4663 return Error(RegLoc,
"pseudo-register not allowed");
4666 int EndReg = tryParseRegister(AllowOutOfBoundReg);
4668 return Error(AfterMinusLoc,
"register expected");
4669 if (EndReg == ARM::RA_AUTH_CODE)
4670 return Error(AfterMinusLoc,
"pseudo-register not allowed");
4672 if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(EndReg))
4673 EndReg = getDRegFromQReg(EndReg) + 1;
4680 return Error(AfterMinusLoc,
"invalid register in register list");
4682 if (
MRI->getEncodingValue(
Reg) >
MRI->getEncodingValue(EndReg))
4683 return Error(AfterMinusLoc,
"bad range in register list");
4686 while (
Reg != EndReg) {
4688 EReg =
MRI->getEncodingValue(
Reg);
4692 ") in register list");
4701 Reg = tryParseRegister(AllowOutOfBoundReg);
4703 return Error(RegLoc,
"register expected");
4704 if (!AllowRAAC &&
Reg == ARM::RA_AUTH_CODE)
4705 return Error(RegLoc,
"pseudo-register not allowed");
4707 bool isQReg =
false;
4708 if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4709 Reg = getDRegFromQReg(
Reg);
4713 RC->
getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
4714 ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
Reg)) {
4717 RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4719 if (
Reg == ARM::VPR &&
4720 (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
4721 RC == &ARMMCRegisterClasses[ARM::DPRRegClassID] ||
4722 RC == &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID])) {
4723 RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4724 EReg =
MRI->getEncodingValue(
Reg);
4727 ") in register list");
4732 if ((
Reg == ARM::RA_AUTH_CODE &&
4733 RC != &ARMMCRegisterClasses[ARM::GPRRegClassID]) ||
4735 return Error(RegLoc,
"invalid register in register list");
4741 MRI->getEncodingValue(
Reg) <
MRI->getEncodingValue(OldReg)) {
4742 if (ARMMCRegisterClasses[ARM::GPRRegClassID].
contains(
Reg))
4743 Warning(RegLoc,
"register list not in ascending order");
4744 else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].
contains(
Reg))
4745 return Error(RegLoc,
"register list not in ascending order");
4748 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
4749 RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
4751 return Error(RegLoc,
"non-contiguous register range");
4752 EReg =
MRI->getEncodingValue(
Reg);
4755 ") in register list");
4758 EReg =
MRI->getEncodingValue(++
Reg);
4774 ARMOperand::CreateToken(
"^", Parser.
getTok().
getLoc(), *
this));
4782ParseStatus ARMAsmParser::parseVectorLane(VectorLaneTy &LaneKind,
4790 LaneKind = AllLanes;
4803 if (getParser().parseExpression(LaneIndex))
4804 return Error(Loc,
"illegal expression");
4807 return Error(Loc,
"lane index must be empty or an integer");
4812 int64_t Val =
CE->getValue();
4815 if (Val < 0 || Val > 7)
4818 LaneKind = IndexedLane;
4828 VectorLaneTy LaneKind;
4838 int Reg = tryParseRegister();
4841 if (ARMMCRegisterClasses[ARM::DPRRegClassID].
contains(
Reg)) {
4842 ParseStatus Res = parseVectorLane(LaneKind, LaneIndex,
E);
4847 Operands.push_back(ARMOperand::CreateReg(
Reg, S,
E, *
this));
4851 ARMOperand::CreateVectorListAllLanes(
Reg, 1,
false, S,
E, *
this));
4854 Operands.push_back(ARMOperand::CreateVectorListIndexed(
4855 Reg, 1, LaneIndex,
false, S,
E, *
this));
4860 if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4861 Reg = getDRegFromQReg(
Reg);
4862 ParseStatus Res = parseVectorLane(LaneKind, LaneIndex,
E);
4867 Operands.push_back(ARMOperand::CreateReg(
Reg, S,
E, *
this));
4870 Reg =
MRI->getMatchingSuperReg(
Reg, ARM::dsub_0,
4871 &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4873 ARMOperand::CreateVectorListAllLanes(
Reg, 2,
false, S,
E, *
this));
4876 Operands.push_back(ARMOperand::CreateVectorListIndexed(
4877 Reg, 2, LaneIndex,
false, S,
E, *
this));
4882 Operands.push_back(ARMOperand::CreateReg(
Reg, S,
E, *
this));
4892 int Reg = tryParseRegister();
4894 return Error(RegLoc,
"register expected");
4897 unsigned FirstReg =
Reg;
4899 if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
Reg))
4901 "vector register in range Q0-Q7 expected");
4904 else if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4905 FirstReg =
Reg = getDRegFromQReg(
Reg);
4913 if (!parseVectorLane(LaneKind, LaneIndex,
E).isSuccess())
4921 else if (Spacing == 2)
4923 "sequential registers in double spaced list");
4926 int EndReg = tryParseRegister();
4928 return Error(AfterMinusLoc,
"register expected");
4930 if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(EndReg))
4931 EndReg = getDRegFromQReg(EndReg) + 1;
4938 !ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(EndReg)) ||
4940 !ARMMCRegisterClasses[ARM::DPRRegClassID].
contains(EndReg)))
4941 return Error(AfterMinusLoc,
"invalid register in register list");
4944 return Error(AfterMinusLoc,
"bad range in register list");
4946 VectorLaneTy NextLaneKind;
4947 unsigned NextLaneIndex;
4948 if (!parseVectorLane(NextLaneKind, NextLaneIndex,
E).isSuccess())
4950 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4951 return Error(AfterMinusLoc,
"mismatched lane index in register list");
4954 Count += EndReg -
Reg;
4961 Reg = tryParseRegister();
4963 return Error(RegLoc,
"register expected");
4966 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
Reg))
4967 return Error(RegLoc,
"vector register in range Q0-Q7 expected");
4976 else if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4979 else if (Spacing == 2)
4982 "invalid register in double-spaced list (must be 'D' register')");
4983 Reg = getDRegFromQReg(
Reg);
4984 if (
Reg != OldReg + 1)
4985 return Error(RegLoc,
"non-contiguous register range");
4989 VectorLaneTy NextLaneKind;
4990 unsigned NextLaneIndex;
4992 if (!parseVectorLane(NextLaneKind, NextLaneIndex,
E).isSuccess())
4994 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4995 return Error(LaneLoc,
"mismatched lane index in register list");
5002 Spacing = 1 + (
Reg == OldReg + 2);
5005 if (
Reg != OldReg + Spacing)
5006 return Error(RegLoc,
"non-contiguous register range");
5009 VectorLaneTy NextLaneKind;
5010 unsigned NextLaneIndex;
5012 if (!parseVectorLane(NextLaneKind, NextLaneIndex,
E).isSuccess())
5014 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
5015 return Error(EndLoc,
"mismatched lane index in register list");
5028 if (Count == 2 && !hasMVE()) {
5030 &ARMMCRegisterClasses[ARM::DPairRegClassID] :
5031 &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
5032 FirstReg =
MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
5034 auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
5035 ARMOperand::CreateVectorListAllLanes);
5036 Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S,
E, *
this));
5040 Operands.push_back(ARMOperand::CreateVectorListIndexed(
5041 FirstReg, Count, LaneIndex, (Spacing == 2), S,
E, *
this));
5092 const MCExpr *MemBarrierID;
5093 if (getParser().parseExpression(MemBarrierID))
5094 return Error(Loc,
"illegal expression");
5098 return Error(Loc,
"constant expression expected");
5100 int Val =
CE->getValue();
5102 return Error(Loc,
"immediate value out of range");
5128 ARMOperand::CreateTraceSyncBarrierOpt(
ARM_TSB::CSYNC, S, *
this));
5156 const MCExpr *ISBarrierID;
5157 if (getParser().parseExpression(ISBarrierID))
5158 return Error(Loc,
"illegal expression");
5162 return Error(Loc,
"constant expression expected");
5164 int Val =
CE->getValue();
5166 return Error(Loc,
"immediate value out of range");
5172 Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
5189 if (IFlagsStr !=
"none") {
5190 for (
int i = 0, e = IFlagsStr.
size(); i != e; ++i) {
5199 if (Flag == ~0U || (IFlags & Flag))
5215 if (
static_cast<ARMOperand &
>(*
Operands.back()).isMSRMask() ||
5216 static_cast<ARMOperand &
>(*
Operands.back()).isBankedReg())
5224 if (Val > 255 || Val < 0) {
5227 unsigned SYSmvalue = Val & 0xFF;
5229 Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S, *
this));
5238 auto TheReg = ARMSysReg::lookupMClassSysRegByName(
Mask.lower());
5239 if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
5242 unsigned SYSmvalue = TheReg->Encoding & 0xFFF;
5245 Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S, *
this));
5250 size_t Start = 0, Next =
Mask.find(
'_');
5252 std::string SpecReg =
Mask.slice(Start, Next).lower();
5259 unsigned FlagsVal = 0;
5261 if (SpecReg ==
"apsr") {
5265 .
Case(
"nzcvqg", 0xc)
5268 if (FlagsVal == ~0U) {
5274 }
else if (SpecReg ==
"cpsr" || SpecReg ==
"spsr") {
5276 if (Flags ==
"all" || Flags ==
"")
5278 for (
int i = 0, e =
Flags.size(); i != e; ++i) {
5288 if (Flag == ~0U || (FlagsVal & Flag))
5304 if (SpecReg ==
"spsr")
5308 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S, *
this));
5316 if (
static_cast<ARMOperand &
>(*
Operands.back()).isBankedReg() ||
5317 static_cast<ARMOperand &
>(*
Operands.back()).isMSRMask())
5326 auto TheReg = ARMBankedReg::lookupBankedRegByName(
RegName.lower());
5329 unsigned Encoding = TheReg->Encoding;
5332 Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S, *
this));
5342 auto ShiftCodeOpt = tryParseShiftToken();
5344 if (!ShiftCodeOpt.has_value())
5346 auto ShiftCode = ShiftCodeOpt.value();
5350 if (ShiftCode !=
Op)
5362 const MCExpr *ShiftAmount;
5365 if (getParser().parseExpression(ShiftAmount, EndLoc))
5366 return Error(Loc,
"illegal expression");
5369 return Error(Loc,
"constant expression expected");
5370 int Val =
CE->getValue();
5371 if (Val < Low || Val >
High)
5372 return Error(Loc,
"immediate value out of range");
5374 Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc, *
this));
5384 return Error(S,
"'be' or 'le' operand expected");
5392 return Error(S,
"'be' or 'le' operand expected");
5393 Operands.push_back(ARMOperand::CreateImm(
5411 if (ShiftName ==
"lsl" || ShiftName ==
"LSL")
5413 else if (ShiftName ==
"asr" || ShiftName ==
"ASR")
5426 const MCExpr *ShiftAmount;
5428 if (getParser().parseExpression(ShiftAmount, EndLoc))
5429 return Error(ExLoc,
"malformed shift expression");
5432 return Error(ExLoc,
"shift amount must be an immediate");
5434 int64_t Val =
CE->getValue();
5437 if (Val < 1 || Val > 32)
5438 return Error(ExLoc,
"'asr' shift amount must be in range [1,32]");
5441 return Error(ExLoc,
"'asr #32' shift amount not allowed in Thumb mode");
5442 if (Val == 32) Val = 0;
5445 if (Val < 0 || Val > 31)
5446 return Error(ExLoc,
"'lsr' shift amount must be in range [0,31]");
5450 ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc, *
this));
5465 if (ShiftName !=
"ror" && ShiftName !=
"ROR")
5476 const MCExpr *ShiftAmount;
5478 if (getParser().parseExpression(ShiftAmount, EndLoc))
5479 return Error(ExLoc,
"malformed rotate expression");
5482 return Error(ExLoc,
"rotate amount must be an immediate");
5484 int64_t Val =
CE->getValue();
5488 if (Val != 8 && Val != 16 && Val != 24 && Val != 0)
5489 return Error(ExLoc,
"'ror' rotate amount must be 8, 16, or 24");
5491 Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc, *
this));
5530 if (getParser().parseExpression(Imm1Exp, Ex1))
5531 return Error(Sx1,
"malformed expression");
5537 Imm1 =
CE->getValue();
5541 Operands.push_back(ARMOperand::CreateModImm(
5542 (Enc & 0xFF), (Enc & 0xF00) >> 7, Sx1, Ex1, *
this));
5553 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1, *
this));
5559 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1, *
this));
5566 "expected modified immediate operand: #[0, 255], #even[0-30]");
5569 return Error(Sx1,
"immediate operand must a number in the range [0, 255]");
5584 if (getParser().parseExpression(Imm2Exp, Ex2))
5585 return Error(Sx2,
"malformed expression");
5587 CE = dyn_cast<MCConstantExpr>(Imm2Exp);
5590 Imm2 =
CE->getValue();
5591 if (!(Imm2 & ~0x1E)) {
5593 Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2, *
this));
5597 "immediate operand must an even number in the range [0, 30]");
5599 return Error(Sx2,
"constant expression expected");
5614 if (getParser().parseExpression(LSBExpr))
5615 return Error(
E,
"malformed immediate expression");
5618 return Error(
E,
"'lsb' operand must be an immediate");
5620 int64_t LSB =
CE->getValue();
5622 if (LSB < 0 || LSB > 31)
5623 return Error(
E,
"'lsb' operand must be in the range [0,31]");
5637 if (getParser().parseExpression(WidthExpr, EndLoc))
5638 return Error(
E,
"malformed immediate expression");
5639 CE = dyn_cast<MCConstantExpr>(WidthExpr);
5641 return Error(
E,
"'width' operand must be an immediate");
5643 int64_t Width =
CE->getValue();
5645 if (Width < 1 || Width > 32 - LSB)
5646 return Error(
E,
"'width' operand must be in the range [1,32-lsb]");
5648 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc, *
this));
5665 bool haveEaten =
false;
5677 int Reg = tryParseRegister();
5685 unsigned ShiftImm = 0;
5688 if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
5696 ARMOperand::CreatePostIdxReg(
Reg, isAdd, ShiftTy, ShiftImm, S,
E, *
this));
5726 if (getParser().parseExpression(
Offset,
E))
5730 return Error(S,
"constant expression expected");
5733 int32_t Val =
CE->getValue();
5734 if (isNegative && Val == 0)
5735 Val = std::numeric_limits<int32_t>::min();
5737 Operands.push_back(ARMOperand::CreateImm(
5743 bool haveEaten =
false;
5755 int Reg = tryParseRegister();
5762 Operands.push_back(ARMOperand::CreatePostIdxReg(
5770 unsigned MnemonicOpsEndInd) {
5771 for (
unsigned I = 1;
I < MnemonicOpsEndInd; ++
I) {
5772 auto Op =
static_cast<ARMOperand &
>(*
Operands[
I]);
5773 if (
Op.isCondCode())
5780 unsigned MnemonicOpsEndInd) {
5781 for (
unsigned I = 1;
I < MnemonicOpsEndInd; ++
I) {
5782 auto Op =
static_cast<ARMOperand &
>(*
Operands[
I]);
5792void ARMAsmParser::cvtThumbMultiply(
MCInst &Inst,
5799 unsigned RegRd = MnemonicOpsEndInd;
5800 unsigned RegRn = MnemonicOpsEndInd + 1;
5801 unsigned RegRm = MnemonicOpsEndInd;
5803 if (
Operands.size() == MnemonicOpsEndInd + 3) {
5808 RegRn = MnemonicOpsEndInd + 2;
5809 RegRm = MnemonicOpsEndInd + 1;
5811 RegRn = MnemonicOpsEndInd + 1;
5812 RegRm = MnemonicOpsEndInd + 2;
5817 ((ARMOperand &)*
Operands[RegRd]).addRegOperands(Inst, 1);
5819 if (CondOutI != 0) {
5820 ((ARMOperand &)*
Operands[CondOutI]).addCCOutOperands(Inst, 1);
5823 *ARMOperand::CreateCCOut(0,
Operands[0]->getEndLoc(), *
this);
5824 Op.addCCOutOperands(Inst, 1);
5827 ((ARMOperand &)*
Operands[RegRn]).addRegOperands(Inst, 1);
5829 ((ARMOperand &)*
Operands[RegRm]).addRegOperands(Inst, 1);
5833 ((ARMOperand &)*
Operands[CondI]).addCondCodeOperands(Inst, 2);
5835 ARMOperand
Op = *ARMOperand::CreateCondCode(
5837 Op.addCondCodeOperands(Inst, 2);
5841void ARMAsmParser::cvtThumbBranches(
MCInst &Inst,
5855 case ARM::tBcc: Inst.
setOpcode(ARM::tB);
break;
5856 case ARM::t2Bcc: Inst.
setOpcode(ARM::t2B);
break;
5875 ARMOperand &
op =
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]);
5876 if (!
op.isSignedOffset<11, 1>() &&
isThumb() && hasV8MBaseline())
5882 ARMOperand &
op =
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]);
5883 if (!
op.isSignedOffset<8, 1>() &&
isThumb() && hasV8MBaseline())
5888 ((ARMOperand &)*
Operands[MnemonicOpsEndInd]).addImmOperands(Inst, 1);
5890 ((ARMOperand &)*
Operands[CondI]).addCondCodeOperands(Inst, 2);
5892 ARMOperand
Op = *ARMOperand::CreateCondCode(
5894 Op.addCondCodeOperands(Inst, 2);
5898void ARMAsmParser::cvtMVEVMOVQtoDReg(
5907 ((ARMOperand &)*
Operands[MnemonicOpsEndInd]).addRegOperands(Inst, 1);
5908 ((ARMOperand &)*
Operands[MnemonicOpsEndInd + 1])
5909 .addRegOperands(Inst, 1);
5910 ((ARMOperand &)*
Operands[MnemonicOpsEndInd + 2])
5911 .addRegOperands(Inst, 1);
5912 ((ARMOperand &)*
Operands[MnemonicOpsEndInd + 3])
5913 .addMVEPairVectorIndexOperands(Inst, 1);
5915 ((ARMOperand &)*
Operands[MnemonicOpsEndInd + 5])
5916 .addMVEPairVectorIndexOperands(Inst, 1);
5919 .addCondCodeOperands(Inst, 2);
5923 Op.addCondCodeOperands(Inst, 2);
5933 return TokError(
"Token is not a Left Bracket");
5938 int BaseRegNum = tryParseRegister();
5939 if (BaseRegNum == -1)
5940 return Error(BaseRegTok.
getLoc(),
"register expected");
5946 return Error(Tok.
getLoc(),
"malformed memory operand");
5952 Operands.push_back(ARMOperand::CreateMem(
5959 ARMOperand::CreateToken(
"!", Parser.
getTok().
getLoc(), *
this));
5967 "Lost colon or comma in memory operand?!");
5979 if (getParser().parseExpression(Expr))
5987 return Error (
E,
"constant expression expected");
5990 switch (
CE->getValue()) {
5993 "alignment specifier must be 16, 32, 64, 128, or 256 bits");
5994 case 16:
Align = 2;
break;
5995 case 32:
Align = 4;
break;
5996 case 64:
Align = 8;
break;
5997 case 128:
Align = 16;
break;
5998 case 256:
Align = 32;
break;
6009 Operands.push_back(ARMOperand::CreateMem(BaseRegNum,
nullptr, 0,
6011 S,
E, *
this, AlignmentLoc));
6017 ARMOperand::CreateToken(
"!", Parser.
getTok().
getLoc(), *
this));
6038 if (getParser().parseExpression(
Offset))
6041 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Offset)) {
6044 int32_t Val =
CE->getValue();
6045 if (isNegative && Val == 0)
6050 AdjustedOffset =
CE;
6053 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, AdjustedOffset, 0,
6067 ARMOperand::CreateToken(
"!", Parser.
getTok().
getLoc(), *
this));
6075 bool isNegative =
false;
6085 int OffsetRegNum = tryParseRegister();
6086 if (OffsetRegNum == -1)
6087 return Error(
E,
"register expected");
6091 unsigned ShiftImm = 0;
6094 if (parseMemRegOffsetShift(ShiftType, ShiftImm))
6104 Operands.push_back(ARMOperand::CreateMem(BaseRegNum,
nullptr, OffsetRegNum,
6105 ShiftType, ShiftImm, 0, isNegative,
6112 ARMOperand::CreateToken(
"!", Parser.
getTok().
getLoc(), *
this));
6129 return Error(Loc,
"illegal shift operator");
6131 if (ShiftName ==
"lsl" || ShiftName ==
"LSL" ||
6132 ShiftName ==
"asl" || ShiftName ==
"ASL")
6134 else if (ShiftName ==
"lsr" || ShiftName ==
"LSR")
6136 else if (ShiftName ==
"asr" || ShiftName ==
"ASR")
6138 else if (ShiftName ==
"ror" || ShiftName ==
"ROR")
6140 else if (ShiftName ==
"rrx" || ShiftName ==
"RRX")
6142 else if (ShiftName ==
"uxtw" || ShiftName ==
"UXTW")
6145 return Error(Loc,
"illegal shift operator");
6160 if (getParser().parseExpression(Expr))
6167 return Error(Loc,
"shift amount must be an immediate");
6168 int64_t
Imm =
CE->getValue();
6172 return Error(Loc,
"immediate shift value out of range");
6216 bool isVmovf =
false;
6218 for (
unsigned I = 1;
I < MnemonicOpsEndInd; ++
I) {
6219 ARMOperand &TyOp =
static_cast<ARMOperand &
>(*
Operands[
I]);
6220 if (TyOp.isToken() &&
6221 (TyOp.getToken() ==
".f32" || TyOp.getToken() ==
".f64" ||
6222 TyOp.getToken() ==
".f16")) {
6228 ARMOperand &Mnemonic =
static_cast<ARMOperand &
>(*
Operands[0]);
6229 bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() ==
"fconstd" ||
6230 Mnemonic.getToken() ==
"fconsts");
6231 if (!(isVmovf || isFconst))
6237 bool isNegative =
false;
6260 if (Val > 255 || Val < 0)
6261 return Error(Loc,
"encoded floating point value out of range");
6271 return Error(Loc,
"invalid floating point immediate");
6291 switch (getLexer().getKind()) {
6299 bool ExpectLabel = Mnemonic ==
"b" || Mnemonic ==
"bl";
6301 if (!tryParseRegisterWithWriteBack(
Operands))
6303 int Res = tryParseShiftRegister(
Operands);
6309 if (Mnemonic ==
"vmrs" &&
6313 Operands.push_back(ARMOperand::CreateToken(
"APSR_nzcv", S, *
this));
6330 if (getParser().parseExpression(IdVal))
6333 Operands.push_back(ARMOperand::CreateImm(IdVal, S,
E, *
this));
6339 bool AllowOutOfBoundReg = Mnemonic ==
"vlldm" || Mnemonic ==
"vlstm";
6341 AllowOutOfBoundReg);
6354 auto AdjacentToken = getLexer().peekTok(
false);
6358 if (!ExpectIdentifier) {
6367 if (getParser().parseExpression(ImmVal))
6371 int32_t Val =
CE->getValue();
6372 if (IsNegative && Val == 0)
6377 Operands.push_back(ARMOperand::CreateImm(ImmVal, S,
E, *
this));
6383 Operands.push_back(ARMOperand::CreateToken(
6399 if (parsePrefix(RefKind))
6402 const MCExpr *SubExprVal;
6403 if (getParser().parseExpression(SubExprVal))
6409 Operands.push_back(ARMOperand::CreateImm(ExprVal, S,
E, *
this));
6414 if (Mnemonic !=
"ldr")
6415 return Error(S,
"unexpected token in operand");
6417 const MCExpr *SubExprVal;
6418 if (getParser().parseExpression(SubExprVal))
6425 ARMOperand::CreateConstantPoolImm(SubExprVal, S,
E, *
this));
6431bool ARMAsmParser::parseImmExpr(int64_t &Out) {
6432 const MCExpr *Expr =
nullptr;
6433 SMLoc L = getParser().getTok().getLoc();
6434 if (check(getParser().parseExpression(Expr), L,
"expected expression"))
6437 if (check(!
Value, L,
"expected constant expression"))
6439 Out =
Value->getValue();
6468 static const struct PrefixEntry {
6469 const char *Spelling;
6471 uint8_t SupportedFormats;
6472 } PrefixEntries[] = {
6484 llvm::find_if(PrefixEntries, [&IDVal](
const PrefixEntry &PE) {
6485 return PE.Spelling == IDVal;
6487 if (Prefix == std::end(PrefixEntries)) {
6492 uint8_t CurrentFormat;
6493 switch (getContext().getObjectFileType()) {
6495 CurrentFormat = MACHO;
6498 CurrentFormat =
ELF;
6501 CurrentFormat =
COFF;
6504 CurrentFormat = WASM;
6514 if (~
Prefix->SupportedFormats & CurrentFormat) {
6516 "cannot represent relocation in the current file format");
6520 RefKind =
Prefix->VariantKind;
6544 unsigned &ProcessorIMod,
6548 CarrySetting =
false;
6554 if ((Mnemonic ==
"movs" &&
isThumb()) || Mnemonic ==
"teq" ||
6555 Mnemonic ==
"vceq" || Mnemonic ==
"svc" || Mnemonic ==
"mls" ||
6556 Mnemonic ==
"smmls" || Mnemonic ==
"vcls" || Mnemonic ==
"vmls" ||
6557 Mnemonic ==
"vnmls" || Mnemonic ==
"vacge" || Mnemonic ==
"vcge" ||
6558 Mnemonic ==
"vclt" || Mnemonic ==
"vacgt" || Mnemonic ==
"vaclt" ||
6559 Mnemonic ==
"vacle" || Mnemonic ==
"hlt" || Mnemonic ==
"vcgt" ||
6560 Mnemonic ==
"vcle" || Mnemonic ==
"smlal" || Mnemonic ==
"umaal" ||
6561 Mnemonic ==
"umlal" || Mnemonic ==
"vabal" || Mnemonic ==
"vmlal" ||
6562 Mnemonic ==
"vpadal" || Mnemonic ==
"vqdmlal" || Mnemonic ==
"fmuls" ||
6563 Mnemonic ==
"vmaxnm" || Mnemonic ==
"vminnm" || Mnemonic ==
"vcvta" ||
6564 Mnemonic ==
"vcvtn" || Mnemonic ==
"vcvtp" || Mnemonic ==
"vcvtm" ||
6565 Mnemonic ==
"vrinta" || Mnemonic ==
"vrintn" || Mnemonic ==
"vrintp" ||
6566 Mnemonic ==
"vrintm" || Mnemonic ==
"hvc" ||
6567 Mnemonic.
starts_with(
"vsel") || Mnemonic ==
"vins" ||
6568 Mnemonic ==
"vmovx" || Mnemonic ==
"bxns" || Mnemonic ==
"blxns" ||
6569 Mnemonic ==
"vdot" || Mnemonic ==
"vmmla" || Mnemonic ==
"vudot" ||
6570 Mnemonic ==
"vsdot" || Mnemonic ==
"vcmla" || Mnemonic ==
"vcadd" ||
6571 Mnemonic ==
"vfmal" || Mnemonic ==
"vfmsl" || Mnemonic ==
"wls" ||
6572 Mnemonic ==
"le" || Mnemonic ==
"dls" || Mnemonic ==
"csel" ||
6573 Mnemonic ==
"csinc" || Mnemonic ==
"csinv" || Mnemonic ==
"csneg" ||
6574 Mnemonic ==
"cinc" || Mnemonic ==
"cinv" || Mnemonic ==
"cneg" ||
6575 Mnemonic ==
"cset" || Mnemonic ==
"csetm" || Mnemonic ==
"aut" ||
6576 Mnemonic ==
"pac" || Mnemonic ==
"pacbti" || Mnemonic ==
"bti")
6581 if (Mnemonic !=
"adcs" && Mnemonic !=
"bics" && Mnemonic !=
"movs" &&
6582 Mnemonic !=
"muls" && Mnemonic !=
"smlals" && Mnemonic !=
"smulls" &&
6583 Mnemonic !=
"umlals" && Mnemonic !=
"umulls" && Mnemonic !=
"lsls" &&
6584 Mnemonic !=
"sbcs" && Mnemonic !=
"rscs" &&
6586 (Mnemonic ==
"vmine" || Mnemonic ==
"vshle" || Mnemonic ==
"vshlt" ||
6587 Mnemonic ==
"vshllt" || Mnemonic ==
"vrshle" || Mnemonic ==
"vrshlt" ||
6588 Mnemonic ==
"vmvne" || Mnemonic ==
"vorne" || Mnemonic ==
"vnege" ||
6589 Mnemonic ==
"vnegt" || Mnemonic ==
"vmule" || Mnemonic ==
"vmult" ||
6590 Mnemonic ==
"vrintne" || Mnemonic ==
"vcmult" ||
6591 Mnemonic ==
"vcmule" || Mnemonic ==
"vpsele" || Mnemonic ==
"vpselt" ||
6595 Mnemonic = Mnemonic.
slice(0, Mnemonic.
size() - 2);
6603 !(Mnemonic ==
"cps" || Mnemonic ==
"mls" || Mnemonic ==
"mrs" ||
6604 Mnemonic ==
"smmls" || Mnemonic ==
"vabs" || Mnemonic ==
"vcls" ||
6605 Mnemonic ==
"vmls" || Mnemonic ==
"vmrs" || Mnemonic ==
"vnmls" ||
6606 Mnemonic ==
"vqabs" || Mnemonic ==
"vrecps" || Mnemonic ==
"vrsqrts" ||
6607 Mnemonic ==
"srs" || Mnemonic ==
"flds" || Mnemonic ==
"fmrs" ||
6608 Mnemonic ==
"fsqrts" || Mnemonic ==
"fsubs" || Mnemonic ==
"fsts" ||
6609 Mnemonic ==
"fcpys" || Mnemonic ==
"fdivs" || Mnemonic ==
"fmuls" ||
6610 Mnemonic ==
"fcmps" || Mnemonic ==
"fcmpzs" || Mnemonic ==
"vfms" ||
6611 Mnemonic ==
"vfnms" || Mnemonic ==
"fconsts" || Mnemonic ==
"bxns" ||
6612 Mnemonic ==
"blxns" || Mnemonic ==
"vfmas" || Mnemonic ==
"vmlas" ||
6613 (Mnemonic ==
"movs" &&
isThumb()))) {
6614 Mnemonic = Mnemonic.
slice(0, Mnemonic.
size() - 1);
6615 CarrySetting =
true;
6628 Mnemonic = Mnemonic.
slice(0, Mnemonic.
size()-2);
6629 ProcessorIMod =
IMod;
6633 if (isMnemonicVPTPredicable(Mnemonic, ExtraToken) && Mnemonic !=
"vmovlt" &&
6634 Mnemonic !=
"vshllt" && Mnemonic !=
"vrshrnt" && Mnemonic !=
"vshrnt" &&
6635 Mnemonic !=
"vqrshrunt" && Mnemonic !=
"vqshrunt" &&
6636 Mnemonic !=
"vqrshrnt" && Mnemonic !=
"vqshrnt" && Mnemonic !=
"vmullt" &&
6637 Mnemonic !=
"vqmovnt" && Mnemonic !=
"vqmovunt" &&
6638 Mnemonic !=
"vqmovnt" && Mnemonic !=
"vmovnt" && Mnemonic !=
"vqdmullt" &&
6639 Mnemonic !=
"vpnot" && Mnemonic !=
"vcvtt" && Mnemonic !=
"vcvt") {
6643 Mnemonic = Mnemonic.
slice(0, Mnemonic.
size()-1);
6651 ITMask = Mnemonic.
slice(2, Mnemonic.
size());
6652 Mnemonic = Mnemonic.
slice(0, 2);
6656 ITMask = Mnemonic.
slice(4, Mnemonic.
size());
6657 Mnemonic = Mnemonic.
slice(0, 4);
6659 ITMask = Mnemonic.
slice(3, Mnemonic.
size());
6660 Mnemonic = Mnemonic.
slice(0, 3);
6670void ARMAsmParser::getMnemonicAcceptInfo(
StringRef Mnemonic,
6673 bool &CanAcceptCarrySet,
6674 bool &CanAcceptPredicationCode,
6675 bool &CanAcceptVPTPredicationCode) {
6676 CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken);
6679 Mnemonic ==
"and" || Mnemonic ==
"lsl" || Mnemonic ==
"lsr" ||
6680 Mnemonic ==
"rrx" || Mnemonic ==
"ror" || Mnemonic ==
"sub" ||
6681 Mnemonic ==
"add" || Mnemonic ==
"adc" || Mnemonic ==
"mul" ||
6682 Mnemonic ==
"bic" || Mnemonic ==
"asr" || Mnemonic ==
"orr" ||
6683 Mnemonic ==
"mvn" || Mnemonic ==
"rsb" || Mnemonic ==
"rsc" ||
6684 Mnemonic ==
"orn" || Mnemonic ==
"sbc" || Mnemonic ==
"eor" ||
6685 Mnemonic ==
"neg" || Mnemonic ==
"vfm" || Mnemonic ==
"vfnm" ||
6687 (Mnemonic ==
"smull" || Mnemonic ==
"mov" || Mnemonic ==
"mla" ||
6688 Mnemonic ==
"smlal" || Mnemonic ==
"umlal" || Mnemonic ==
"umull"));
6690 if (Mnemonic ==
"bkpt" || Mnemonic ==
"cbnz" || Mnemonic ==
"setend" ||
6691 Mnemonic ==
"cps" || Mnemonic ==
"it" || Mnemonic ==
"cbz" ||
6692 Mnemonic ==
"trap" || Mnemonic ==
"hlt" || Mnemonic ==
"udf" ||
6694 Mnemonic.
starts_with(
"vsel") || Mnemonic ==
"vmaxnm" ||
6695 Mnemonic ==
"vminnm" || Mnemonic ==
"vcvta" || Mnemonic ==
"vcvtn" ||
6696 Mnemonic ==
"vcvtp" || Mnemonic ==
"vcvtm" || Mnemonic ==
"vrinta" ||
6697 Mnemonic ==
"vrintn" || Mnemonic ==
"vrintp" || Mnemonic ==
"vrintm" ||
6698 Mnemonic.
starts_with(
"aes") || Mnemonic ==
"hvc" ||
6699 Mnemonic ==
"setpan" || Mnemonic.
starts_with(
"sha1") ||
6702 Mnemonic ==
"vmovx" || Mnemonic ==
"vins" || Mnemonic ==
"vudot" ||
6703 Mnemonic ==
"vsdot" || Mnemonic ==
"vcmla" || Mnemonic ==
"vcadd" ||
6704 Mnemonic ==
"vfmal" || Mnemonic ==
"vfmsl" || Mnemonic ==
"vfmat" ||
6705 Mnemonic ==
"vfmab" || Mnemonic ==
"vdot" || Mnemonic ==
"vmmla" ||
6706 Mnemonic ==
"sb" || Mnemonic ==
"ssbb" || Mnemonic ==
"pssbb" ||
6707 Mnemonic ==
"vsmmla" || Mnemonic ==
"vummla" || Mnemonic ==
"vusmmla" ||
6708 Mnemonic ==
"vusdot" || Mnemonic ==
"vsudot" || Mnemonic ==
"bfcsel" ||
6709 Mnemonic ==
"wls" || Mnemonic ==
"dls" || Mnemonic ==
"le" ||
6710 Mnemonic ==
"csel" || Mnemonic ==
"csinc" || Mnemonic ==
"csinv" ||
6711 Mnemonic ==
"csneg" || Mnemonic ==
"cinc" || Mnemonic ==
"cinv" ||
6712 Mnemonic ==
"cneg" || Mnemonic ==
"cset" || Mnemonic ==
"csetm" ||
6713 (hasCDE() && MS.isCDEInstr(Mnemonic) &&
6714 !MS.isITPredicableCDEInstr(Mnemonic)) ||
6716 Mnemonic ==
"pac" || Mnemonic ==
"pacbti" || Mnemonic ==
"aut" ||
6717 Mnemonic ==
"bti" ||
6724 CanAcceptPredicationCode =
false;
6727 CanAcceptPredicationCode =
6728 Mnemonic !=
"cdp2" && Mnemonic !=
"clrex" && Mnemonic !=
"mcr2" &&
6729 Mnemonic !=
"mcrr2" && Mnemonic !=
"mrc2" && Mnemonic !=
"mrrc2" &&
6730 Mnemonic !=
"dmb" && Mnemonic !=
"dfb" && Mnemonic !=
"dsb" &&
6731 Mnemonic !=
"isb" && Mnemonic !=
"pld" && Mnemonic !=
"pli" &&
6732 Mnemonic !=
"pldw" && Mnemonic !=
"ldc2" && Mnemonic !=
"ldc2l" &&
6733 Mnemonic !=
"stc2" && Mnemonic !=
"stc2l" && Mnemonic !=
"tsb" &&
6735 }
else if (isThumbOne()) {
6737 CanAcceptPredicationCode = Mnemonic !=
"movs";
6739 CanAcceptPredicationCode = Mnemonic !=
"nop" && Mnemonic !=
"movs";
6741 CanAcceptPredicationCode =
true;
6745 for (
unsigned I = 0;
I < MnemonicOpsEndInd; ++
I) {
6746 auto &
Op =
static_cast<ARMOperand &
>(*
Operands[
I]);
6747 if (
Op.isToken() &&
Op.getToken() ==
".w")
6757void ARMAsmParser::tryConvertingToTwoOperandForm(
6763 if (
Operands.size() != MnemonicOpsEndInd + 3)
6766 const auto &Op3 =
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]);
6767 auto &Op4 =
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1]);
6768 if (!Op3.isReg() || !Op4.isReg())
6771 auto Op3Reg = Op3.getReg();
6772 auto Op4Reg = Op4.getReg();
6778 auto &Op5 =
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 2]);
6780 if (Mnemonic !=
"add")
6782 bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
6783 (Op5.isReg() && Op5.getReg() == ARM::PC);
6784 if (!TryTransform) {
6785 TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
6786 (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
6787 !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
6788 Op5.isImm() && !Op5.isImm0_508s4());
6792 }
else if (!isThumbOne())
6795 if (!(Mnemonic ==
"add" || Mnemonic ==
"sub" || Mnemonic ==
"and" ||
6796 Mnemonic ==
"eor" || Mnemonic ==
"lsl" || Mnemonic ==
"lsr" ||
6797 Mnemonic ==
"asr" || Mnemonic ==
"adc" || Mnemonic ==
"sbc" ||
6798 Mnemonic ==
"ror" || Mnemonic ==
"orr" || Mnemonic ==
"bic"))
6804 bool Transform = Op3Reg == Op4Reg;
6809 const ARMOperand *LastOp = &Op5;
6811 if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
6812 ((Mnemonic ==
"add" && Op4Reg != ARM::SP) ||
6813 Mnemonic ==
"and" || Mnemonic ==
"eor" ||
6814 Mnemonic ==
"adc" || Mnemonic ==
"orr")) {
6825 if (((Mnemonic ==
"add" && CarrySetting) || Mnemonic ==
"sub") &&
6831 if ((Mnemonic ==
"add" || Mnemonic ==
"sub") && LastOp->isImm0_7())
6845 ARMOperand &
Op =
static_cast<ARMOperand &
>(MCOp);
6851 const MCExpr *E = dyn_cast<MCExpr>(
Op.getImm());
6854 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
6863bool ARMAsmParser::shouldOmitVectorPredicateOperand(
6865 if (!hasMVE() ||
Operands.size() <= MnemonicOpsEndInd)
6879 if (
static_cast<ARMOperand &
>(*Operand).isVectorIndex() ||
6880 ((*Operand).isReg() &&
6881 (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
6882 (*Operand).getReg()) ||
6883 ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
6884 (*Operand).getReg())))) {
6894 if (
static_cast<ARMOperand &
>(*Operand).isVectorIndex() ||
6895 static_cast<ARMOperand &
>(*Operand).isQReg())
6911 unsigned VariantID);
6922void ARMAsmParser::fixupGNULDRDAlias(
StringRef Mnemonic,
6924 unsigned MnemonicOpsEndInd) {
6925 if (Mnemonic !=
"ldrd" && Mnemonic !=
"strd" && Mnemonic !=
"ldrexd" &&
6926 Mnemonic !=
"strexd" && Mnemonic !=
"ldaexd" && Mnemonic !=
"stlexd")
6929 unsigned IdX = Mnemonic ==
"strexd" || Mnemonic ==
"stlexd"
6930 ? MnemonicOpsEndInd + 1
6931 : MnemonicOpsEndInd;
6936 ARMOperand &Op2 =
static_cast<ARMOperand &
>(*
Operands[IdX]);
6937 ARMOperand &Op3 =
static_cast<ARMOperand &
>(*
Operands[IdX + 1]);
6941 if (!Op3.isGPRMem())
6948 unsigned RtEncoding =
MRI->getEncodingValue(Op2.getReg());
6949 if (!
isThumb() && (RtEncoding & 1)) {
6954 if (Op2.getReg() == ARM::PC)
6956 unsigned PairedReg = GPR.
getRegister(RtEncoding + 1);
6957 if (!PairedReg || PairedReg == ARM::PC ||
6958 (PairedReg == ARM::SP && !hasV8Ops()))
6962 ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(),
6963 Op2.getEndLoc(), *
this));
6971bool ARMAsmParser::CDEConvertDualRegOperand(
StringRef Mnemonic,
6973 unsigned MnemonicOpsEndInd) {
6974 assert(MS.isCDEDualRegInstr(Mnemonic));
6976 if (
Operands.size() < 3 + MnemonicOpsEndInd)
6980 "operand must be an even-numbered register in the range [r0, r10]");
7013 RPair = ARM::R10_R11;
7028 for (
unsigned I = 0;
I < MnemonicOpsEndInd; ++
I)
7029 if (
static_cast<ARMOperand &
>(*
Operands[
I]).isCondCode()) {
7031 --MnemonicOpsEndInd;
7037 for (
unsigned I = 0;
I < MnemonicOpsEndInd; ++
I)
7038 if (
static_cast<ARMOperand &
>(*
Operands[
I]).isCCOut()) {
7040 --MnemonicOpsEndInd;
7046 for (
unsigned I = 0;
I < MnemonicOpsEndInd; ++
I)
7047 if (
static_cast<ARMOperand &
>(*
Operands[
I]).isVPTPred()) {
7049 --MnemonicOpsEndInd;
7064 const FeatureBitset &AvailableFeatures = getAvailableFeatures();
7065 unsigned AssemblerDialect = getParser().getAssemblerDialect();
7071 parseDirectiveReq(
Name, NameLoc);
7078 size_t Start = 0, Next =
Name.find(
'.');
7085 unsigned ProcessorIMod;
7088 Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode, VPTPredicationCode,
7089 CarrySetting, ProcessorIMod, ITMask);
7092 if (isThumbOne() && PredicationCode !=
ARMCC::AL && Mnemonic !=
"b") {
7093 return Error(NameLoc,
"conditional execution not supported in Thumb1");
7096 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc, *
this));
7109 if (Mnemonic ==
"it" || Mnemonic.
starts_with(
"vpt") ||
7112 Mnemonic ==
"vpt" ?
SMLoc::getFromPointer(NameLoc.getPointer() + 3) :
7113 SMLoc::getFromPointer(NameLoc.getPointer() + 4);
7114 if (ITMask.
size() > 3) {
7115 if (Mnemonic ==
"it")
7116 return Error(Loc,
"too many conditions on IT instruction");
7117 return Error(Loc,
"too many conditions on VPT instruction");
7121 if (Pos !=
't' && Pos !=
'e') {
7122 return Error(Loc,
"illegal IT block condition mask '" + ITMask +
"'");
7128 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc, *
this));
7141 bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode;
7142 getMnemonicAcceptInfo(Mnemonic, ExtraToken,
Name, CanAcceptCarrySet,
7143 CanAcceptPredicationCode, CanAcceptVPTPredicationCode);
7147 if (!CanAcceptCarrySet && CarrySetting) {
7148 return Error(NameLoc,
"instruction '" + Mnemonic +
7149 "' can not set flags, but 's' suffix specified");
7153 if (!CanAcceptPredicationCode && PredicationCode !=
ARMCC::AL) {
7154 return Error(NameLoc,
"instruction '" + Mnemonic +
7155 "' is not predicable, but condition code specified");
7160 if (!CanAcceptVPTPredicationCode && VPTPredicationCode !=
ARMVCC::None) {
7161 return Error(NameLoc,
"instruction '" + Mnemonic +
7162 "' is not VPT predicable, but VPT code T/E is specified");
7166 if (CanAcceptCarrySet && CarrySetting) {
7169 ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0, Loc, *
this));
7176 Operands.push_back(ARMOperand::CreateCondCode(
7184 !(Mnemonic.
starts_with(
"vcvt") && Mnemonic !=
"vcvta" &&
7185 Mnemonic !=
"vcvtn" && Mnemonic !=
"vcvtp" && Mnemonic !=
"vcvtm")) {
7188 Operands.push_back(ARMOperand::CreateVPTPred(
7193 if (ProcessorIMod) {
7194 Operands.push_back(ARMOperand::CreateImm(
7197 }
else if (Mnemonic ==
"cps" && isMClass()) {
7198 return Error(NameLoc,
"instruction 'cps' requires effect for M-class");
7204 Next =
Name.find(
'.', Start + 1);
7205 ExtraToken =
Name.slice(Start, Next);
7214 if (ExtraToken ==
".n" && !
isThumb()) {
7216 return Error(Loc,
"instruction with .n (narrow) qualifier not allowed in "
7223 if (ExtraToken !=
".n" && (
isThumb() || ExtraToken !=
".w")) {
7225 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc, *
this));
7232 unsigned MnemonicOpsEndInd =
Operands.size();
7237 if (parseOperand(
Operands, Mnemonic)) {
7243 if (parseOperand(
Operands, Mnemonic)) {
7252 tryConvertingToTwoOperandForm(Mnemonic, PredicationCode, CarrySetting,
7255 if (hasCDE() && MS.isCDEInstr(Mnemonic)) {
7263 if (MS.isCDEDualRegInstr(Mnemonic)) {
7265 CDEConvertDualRegOperand(Mnemonic,
Operands, MnemonicOpsEndInd);
7272 if (!shouldOmitVectorPredicateOperand(Mnemonic,
Operands,
7273 MnemonicOpsEndInd) &&
7274 Mnemonic ==
"vmov" && PredicationCode ==
ARMCC::LT) {
7282 Mnemonic.
size() - 1 + CarrySetting);
7287 }
else if (Mnemonic ==
"vcvt" && PredicationCode ==
ARMCC::NE &&
7288 !shouldOmitVectorPredicateOperand(Mnemonic,
Operands,
7289 MnemonicOpsEndInd)) {
7298 Mnemonic.
size() - 1 + CarrySetting);
7302 ARMOperand::CreateToken(
StringRef(
"vcvtn"), MLoc, *
this));
7303 }
else if (Mnemonic ==
"vmul" && PredicationCode ==
ARMCC::LT &&
7304 !shouldOmitVectorPredicateOperand(Mnemonic,
Operands,
7305 MnemonicOpsEndInd)) {
7318 if (!shouldOmitVectorPredicateOperand(Mnemonic,
Operands,
7319 MnemonicOpsEndInd)) {
7326 if (Mnemonic.
starts_with(
"vcvtt") && MnemonicOpsEndInd > 2) {
7328 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd - 2]);
7330 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd - 1]);
7331 if (!(Sz1.isToken() && Sz1.getToken().starts_with(
".f") &&
7332 Sz2.isToken() && Sz2.getToken().starts_with(
".f"))) {
7337 Mnemonic = Mnemonic.
substr(0, 4);
7339 ARMOperand::CreateToken(Mnemonic, MLoc, *
this));
7343 Mnemonic.
size() + CarrySetting);
7346 ARMOperand::CreateVPTPred(
7348 ++MnemonicOpsEndInd;
7350 }
else if (CanAcceptVPTPredicationCode) {
7354 if (shouldOmitVectorPredicateOperand(Mnemonic,
Operands,
7355 MnemonicOpsEndInd)) {
7362 bool usedVPTPredicationCode =
false;
7364 if (
static_cast<ARMOperand &
>(*
Operands[
I]).isVPTPred())
7365 usedVPTPredicationCode =
true;
7366 if (!usedVPTPredicationCode) {
7374 Mnemonic =
Name.slice(0, Mnemonic.
size() + 1);
7377 ARMOperand::CreateToken(Mnemonic, NameLoc, *
this));
7386 if (!
isThumb() && Mnemonic ==
"blx" &&
7387 Operands.size() == MnemonicOpsEndInd + 1 &&
7388 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]).isImm())
7392 fixupGNULDRDAlias(Mnemonic,
Operands, MnemonicOpsEndInd);
7401 bool IsLoad = (Mnemonic ==
"ldrexd" || Mnemonic ==
"ldaexd");
7402 if (!
isThumb() &&
Operands.size() > MnemonicOpsEndInd + 1 + (!IsLoad) &&
7403 (Mnemonic ==
"ldrexd" || Mnemonic ==
"strexd" || Mnemonic ==
"ldaexd" ||
7404 Mnemonic ==
"stlexd")) {
7405 unsigned Idx = IsLoad ? MnemonicOpsEndInd : MnemonicOpsEndInd + 1;
7406 ARMOperand &Op1 =
static_cast<ARMOperand &
>(*
Operands[
Idx]);
7407 ARMOperand &Op2 =
static_cast<ARMOperand &
>(*
Operands[
Idx + 1]);
7411 if (Op1.isReg() && MRC.
contains(Op1.getReg())) {
7412 unsigned Reg1 = Op1.getReg();
7413 unsigned Rt =
MRI->getEncodingValue(Reg1);
7414 unsigned Reg2 = Op2.getReg();
7415 unsigned Rt2 =
MRI->getEncodingValue(Reg2);
7418 return Error(Op2.getStartLoc(),
7419 IsLoad ?
"destination operands must be sequential"
7420 :
"source operands must be sequential");
7426 IsLoad ?
"destination operands must start start at an even register"
7427 :
"source operands must start start at an even register");
7429 unsigned NewReg =
MRI->getMatchingSuperReg(
7430 Reg1, ARM::gsub_0, &(
MRI->getRegClass(ARM::GPRPairRegClassID)));
7431 Operands[
Idx] = ARMOperand::CreateReg(NewReg, Op1.getStartLoc(),
7432 Op2.getEndLoc(), *
this);
7442 if (isThumbTwo() && Mnemonic ==
"sub" &&
7443 Operands.size() == MnemonicOpsEndInd + 3 &&
7444 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]).isReg() &&
7445 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]).getReg() ==
7447 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1]).isReg() &&
7448 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1]).getReg() ==
7450 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 2]).isImm()) {
7451 Operands.front() = ARMOperand::CreateToken(
Name, NameLoc, *
this);
7463 unsigned Reg,
unsigned HiReg,
7464 bool &containsReg) {
7465 containsReg =
false;
7491 return Inst.
getOpcode() == ARM::tBKPT ||
7498 unsigned MnemonicOpsEndInd) {
7499 for (
unsigned I = MnemonicOpsEndInd;
I <
Operands.size(); ++
I) {
7500 const ARMOperand &
Op =
static_cast<const ARMOperand &
>(*
Operands[
I]);
7501 if (
Op.isRegList()) {
7508bool ARMAsmParser::validatetLDMRegList(
const MCInst &Inst,
7510 unsigned MnemonicOpsEndInd,
7511 unsigned ListIndex,
bool IsARPop) {
7516 if (!IsARPop && ListContainsSP)
7519 "SP may not be in the register list");
7520 if (ListContainsPC && ListContainsLR)
7523 "PC and LR may not be in the register list simultaneously");
7527bool ARMAsmParser::validatetSTMRegList(
const MCInst &Inst,
7529 unsigned MnemonicOpsEndInd,
7530 unsigned ListIndex) {
7534 if (ListContainsSP && ListContainsPC)
7537 "SP and PC may not be in the register list");
7541 "SP may not be in the register list");
7545 "PC may not be in the register list");
7550 bool Load,
bool ARMMode,
bool Writeback,
7551 unsigned MnemonicOpsEndInd) {
7552 unsigned RtIndex =
Load || !Writeback ? 0 : 1;
7565 "Rt must be even-numbered");
7568 if (Rt2 != Rt + 1) {
7571 "destination operands must be sequential");
7574 "source operands must be sequential");
7581 if (!ARMMode && Load) {
7584 "destination operands can't be identical");
7590 if (Rn == Rt || Rn == Rt2) {
7593 "base register needs to be different from destination "
7596 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7597 "source register and base register can't be identical");
7620 ARMOperand &
Op =
static_cast<ARMOperand &
>(MCOp);
7626 const MCExpr *E = dyn_cast<MCExpr>(
Op.getImm());
7633bool ARMAsmParser::validateInstruction(
MCInst &Inst,
7635 unsigned MnemonicOpsEndInd) {
7645 return Error(Loc,
"instructions in IT block must be predicable");
7648 if (
Cond != currentITCond()) {
7652 if (
static_cast<ARMOperand &
>(*
Operands[
I]).isCondCode())
7654 return Error(CondLoc,
"incorrect condition in IT block; got '" +
7656 "', but expected '" +
7665 return Error(Loc,
"predicated instructions must be in IT block");
7669 return Warning(Loc,
"predicated instructions should be in IT block");
7676 if (MCID.
operands()[i].isPredicate()) {
7678 return Error(Loc,
"instruction is not predicable");
7686 if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
7687 return Error(Loc,
"instruction must be outside of IT block or the last instruction in an IT block");
7691 unsigned Bit = extractITMaskBit(VPTState.Mask, VPTState.CurPosition);
7693 return Error(Loc,
"instruction in VPT block must be predicable");
7696 if (Pred != VPTPred) {
7699 if (
static_cast<ARMOperand &
>(*
Operands[
I]).isVPTPred())
7701 return Error(PredLoc,
"incorrect predication in VPT block; got '" +
7703 "', but expected '" +
7710 return Error(Loc,
"VPT predicated instructions must be in VPT block");
7712 const unsigned Opcode = Inst.
getOpcode();
7717 case ARM::VLSTM_T2: {
7721 MnemonicOpsEndInd + 2) {
7722 ARMOperand &
Op =
static_cast<ARMOperand &
>(
7725 auto &RegList =
Op.getRegList();
7727 if (RegList.size() == 32 && !hasV8_1MMainline()) {
7728 return Error(
Op.getEndLoc(),
"T2 version requires v8.1-M.Main");
7731 if (hasD32() && RegList.size() != 32) {
7732 return Error(
Op.getEndLoc(),
"operand must be exactly {d0-d31}");
7735 if (!hasD32() && (RegList.size() != 16 && RegList.size() != 32)) {
7737 "operand must be exactly {d0-d15} (T1) or {d0-d31} (T2)");
7753 return Error(Loc,
"unpredictable IT predicate sequence");
7757 if (validateLDRDSTRD(Inst,
Operands,
true,
true,
7758 false, MnemonicOpsEndInd))
7762 case ARM::LDRD_POST:
7763 if (validateLDRDSTRD(Inst,
Operands,
true,
true,
7764 true, MnemonicOpsEndInd))
7768 if (validateLDRDSTRD(Inst,
Operands,
true,
false,
7769 false, MnemonicOpsEndInd))
7772 case ARM::t2LDRD_PRE:
7773 case ARM::t2LDRD_POST:
7774 if (validateLDRDSTRD(Inst,
Operands,
true,
false,
7775 true, MnemonicOpsEndInd))
7781 if (RmReg == ARM::SP && !hasV8Ops())
7783 "r13 (SP) is an unpredictable operand to BXJ");
7787 if (validateLDRDSTRD(Inst,
Operands,
false,
true,
7788 false, MnemonicOpsEndInd))
7792 case ARM::STRD_POST:
7793 if (validateLDRDSTRD(Inst,
Operands,
false,
true,
7794 true, MnemonicOpsEndInd))
7797 case ARM::t2STRD_PRE:
7798 case ARM::t2STRD_POST:
7799 if (validateLDRDSTRD(Inst,
Operands,
false,
false,
7800 true, MnemonicOpsEndInd))
7803 case ARM::STR_PRE_IMM:
7804 case ARM::STR_PRE_REG:
7805 case ARM::t2STR_PRE:
7806 case ARM::STR_POST_IMM:
7807 case ARM::STR_POST_REG:
7808 case ARM::t2STR_POST:
7810 case ARM::t2STRH_PRE:
7811 case ARM::STRH_POST:
7812 case ARM::t2STRH_POST:
7813 case ARM::STRB_PRE_IMM:
7814 case ARM::STRB_PRE_REG:
7815 case ARM::t2STRB_PRE:
7816 case ARM::STRB_POST_IMM:
7817 case ARM::STRB_POST_REG:
7818 case ARM::t2STRB_POST: {
7824 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
7825 "source register and base register can't be identical");
7828 case ARM::t2LDR_PRE_imm:
7829 case ARM::t2LDR_POST_imm:
7830 case ARM::t2STR_PRE_imm:
7831 case ARM::t2STR_POST_imm: {
7838 "destination register and base register can't be identical");
7839 if (Inst.
getOpcode() == ARM::t2LDR_POST_imm ||
7840 Inst.
getOpcode() == ARM::t2STR_POST_imm) {
7842 if (Imm > 255 || Imm < -255)
7843 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7844 "operand must be in range [-255, 255]");
7846 if (Inst.
getOpcode() == ARM::t2STR_PRE_imm ||
7847 Inst.
getOpcode() == ARM::t2STR_POST_imm) {
7850 "operand must be a register in range [r0, r14]");
7856 case ARM::t2LDRB_OFFSET_imm:
7857 case ARM::t2LDRB_PRE_imm:
7858 case ARM::t2LDRB_POST_imm:
7859 case ARM::t2STRB_OFFSET_imm:
7860 case ARM::t2STRB_PRE_imm:
7861 case ARM::t2STRB_POST_imm: {
7862 if (Inst.
getOpcode() == ARM::t2LDRB_POST_imm ||
7863 Inst.
getOpcode() == ARM::t2STRB_POST_imm ||
7864 Inst.
getOpcode() == ARM::t2LDRB_PRE_imm ||
7865 Inst.
getOpcode() == ARM::t2STRB_PRE_imm) {
7867 if (Imm > 255 || Imm < -255)
7868 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7869 "operand must be in range [-255, 255]");
7870 }
else if (Inst.
getOpcode() == ARM::t2LDRB_OFFSET_imm ||
7871 Inst.
getOpcode() == ARM::t2STRB_OFFSET_imm) {
7873 if (Imm > 0 || Imm < -255)
7874 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7875 "operand must be in range [0, 255] with a negative sign");
7879 "if operand is PC, should call the LDRB (literal)");
7884 case ARM::t2LDRH_OFFSET_imm:
7885 case ARM::t2LDRH_PRE_imm:
7886 case ARM::t2LDRH_POST_imm:
7887 case ARM::t2STRH_OFFSET_imm:
7888 case ARM::t2STRH_PRE_imm:
7889 case ARM::t2STRH_POST_imm: {
7890 if (Inst.
getOpcode() == ARM::t2LDRH_POST_imm ||
7891 Inst.
getOpcode() == ARM::t2STRH_POST_imm ||
7892 Inst.
getOpcode() == ARM::t2LDRH_PRE_imm ||
7893 Inst.
getOpcode() == ARM::t2STRH_PRE_imm) {
7895 if (Imm > 255 || Imm < -255)
7896 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7897 "operand must be in range [-255, 255]");
7898 }
else if (Inst.
getOpcode() == ARM::t2LDRH_OFFSET_imm ||
7899 Inst.
getOpcode() == ARM::t2STRH_OFFSET_imm) {
7901 if (Imm > 0 || Imm < -255)
7902 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7903 "operand must be in range [0, 255] with a negative sign");
7907 "if operand is PC, should call the LDRH (literal)");
7912 case ARM::t2LDRSB_OFFSET_imm:
7913 case ARM::t2LDRSB_PRE_imm:
7914 case ARM::t2LDRSB_POST_imm: {
7915 if (Inst.
getOpcode() == ARM::t2LDRSB_POST_imm ||
7916 Inst.
getOpcode() == ARM::t2LDRSB_PRE_imm) {
7918 if (Imm > 255 || Imm < -255)
7919 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7920 "operand must be in range [-255, 255]");
7921 }
else if (Inst.
getOpcode() == ARM::t2LDRSB_OFFSET_imm) {
7923 if (Imm > 0 || Imm < -255)
7924 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7925 "operand must be in range [0, 255] with a negative sign");
7928 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7929 "if operand is PC, should call the LDRH (literal)");
7934 case ARM::t2LDRSH_OFFSET_imm:
7935 case ARM::t2LDRSH_PRE_imm:
7936 case ARM::t2LDRSH_POST_imm: {
7937 if (Inst.
getOpcode() == ARM::t2LDRSH_POST_imm ||
7938 Inst.
getOpcode() == ARM::t2LDRSH_PRE_imm) {
7940 if (Imm > 255 || Imm < -255)
7941 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7942 "operand must be in range [-255, 255]");
7943 }
else if (Inst.
getOpcode() == ARM::t2LDRSH_OFFSET_imm) {
7945 if (Imm > 0 || Imm < -255)
7946 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
7947 "operand must be in range [0, 255] with a negative sign");
7951 "if operand is PC, should call the LDRH (literal)");
7956 case ARM::LDR_PRE_IMM:
7957 case ARM::LDR_PRE_REG:
7958 case ARM::t2LDR_PRE:
7959 case ARM::LDR_POST_IMM:
7960 case ARM::LDR_POST_REG:
7961 case ARM::t2LDR_POST:
7963 case ARM::t2LDRH_PRE:
7964 case ARM::LDRH_POST:
7965 case ARM::t2LDRH_POST:
7966 case ARM::LDRSH_PRE:
7967 case ARM::t2LDRSH_PRE:
7968 case ARM::LDRSH_POST:
7969 case ARM::t2LDRSH_POST:
7970 case ARM::LDRB_PRE_IMM:
7971 case ARM::LDRB_PRE_REG:
7972 case ARM::t2LDRB_PRE:
7973 case ARM::LDRB_POST_IMM:
7974 case ARM::LDRB_POST_REG:
7975 case ARM::t2LDRB_POST:
7976 case ARM::LDRSB_PRE:
7977 case ARM::t2LDRSB_PRE:
7978 case ARM::LDRSB_POST:
7979 case ARM::t2LDRSB_POST: {
7986 "destination register and base register can't be identical");
7990 case ARM::MVE_VLDRBU8_rq:
7991 case ARM::MVE_VLDRBU16_rq:
7992 case ARM::MVE_VLDRBS16_rq:
7993 case ARM::MVE_VLDRBU32_rq:
7994 case ARM::MVE_VLDRBS32_rq:
7995 case ARM::MVE_VLDRHU16_rq:
7996 case ARM::MVE_VLDRHU16_rq_u:
7997 case ARM::MVE_VLDRHU32_rq:
7998 case ARM::MVE_VLDRHU32_rq_u:
7999 case ARM::MVE_VLDRHS32_rq:
8000 case ARM::MVE_VLDRHS32_rq_u:
8001 case ARM::MVE_VLDRWU32_rq:
8002 case ARM::MVE_VLDRWU32_rq_u:
8003 case ARM::MVE_VLDRDU64_rq:
8004 case ARM::MVE_VLDRDU64_rq_u:
8005 case ARM::MVE_VLDRWU32_qi:
8006 case ARM::MVE_VLDRWU32_qi_pre:
8007 case ARM::MVE_VLDRDU64_qi:
8008 case ARM::MVE_VLDRDU64_qi_pre: {
8010 unsigned QdIdx = 0, QmIdx = 2;
8011 bool QmIsPointer =
false;
8013 case ARM::MVE_VLDRWU32_qi:
8014 case ARM::MVE_VLDRDU64_qi:
8018 case ARM::MVE_VLDRWU32_qi_pre:
8019 case ARM::MVE_VLDRDU64_qi_pre:
8030 Twine(
"destination vector register and vector ") +
8031 (QmIsPointer ?
"pointer" :
"offset") +
8032 " register can't be identical");
8044 if (Widthm1 >= 32 - LSB)
8045 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8046 "bitfield width must be in range [1,32-lsb]");
8058 bool HasWritebackToken =
8059 (
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
8061 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
8062 .getToken() ==
"!");
8064 bool ListContainsBase;
8068 "registers must be in range r0-r7");
8070 if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
8073 "writeback operator '!' expected");
8076 if (ListContainsBase && HasWritebackToken)
8077 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8078 "writeback operator '!' not allowed when base register "
8079 "in register list");
8081 if (validatetLDMRegList(Inst,
Operands, MnemonicOpsEndInd, 3))
8085 case ARM::LDMIA_UPD:
8086 case ARM::LDMDB_UPD:
8087 case ARM::LDMIB_UPD:
8088 case ARM::LDMDA_UPD:
8095 "writeback register not allowed in register list");
8099 if (validatetLDMRegList(Inst,
Operands, MnemonicOpsEndInd, 3))
8104 if (validatetSTMRegList(Inst,
Operands, MnemonicOpsEndInd, 3))
8107 case ARM::t2LDMIA_UPD:
8108 case ARM::t2LDMDB_UPD:
8109 case ARM::t2STMIA_UPD:
8110 case ARM::t2STMDB_UPD:
8113 "writeback register not allowed in register list");
8115 if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
8116 if (validatetLDMRegList(Inst,
Operands, MnemonicOpsEndInd, 3))
8119 if (validatetSTMRegList(Inst,
Operands, MnemonicOpsEndInd, 3))
8124 case ARM::sysLDMIA_UPD:
8125 case ARM::sysLDMDA_UPD:
8126 case ARM::sysLDMDB_UPD:
8127 case ARM::sysLDMIB_UPD:
8129 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8130 "writeback register only allowed on system LDM "
8131 "if PC in register-list");
8133 case ARM::sysSTMIA_UPD:
8134 case ARM::sysSTMDA_UPD:
8135 case ARM::sysSTMDB_UPD:
8136 case ARM::sysSTMIB_UPD:
8138 "system STM cannot have writeback register");
8143 bool ListContainsBase;
8147 "registers must be in range r0-r7 or pc");
8148 if (validatetLDMRegList(Inst,
Operands, MnemonicOpsEndInd, 2, !isMClass()))
8153 bool ListContainsBase;
8157 "registers must be in range r0-r7 or lr");
8158 if (validatetSTMRegList(Inst,
Operands, MnemonicOpsEndInd, 2))
8162 case ARM::tSTMIA_UPD: {
8163 bool ListContainsBase, InvalidLowList;
8165 0, ListContainsBase);
8166 if (InvalidLowList && !isThumbTwo())
8167 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8168 "registers must be in range r0-r7");
8172 if (InvalidLowList && ListContainsBase)
8174 "writeback operator '!' not allowed when base register "
8175 "in register list");
8177 if (validatetSTMRegList(Inst,
Operands, MnemonicOpsEndInd, 4))
8184 if (!isThumbTwo() &&
8186 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8187 "source register must be the same as destination");
8197 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8198 "source register must be sp if destination is sp");
8203 if (!(
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd]))
8204 .isSignedOffset<11, 1>())
8206 "branch target out of range");
8209 int op = (
Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8210 : MnemonicOpsEndInd + 1;
8211 ARMOperand &Operand =
static_cast<ARMOperand &
>(*
Operands[
op]);
8213 if (!isa<MCBinaryExpr>(Operand.getImm()) &&
8214 !Operand.isSignedOffset<24, 1>())
8215 return Error(
Operands[
op]->getStartLoc(),
"branch target out of range");
8220 if (!
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd])
8221 .isSignedOffset<8, 1>())
8223 "branch target out of range");
8226 int Op = (
Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8227 : MnemonicOpsEndInd + 1;
8228 if (!
static_cast<ARMOperand &
>(*
Operands[
Op]).isSignedOffset<20, 1>())
8229 return Error(
Operands[
Op]->getStartLoc(),
"branch target out of range");
8234 if (!
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
8235 .isUnsignedOffset<6, 1>())
8236 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8237 "branch target out of range");
8243 case ARM::t2MOVTi16:
8251 int i = (
Operands[MnemonicOpsEndInd]->isImm()) ? MnemonicOpsEndInd
8252 : MnemonicOpsEndInd + 1;
8253 ARMOperand &
Op =
static_cast<ARMOperand &
>(*
Operands[i]);
8256 const MCExpr *E = dyn_cast<MCExpr>(
Op.getImm());
8258 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
8263 "immediate expression for mov requires :lower16: or :upper16");
8269 return Error(
Op.getStartLoc(),
8270 "Immediate expression for Thumb adds requires :lower0_7:,"
8271 " :lower8_15:, :upper0_7: or :upper8_15:");
8277 return Error(
Op.getStartLoc(),
8278 "Immediate expression for Thumb movs requires :lower0_7:,"
8279 " :lower8_15:, :upper0_7: or :upper8_15:");
8288 if (Imm8 == 0x10 && Pred !=
ARMCC::AL && hasRAS())
8289 return Error(
Operands[1]->getStartLoc(),
"instruction 'esb' is not "
8290 "predicable, but condition "
8293 return Error(
Operands[1]->getStartLoc(),
"instruction 'csdb' is not "
8294 "predicable, but condition "
8302 if (!
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd])
8303 .isUnsignedOffset<4, 1>() ||
8306 "branch location out of range or not a multiple of 2");
8309 if (Opcode == ARM::t2BFi) {
8310 if (!
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
8311 .isSignedOffset<16, 1>())
8313 "branch target out of range or not a multiple of 2");
8314 }
else if (Opcode == ARM::t2BFLi) {
8315 if (!
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
8316 .isSignedOffset<18, 1>())
8318 "branch target out of range or not a multiple of 2");
8323 if (!
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd])
8324 .isUnsignedOffset<4, 1>() ||
8327 "branch location out of range or not a multiple of 2");
8329 if (!
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
8330 .isSignedOffset<16, 1>())
8331 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8332 "branch target out of range or not a multiple of 2");
8335 "branch location and else branch target should either both be "
8336 "immediates or both labels");
8340 if (Diff != 4 && Diff != 2)
8343 "else branch target must be 2 or 4 greater than the branch location");
8350 !ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
8353 "invalid register in register list. Valid registers are "
8354 "r0-r12, lr/r14 and APSR.");
8371 "instruction 'ssbb' is not predicable, but condition code "
8375 "instruction 'pssbb' is not predicable, but condition code "
8379 case ARM::VMOVRRS: {
8384 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8385 "source operands must be sequential");
8388 case ARM::VMOVSRR: {
8394 "destination operands must be sequential");
8398 case ARM::VSTMDIA: {
8400 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1]);
8401 auto &RegList =
Op.getRegList();
8402 if (RegList.size() < 1 || RegList.size() > 16)
8403 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8404 "list of registers must be at least 1 and at most 16");
8407 case ARM::MVE_VQDMULLs32bh:
8408 case ARM::MVE_VQDMULLs32th:
8409 case ARM::MVE_VCMULf32:
8410 case ARM::MVE_VMULLBs32:
8411 case ARM::MVE_VMULLTs32:
8412 case ARM::MVE_VMULLBu32:
8413 case ARM::MVE_VMULLTu32: {
8415 Operands[MnemonicOpsEndInd + 1]->getReg()) {
8417 "Qd register and Qn register can't be identical");
8422 "Qd register and Qm register can't be identical");
8426 case ARM::MVE_VREV64_8:
8427 case ARM::MVE_VREV64_16:
8428 case ARM::MVE_VREV64_32:
8429 case ARM::MVE_VQDMULL_qr_s32bh:
8430 case ARM::MVE_VQDMULL_qr_s32th: {
8434 "Qd register and Qn register can't be identical");
8438 case ARM::MVE_VCADDi32:
8439 case ARM::MVE_VCADDf32:
8440 case ARM::MVE_VHCADDs32: {
8444 "Qd register and Qm register can't be identical");
8448 case ARM::MVE_VMOV_rr_q: {
8451 return Error(
Operands[MnemonicOpsEndInd + 2]->getStartLoc(),
8452 "Q-registers must be the same");
8453 if (
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 3])
8454 .getVectorIndex() !=
8455 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 5])
8458 return Error(
Operands[MnemonicOpsEndInd + 3]->getStartLoc(),
8459 "Q-register indexes must be 2 and 0 or 3 and 1");
8462 case ARM::MVE_VMOV_q_rr: {
8466 "Q-registers must be the same");
8467 if (
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
8468 .getVectorIndex() !=
8469 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 3])
8472 return Error(
Operands[MnemonicOpsEndInd + 1]->getStartLoc(),
8473 "Q-register indexes must be 2 and 0 or 3 and 1");
8476 case ARM::MVE_SQRSHR:
8477 case ARM::MVE_UQRSHL: {
8481 "Rda register and Rm register can't be identical");
8502 case ARM::t2SMLALBB:
8503 case ARM::t2SMLALBT:
8505 case ARM::t2SMLALDX:
8506 case ARM::t2SMLALTB:
8507 case ARM::t2SMLALTT:
8509 case ARM::t2SMLSLDX:
8510 case ARM::t2SMULL: {
8515 "unpredictable instruction, RdHi and RdLo must be different");
8523 case ARM::CDE_CX1DA:
8527 case ARM::CDE_CX2DA:
8531 case ARM::CDE_CX3DA:
8532 case ARM::CDE_VCX1_vec:
8533 case ARM::CDE_VCX1_fpsp:
8534 case ARM::CDE_VCX1_fpdp:
8535 case ARM::CDE_VCX1A_vec:
8536 case ARM::CDE_VCX1A_fpsp:
8537 case ARM::CDE_VCX1A_fpdp:
8538 case ARM::CDE_VCX2_vec:
8539 case ARM::CDE_VCX2_fpsp:
8540 case ARM::CDE_VCX2_fpdp:
8541 case ARM::CDE_VCX2A_vec:
8542 case ARM::CDE_VCX2A_fpsp:
8543 case ARM::CDE_VCX2A_fpdp:
8544 case ARM::CDE_VCX3_vec:
8545 case ARM::CDE_VCX3_fpsp:
8546 case ARM::CDE_VCX3_fpdp:
8547 case ARM::CDE_VCX3A_vec:
8548 case ARM::CDE_VCX3A_fpsp:
8549 case ARM::CDE_VCX3A_fpdp: {
8551 "CDE operand 1 must be a coprocessor ID");
8555 "coprocessor must be configured as CDE");
8556 else if (Coproc >= 8)
8558 "coprocessor must be in the range [p0, p7]");
8564 case ARM::t2LDC2L_OFFSET:
8565 case ARM::t2LDC2L_OPTION:
8566 case ARM::t2LDC2L_POST:
8567 case ARM::t2LDC2L_PRE:
8568 case ARM::t2LDC2_OFFSET:
8569 case ARM::t2LDC2_OPTION:
8570 case ARM::t2LDC2_POST:
8571 case ARM::t2LDC2_PRE:
8572 case ARM::t2LDCL_OFFSET:
8573 case ARM::t2LDCL_OPTION:
8574 case ARM::t2LDCL_POST:
8575 case ARM::t2LDCL_PRE:
8576 case ARM::t2LDC_OFFSET:
8577 case ARM::t2LDC_OPTION:
8578 case ARM::t2LDC_POST:
8579 case ARM::t2LDC_PRE:
8588 case ARM::t2STC2L_OFFSET:
8589 case ARM::t2STC2L_OPTION:
8590 case ARM::t2STC2L_POST:
8591 case ARM::t2STC2L_PRE:
8592 case ARM::t2STC2_OFFSET:
8593 case ARM::t2STC2_OPTION:
8594 case ARM::t2STC2_POST:
8595 case ARM::t2STC2_PRE:
8596 case ARM::t2STCL_OFFSET:
8597 case ARM::t2STCL_OPTION:
8598 case ARM::t2STCL_POST:
8599 case ARM::t2STCL_PRE:
8600 case ARM::t2STC_OFFSET:
8601 case ARM::t2STC_OPTION:
8602 case ARM::t2STC_POST:
8603 case ARM::t2STC_PRE: {
8608 if (Opcode == ARM::t2MRRC || Opcode == ARM::t2MRRC2)
8610 else if (Opcode == ARM::t2MRC || Opcode == ARM::t2MRC2)
8613 "Operand must be a coprocessor ID");
8618 "coprocessor must be configured as GCP");
8630 case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VST1LNd8_UPD;
8631 case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VST1LNd16_UPD;
8632 case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VST1LNd32_UPD;
8633 case ARM::VST1LNdWB_register_Asm_8: Spacing = 1;
return ARM::VST1LNd8_UPD;
8634 case ARM::VST1LNdWB_register_Asm_16: Spacing = 1;
return ARM::VST1LNd16_UPD;
8635 case ARM::VST1LNdWB_register_Asm_32: Spacing = 1;
return ARM::VST1LNd32_UPD;
8636 case ARM::VST1LNdAsm_8: Spacing = 1;
return ARM::VST1LNd8;
8637 case ARM::VST1LNdAsm_16: Spacing = 1;
return ARM::VST1LNd16;
8638 case ARM::VST1LNdAsm_32: Spacing = 1;
return ARM::VST1LNd32;
8641 case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VST2LNd8_UPD;
8642 case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VST2LNd16_UPD;
8643 case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VST2LNd32_UPD;
8644 case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2;
return ARM::VST2LNq16_UPD;
8645 case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VST2LNq32_UPD;
8647 case ARM::VST2LNdWB_register_Asm_8: Spacing = 1;
return ARM::VST2LNd8_UPD;
8648 case ARM::VST2LNdWB_register_Asm_16: Spacing = 1;
return ARM::VST2LNd16_UPD;
8649 case ARM::VST2LNdWB_register_Asm_32: Spacing = 1;
return ARM::VST2LNd32_UPD;
8650 case ARM::VST2LNqWB_register_Asm_16: Spacing = 2;
return ARM::VST2LNq16_UPD;
8651 case ARM::VST2LNqWB_register_Asm_32: Spacing = 2;
return ARM::VST2LNq32_UPD;
8653 case ARM::VST2LNdAsm_8: Spacing = 1;
return ARM::VST2LNd8;
8654 case ARM::VST2LNdAsm_16: Spacing = 1;
return ARM::VST2LNd16;
8655 case ARM::VST2LNdAsm_32: Spacing = 1;
return ARM::VST2LNd32;
8656 case ARM::VST2LNqAsm_16: Spacing = 2;
return ARM::VST2LNq16;
8657 case ARM::VST2LNqAsm_32: Spacing = 2;
return ARM::VST2LNq32;
8660 case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VST3LNd8_UPD;
8661 case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VST3LNd16_UPD;
8662 case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VST3LNd32_UPD;
8663 case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1;
return ARM::VST3LNq16_UPD;
8664 case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VST3LNq32_UPD;
8665 case ARM::VST3LNdWB_register_Asm_8: Spacing = 1;
return ARM::VST3LNd8_UPD;
8666 case ARM::VST3LNdWB_register_Asm_16: Spacing = 1;
return ARM::VST3LNd16_UPD;
8667 case ARM::VST3LNdWB_register_Asm_32: Spacing = 1;
return ARM::VST3LNd32_UPD;
8668 case ARM::VST3LNqWB_register_Asm_16: Spacing = 2;
return ARM::VST3LNq16_UPD;
8669 case ARM::VST3LNqWB_register_Asm_32: Spacing = 2;
return ARM::VST3LNq32_UPD;
8670 case ARM::VST3LNdAsm_8: Spacing = 1;
return ARM::VST3LNd8;
8671 case ARM::VST3LNdAsm_16: Spacing = 1;
return ARM::VST3LNd16;
8672 case ARM::VST3LNdAsm_32: Spacing = 1;
return ARM::VST3LNd32;
8673 case ARM::VST3LNqAsm_16: Spacing = 2;
return ARM::VST3LNq16;
8674 case ARM::VST3LNqAsm_32: Spacing = 2;
return ARM::VST3LNq32;
8677 case ARM::VST3dWB_fixed_Asm_8: Spacing = 1;
return ARM::VST3d8_UPD;
8678 case ARM::VST3dWB_fixed_Asm_16: Spacing = 1;
return ARM::VST3d16_UPD;
8679 case ARM::VST3dWB_fixed_Asm_32: Spacing = 1;
return ARM::VST3d32_UPD;
8680 case ARM::VST3qWB_fixed_Asm_8: Spacing = 2;
return ARM::VST3q8_UPD;
8681 case ARM::VST3qWB_fixed_Asm_16: Spacing = 2;
return ARM::VST3q16_UPD;
8682 case ARM::VST3qWB_fixed_Asm_32: Spacing = 2;
return ARM::VST3q32_UPD;
8683 case ARM::VST3dWB_register_Asm_8: Spacing = 1;
return ARM::VST3d8_UPD;
8684 case ARM::VST3dWB_register_Asm_16: Spacing = 1;
return ARM::VST3d16_UPD;
8685 case ARM::VST3dWB_register_Asm_32: Spacing = 1;
return ARM::VST3d32_UPD;
8686 case ARM::VST3qWB_register_Asm_8: Spacing = 2;
return ARM::VST3q8_UPD;
8687 case ARM::VST3qWB_register_Asm_16: Spacing = 2;
return ARM::VST3q16_UPD;
8688 case ARM::VST3qWB_register_Asm_32: Spacing = 2;
return ARM::VST3q32_UPD;
8689 case ARM::VST3dAsm_8: Spacing = 1;
return ARM::VST3d8;
8690 case ARM::VST3dAsm_16: Spacing = 1;
return ARM::VST3d16;
8691 case ARM::VST3dAsm_32: Spacing = 1;
return ARM::VST3d32;
8692 case ARM::VST3qAsm_8: Spacing = 2;
return ARM::VST3q8;
8693 case ARM::VST3qAsm_16: Spacing = 2;
return ARM::VST3q16;
8694 case ARM::VST3qAsm_32: Spacing = 2;
return ARM::VST3q32;
8697 case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VST4LNd8_UPD;
8698 case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VST4LNd16_UPD;
8699 case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VST4LNd32_UPD;
8700 case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1;
return ARM::VST4LNq16_UPD;
8701 case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VST4LNq32_UPD;
8702 case ARM::VST4LNdWB_register_Asm_8: Spacing = 1;
return ARM::VST4LNd8_UPD;
8703 case ARM::VST4LNdWB_register_Asm_16: Spacing = 1;
return ARM::VST4LNd16_UPD;
8704 case ARM::VST4LNdWB_register_Asm_32: Spacing = 1;
return ARM::VST4LNd32_UPD;
8705 case ARM::VST4LNqWB_register_Asm_16: Spacing = 2;
return ARM::VST4LNq16_UPD;
8706 case ARM::VST4LNqWB_register_Asm_32: Spacing = 2;
return ARM::VST4LNq32_UPD;
8707 case ARM::VST4LNdAsm_8: Spacing = 1;
return ARM::VST4LNd8;
8708 case ARM::VST4LNdAsm_16: Spacing = 1;
return ARM::VST4LNd16;
8709 case ARM::VST4LNdAsm_32: Spacing = 1;
return ARM::VST4LNd32;
8710 case ARM::VST4LNqAsm_16: Spacing = 2;
return ARM::VST4LNq16;
8711 case ARM::VST4LNqAsm_32: Spacing = 2;
return ARM::VST4LNq32;
8714 case ARM::VST4dWB_fixed_Asm_8: Spacing = 1;
return ARM::VST4d8_UPD;
8715 case ARM::VST4dWB_fixed_Asm_16: Spacing = 1;
return ARM::VST4d16_UPD;
8716 case ARM::VST4dWB_fixed_Asm_32: Spacing = 1;
return ARM::VST4d32_UPD;
8717 case ARM::VST4qWB_fixed_Asm_8: Spacing = 2;
return ARM::VST4q8_UPD;
8718 case ARM::VST4qWB_fixed_Asm_16: Spacing = 2;
return ARM::VST4q16_UPD;
8719 case ARM::VST4qWB_fixed_Asm_32: Spacing = 2;
return ARM::VST4q32_UPD;
8720 case ARM::VST4dWB_register_Asm_8: Spacing = 1;
return ARM::VST4d8_UPD;
8721 case ARM::VST4dWB_register_Asm_16: Spacing = 1;
return ARM::VST4d16_UPD;
8722 case ARM::VST4dWB_register_Asm_32: Spacing = 1;
return ARM::VST4d32_UPD;
8723 case ARM::VST4qWB_register_Asm_8: Spacing = 2;
return ARM::VST4q8_UPD;
8724 case ARM::VST4qWB_register_Asm_16: Spacing = 2;
return ARM::VST4q16_UPD;
8725 case ARM::VST4qWB_register_Asm_32: Spacing = 2;
return ARM::VST4q32_UPD;
8726 case ARM::VST4dAsm_8: Spacing = 1;
return ARM::VST4d8;
8727 case ARM::VST4dAsm_16: Spacing = 1;
return ARM::VST4d16;
8728 case ARM::VST4dAsm_32: Spacing = 1;
return ARM::VST4d32;
8729 case ARM::VST4qAsm_8: Spacing = 2;
return ARM::VST4q8;
8730 case ARM::VST4qAsm_16: Spacing = 2;
return ARM::VST4q16;
8731 case ARM::VST4qAsm_32: Spacing = 2;
return ARM::VST4q32;
8739 case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD1LNd8_UPD;
8740 case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD1LNd16_UPD;
8741 case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD1LNd32_UPD;
8742 case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1;
return ARM::VLD1LNd8_UPD;
8743 case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1;
return ARM::VLD1LNd16_UPD;
8744 case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1;
return ARM::VLD1LNd32_UPD;
8745 case ARM::VLD1LNdAsm_8: Spacing = 1;
return ARM::VLD1LNd8;
8746 case ARM::VLD1LNdAsm_16: Spacing = 1;
return ARM::VLD1LNd16;
8747 case ARM::VLD1LNdAsm_32: Spacing = 1;
return ARM::VLD1LNd32;
8750 case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD2LNd8_UPD;
8751 case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD2LNd16_UPD;
8752 case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD2LNd32_UPD;
8753 case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD2LNq16_UPD;
8754 case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD2LNq32_UPD;
8755 case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1;
return ARM::VLD2LNd8_UPD;
8756 case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1;
return ARM::VLD2LNd16_UPD;
8757 case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1;
return ARM::VLD2LNd32_UPD;
8758 case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2;
return ARM::VLD2LNq16_UPD;
8759 case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2;
return ARM::VLD2LNq32_UPD;
8760 case ARM::VLD2LNdAsm_8: Spacing = 1;
return ARM::VLD2LNd8;
8761 case ARM::VLD2LNdAsm_16: Spacing = 1;
return ARM::VLD2LNd16;
8762 case ARM::VLD2LNdAsm_32: Spacing = 1;
return ARM::VLD2LNd32;
8763 case ARM::VLD2LNqAsm_16: Spacing = 2;
return ARM::VLD2LNq16;
8764 case ARM::VLD2LNqAsm_32: Spacing = 2;
return ARM::VLD2LNq32;
8767 case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD3DUPd8_UPD;
8768 case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD3DUPd16_UPD;
8769 case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD3DUPd32_UPD;
8770 case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD3DUPq8_UPD;
8771 case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2;
return ARM::VLD3DUPq16_UPD;
8772 case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD3DUPq32_UPD;
8773 case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1;
return ARM::VLD3DUPd8_UPD;
8774 case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1;
return ARM::VLD3DUPd16_UPD;
8775 case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1;
return ARM::VLD3DUPd32_UPD;
8776 case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2;
return ARM::VLD3DUPq8_UPD;
8777 case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2;
return ARM::VLD3DUPq16_UPD;
8778 case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2;
return ARM::VLD3DUPq32_UPD;
8779 case ARM::VLD3DUPdAsm_8: Spacing = 1;
return ARM::VLD3DUPd8;
8780 case ARM::VLD3DUPdAsm_16: Spacing = 1;
return ARM::VLD3DUPd16;
8781 case ARM::VLD3DUPdAsm_32: Spacing = 1;
return ARM::VLD3DUPd32;
8782 case ARM::VLD3DUPqAsm_8: Spacing = 2;
return ARM::VLD3DUPq8;
8783 case ARM::VLD3DUPqAsm_16: Spacing = 2;
return ARM::VLD3DUPq16;
8784 case ARM::VLD3DUPqAsm_32: Spacing = 2;
return ARM::VLD3DUPq32;
8787 case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD3LNd8_UPD;
8788 case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD3LNd16_UPD;
8789 case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD3LNd32_UPD;
8790 case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD3LNq16_UPD;
8791 case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD3LNq32_UPD;
8792 case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1;
return ARM::VLD3LNd8_UPD;
8793 case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1;
return ARM::VLD3LNd16_UPD;
8794 case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1;
return ARM::VLD3LNd32_UPD;
8795 case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2;
return ARM::VLD3LNq16_UPD;
8796 case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2;
return ARM::VLD3LNq32_UPD;
8797 case ARM::VLD3LNdAsm_8: Spacing = 1;
return ARM::VLD3LNd8;
8798 case ARM::VLD3LNdAsm_16: Spacing = 1;
return ARM::VLD3LNd16;
8799 case ARM::VLD3LNdAsm_32: Spacing = 1;
return ARM::VLD3LNd32;
8800 case ARM::VLD3LNqAsm_16: Spacing = 2;
return ARM::VLD3LNq16;
8801 case ARM::VLD3LNqAsm_32: Spacing = 2;
return ARM::VLD3LNq32;
8804 case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD3d8_UPD;
8805 case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD3d16_UPD;
8806 case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD3d32_UPD;
8807 case ARM::VLD3qWB_fixed_Asm_8: Spacing = 2;
return ARM::VLD3q8_UPD;
8808 case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2;
return ARM::VLD3q16_UPD;
8809 case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD3q32_UPD;
8810 case ARM::VLD3dWB_register_Asm_8: Spacing = 1;
return ARM::VLD3d8_UPD;
8811 case ARM::VLD3dWB_register_Asm_16: Spacing = 1;
return ARM::VLD3d16_UPD;
8812 case ARM::VLD3dWB_register_Asm_32: Spacing = 1;
return ARM::VLD3d32_UPD;
8813 case ARM::VLD3qWB_register_Asm_8: Spacing = 2;
return ARM::VLD3q8_UPD;
8814 case ARM::VLD3qWB_register_Asm_16: Spacing = 2;
return ARM::VLD3q16_UPD;
8815 case ARM::VLD3qWB_register_Asm_32: Spacing = 2;
return ARM::VLD3q32_UPD;
8816 case ARM::VLD3dAsm_8: Spacing = 1;
return ARM::VLD3d8;
8817 case ARM::VLD3dAsm_16: Spacing = 1;
return ARM::VLD3d16;
8818 case ARM::VLD3dAsm_32: Spacing = 1;
return ARM::VLD3d32;
8819 case ARM::VLD3qAsm_8: Spacing = 2;
return ARM::VLD3q8;
8820 case ARM::VLD3qAsm_16: Spacing = 2;
return ARM::VLD3q16;
8821 case ARM::VLD3qAsm_32: Spacing = 2;
return ARM::VLD3q32;
8824 case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD4LNd8_UPD;
8825 case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD4LNd16_UPD;
8826 case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD4LNd32_UPD;
8827 case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2;
return ARM::VLD4LNq16_UPD;
8828 case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD4LNq32_UPD;
8829 case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1;
return ARM::VLD4LNd8_UPD;
8830 case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1;
return ARM::VLD4LNd16_UPD;
8831 case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1;
return ARM::VLD4LNd32_UPD;
8832 case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2;
return ARM::VLD4LNq16_UPD;
8833 case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2;
return ARM::VLD4LNq32_UPD;
8834 case ARM::VLD4LNdAsm_8: Spacing = 1;
return ARM::VLD4LNd8;
8835 case ARM::VLD4LNdAsm_16: Spacing = 1;
return ARM::VLD4LNd16;
8836 case ARM::VLD4LNdAsm_32: Spacing = 1;
return ARM::VLD4LNd32;
8837 case ARM::VLD4LNqAsm_16: Spacing = 2;
return ARM::VLD4LNq16;
8838 case ARM::VLD4LNqAsm_32: Spacing = 2;
return ARM::VLD4LNq32;
8841 case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD4DUPd8_UPD;
8842 case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD4DUPd16_UPD;
8843 case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD4DUPd32_UPD;
8844 case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD4DUPq8_UPD;
8845 case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD4DUPq16_UPD;
8846 case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD4DUPq32_UPD;
8847 case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1;
return ARM::VLD4DUPd8_UPD;
8848 case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1;
return ARM::VLD4DUPd16_UPD;
8849 case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1;
return ARM::VLD4DUPd32_UPD;
8850 case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2;
return ARM::VLD4DUPq8_UPD;
8851 case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2;
return ARM::VLD4DUPq16_UPD;
8852 case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2;
return ARM::VLD4DUPq32_UPD;
8853 case ARM::VLD4DUPdAsm_8: Spacing = 1;
return ARM::VLD4DUPd8;
8854 case ARM::VLD4DUPdAsm_16: Spacing = 1;
return ARM::VLD4DUPd16;
8855 case ARM::VLD4DUPdAsm_32: Spacing = 1;
return ARM::VLD4DUPd32;
8856 case ARM::VLD4DUPqAsm_8: Spacing = 2;
return ARM::VLD4DUPq8;
8857 case ARM::VLD4DUPqAsm_16: Spacing = 2;
return ARM::VLD4DUPq16;
8858 case ARM::VLD4DUPqAsm_32: Spacing = 2;
return ARM::VLD4DUPq32;
8861 case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD4d8_UPD;
8862 case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD4d16_UPD;
8863 case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD4d32_UPD;
8864 case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2;
return ARM::VLD4q8_UPD;
8865 case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2;
return ARM::VLD4q16_UPD;
8866 case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD4q32_UPD;
8867 case ARM::VLD4dWB_register_Asm_8: Spacing = 1;
return ARM::VLD4d8_UPD;
8868 case ARM::VLD4dWB_register_Asm_16: Spacing = 1;
return ARM::VLD4d16_UPD;
8869 case ARM::VLD4dWB_register_Asm_32: Spacing = 1;
return ARM::VLD4d32_UPD;
8870 case ARM::VLD4qWB_register_Asm_8: Spacing = 2;
return ARM::VLD4q8_UPD;
8871 case ARM::VLD4qWB_register_Asm_16: Spacing = 2;
return ARM::VLD4q16_UPD;
8872 case ARM::VLD4qWB_register_Asm_32: Spacing = 2;
return ARM::VLD4q32_UPD;
8873 case ARM::VLD4dAsm_8: Spacing = 1;
return ARM::VLD4d8;
8874 case ARM::VLD4dAsm_16: Spacing = 1;
return ARM::VLD4d16;
8875 case ARM::VLD4dAsm_32: Spacing = 1;
return ARM::VLD4d32;
8876 case ARM::VLD4qAsm_8: Spacing = 2;
return ARM::VLD4q8;
8877 case ARM::VLD4qAsm_16: Spacing = 2;
return ARM::VLD4q16;
8878 case ARM::VLD4qAsm_32: Spacing = 2;
return ARM::VLD4q32;
8882bool ARMAsmParser::processInstruction(
MCInst &Inst,
8884 unsigned MnemonicOpsEndInd,
8888 bool HasWideQualifier =
false;
8890 ARMOperand &ARMOp =
static_cast<ARMOperand&
>(*Op);
8891 if (ARMOp.isToken() && ARMOp.getToken() ==
".w") {
8892 HasWideQualifier =
true;
8903 MnemonicOpsEndInd + 2) {
8904 ARMOperand &
Op =
static_cast<ARMOperand &
>(
8907 auto &RegList =
Op.getRegList();
8910 if (RegList.size() == 32) {
8911 const unsigned Opcode =
8912 (Inst.
getOpcode() == ARM::VLLDM) ? ARM::VLLDM_T2 : ARM::VLSTM_T2;
8926 case ARM::LDRT_POST:
8927 case ARM::LDRBT_POST: {
8928 const unsigned Opcode =
8929 (Inst.
getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
8930 : ARM::LDRBT_POST_IMM;
8946 case ARM::LDRSHTii: {
8951 else if (Inst.
getOpcode() == ARM::LDRHTii)
8953 else if (Inst.
getOpcode() == ARM::LDRSHTii)
8964 case ARM::STRT_POST:
8965 case ARM::STRBT_POST: {
8966 const unsigned Opcode =
8967 (Inst.
getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
8968 : ARM::STRBT_POST_IMM;
8995 llvm::rotr<uint32_t>(Enc & 0xFF, (Enc & 0xF00) >> 7)));
9000 MCSymbol *Dot = getContext().createTempSymbol();
9019 case ARM::t2LDR_PRE_imm:
9020 case ARM::t2LDR_POST_imm: {
9033 case ARM::t2STR_PRE_imm:
9034 case ARM::t2STR_POST_imm: {
9047 case ARM::t2LDRB_OFFSET_imm: {
9057 case ARM::t2LDRB_PRE_imm:
9058 case ARM::t2LDRB_POST_imm: {
9062 : ARM::t2LDRB_POST);
9072 case ARM::t2STRB_OFFSET_imm: {
9082 case ARM::t2STRB_PRE_imm:
9083 case ARM::t2STRB_POST_imm: {
9087 : ARM::t2STRB_POST);
9097 case ARM::t2LDRH_OFFSET_imm: {
9107 case ARM::t2LDRH_PRE_imm:
9108 case ARM::t2LDRH_POST_imm: {
9112 : ARM::t2LDRH_POST);
9122 case ARM::t2STRH_OFFSET_imm: {
9132 case ARM::t2STRH_PRE_imm:
9133 case ARM::t2STRH_POST_imm: {
9137 : ARM::t2STRH_POST);
9147 case ARM::t2LDRSB_OFFSET_imm: {
9157 case ARM::t2LDRSB_PRE_imm:
9158 case ARM::t2LDRSB_POST_imm: {
9162 : ARM::t2LDRSB_POST);
9172 case ARM::t2LDRSH_OFFSET_imm: {
9182 case ARM::t2LDRSH_PRE_imm:
9183 case ARM::t2LDRSH_POST_imm: {
9187 : ARM::t2LDRSH_POST);
9197 case ARM::t2LDRpcrel:
9206 case ARM::t2LDRBpcrel:
9209 case ARM::t2LDRHpcrel:
9212 case ARM::t2LDRSBpcrel:
9215 case ARM::t2LDRSHpcrel:
9218 case ARM::LDRConstPool:
9219 case ARM::tLDRConstPool:
9220 case ARM::t2LDRConstPool: {
9225 if (Inst.
getOpcode() == ARM::LDRConstPool)
9227 else if (Inst.
getOpcode() == ARM::tLDRConstPool)
9229 else if (Inst.
getOpcode() == ARM::t2LDRConstPool)
9231 const ARMOperand &PoolOperand =
9232 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1]);
9233 const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
9235 if (isa<MCConstantExpr>(SubExprVal) &&
9239 (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
9241 bool MovHasS =
true;
9242 if (Inst.
getOpcode() == ARM::LDRConstPool) {
9252 else if (hasV6T2Ops() &&
9265 else if (hasThumb2() &&
9270 else if (hasV8MBaseline() &&
9291 getTargetStreamer().addConstantPoolEntry(SubExprVal,
9292 PoolOperand.getStartLoc());
9303 case ARM::VST1LNdWB_register_Asm_8:
9304 case ARM::VST1LNdWB_register_Asm_16:
9305 case ARM::VST1LNdWB_register_Asm_32: {
9323 case ARM::VST2LNdWB_register_Asm_8:
9324 case ARM::VST2LNdWB_register_Asm_16:
9325 case ARM::VST2LNdWB_register_Asm_32:
9326 case ARM::VST2LNqWB_register_Asm_16:
9327 case ARM::VST2LNqWB_register_Asm_32: {
9347 case ARM::VST3LNdWB_register_Asm_8:
9348 case ARM::VST3LNdWB_register_Asm_16:
9349 case ARM::VST3LNdWB_register_Asm_32:
9350 case ARM::VST3LNqWB_register_Asm_16:
9351 case ARM::VST3LNqWB_register_Asm_32: {
9373 case ARM::VST4LNdWB_register_Asm_8:
9374 case ARM::VST4LNdWB_register_Asm_16:
9375 case ARM::VST4LNdWB_register_Asm_32:
9376 case ARM::VST4LNqWB_register_Asm_16:
9377 case ARM::VST4LNqWB_register_Asm_32: {
9401 case ARM::VST1LNdWB_fixed_Asm_8:
9402 case ARM::VST1LNdWB_fixed_Asm_16:
9403 case ARM::VST1LNdWB_fixed_Asm_32: {
9421 case ARM::VST2LNdWB_fixed_Asm_8:
9422 case ARM::VST2LNdWB_fixed_Asm_16:
9423 case ARM::VST2LNdWB_fixed_Asm_32:
9424 case ARM::VST2LNqWB_fixed_Asm_16:
9425 case ARM::VST2LNqWB_fixed_Asm_32: {
9445 case ARM::VST3LNdWB_fixed_Asm_8:
9446 case ARM::VST3LNdWB_fixed_Asm_16:
9447 case ARM::VST3LNdWB_fixed_Asm_32:
9448 case ARM::VST3LNqWB_fixed_Asm_16:
9449 case ARM::VST3LNqWB_fixed_Asm_32: {
9471 case ARM::VST4LNdWB_fixed_Asm_8:
9472 case ARM::VST4LNdWB_fixed_Asm_16:
9473 case ARM::VST4LNdWB_fixed_Asm_32:
9474 case ARM::VST4LNqWB_fixed_Asm_16:
9475 case ARM::VST4LNqWB_fixed_Asm_32: {
9499 case ARM::VST1LNdAsm_8:
9500 case ARM::VST1LNdAsm_16:
9501 case ARM::VST1LNdAsm_32: {
9517 case ARM::VST2LNdAsm_8:
9518 case ARM::VST2LNdAsm_16:
9519 case ARM::VST2LNdAsm_32:
9520 case ARM::VST2LNqAsm_16:
9521 case ARM::VST2LNqAsm_32: {
9539 case ARM::VST3LNdAsm_8:
9540 case ARM::VST3LNdAsm_16:
9541 case ARM::VST3LNdAsm_32:
9542 case ARM::VST3LNqAsm_16:
9543 case ARM::VST3LNqAsm_32: {
9563 case ARM::VST4LNdAsm_8:
9564 case ARM::VST4LNdAsm_16:
9565 case ARM::VST4LNdAsm_32:
9566 case ARM::VST4LNqAsm_16:
9567 case ARM::VST4LNqAsm_32: {
9590 case ARM::VLD1LNdWB_register_Asm_8:
9591 case ARM::VLD1LNdWB_register_Asm_16:
9592 case ARM::VLD1LNdWB_register_Asm_32: {
9611 case ARM::VLD2LNdWB_register_Asm_8:
9612 case ARM::VLD2LNdWB_register_Asm_16:
9613 case ARM::VLD2LNdWB_register_Asm_32:
9614 case ARM::VLD2LNqWB_register_Asm_16:
9615 case ARM::VLD2LNqWB_register_Asm_32: {
9638 case ARM::VLD3LNdWB_register_Asm_8:
9639 case ARM::VLD3LNdWB_register_Asm_16:
9640 case ARM::VLD3LNdWB_register_Asm_32:
9641 case ARM::VLD3LNqWB_register_Asm_16:
9642 case ARM::VLD3LNqWB_register_Asm_32: {
9669 case ARM::VLD4LNdWB_register_Asm_8:
9670 case ARM::VLD4LNdWB_register_Asm_16:
9671 case ARM::VLD4LNdWB_register_Asm_32:
9672 case ARM::VLD4LNqWB_register_Asm_16:
9673 case ARM::VLD4LNqWB_register_Asm_32: {
9704 case ARM::VLD1LNdWB_fixed_Asm_8:
9705 case ARM::VLD1LNdWB_fixed_Asm_16:
9706 case ARM::VLD1LNdWB_fixed_Asm_32: {
9725 case ARM::VLD2LNdWB_fixed_Asm_8:
9726 case ARM::VLD2LNdWB_fixed_Asm_16:
9727 case ARM::VLD2LNdWB_fixed_Asm_32:
9728 case ARM::VLD2LNqWB_fixed_Asm_16:
9729 case ARM::VLD2LNqWB_fixed_Asm_32: {
9752 case ARM::VLD3LNdWB_fixed_Asm_8:
9753 case ARM::VLD3LNdWB_fixed_Asm_16:
9754 case ARM::VLD3LNdWB_fixed_Asm_32:
9755 case ARM::VLD3LNqWB_fixed_Asm_16:
9756 case ARM::VLD3LNqWB_fixed_Asm_32: {
9783 case ARM::VLD4LNdWB_fixed_Asm_8:
9784 case ARM::VLD4LNdWB_fixed_Asm_16:
9785 case ARM::VLD4LNdWB_fixed_Asm_32:
9786 case ARM::VLD4LNqWB_fixed_Asm_16:
9787 case ARM::VLD4LNqWB_fixed_Asm_32: {
9818 case ARM::VLD1LNdAsm_8:
9819 case ARM::VLD1LNdAsm_16:
9820 case ARM::VLD1LNdAsm_32: {
9837 case ARM::VLD2LNdAsm_8:
9838 case ARM::VLD2LNdAsm_16:
9839 case ARM::VLD2LNdAsm_32:
9840 case ARM::VLD2LNqAsm_16:
9841 case ARM::VLD2LNqAsm_32: {
9862 case ARM::VLD3LNdAsm_8:
9863 case ARM::VLD3LNdAsm_16:
9864 case ARM::VLD3LNdAsm_32:
9865 case ARM::VLD3LNqAsm_16:
9866 case ARM::VLD3LNqAsm_32: {
9891 case ARM::VLD4LNdAsm_8:
9892 case ARM::VLD4LNdAsm_16:
9893 case ARM::VLD4LNdAsm_32:
9894 case ARM::VLD4LNqAsm_16:
9895 case ARM::VLD4LNqAsm_32: {
9925 case ARM::VLD3DUPdAsm_8:
9926 case ARM::VLD3DUPdAsm_16:
9927 case ARM::VLD3DUPdAsm_32:
9928 case ARM::VLD3DUPqAsm_8:
9929 case ARM::VLD3DUPqAsm_16:
9930 case ARM::VLD3DUPqAsm_32: {
9947 case ARM::VLD3DUPdWB_fixed_Asm_8:
9948 case ARM::VLD3DUPdWB_fixed_Asm_16:
9949 case ARM::VLD3DUPdWB_fixed_Asm_32:
9950 case ARM::VLD3DUPqWB_fixed_Asm_8:
9951 case ARM::VLD3DUPqWB_fixed_Asm_16:
9952 case ARM::VLD3DUPqWB_fixed_Asm_32: {
9971 case ARM::VLD3DUPdWB_register_Asm_8:
9972 case ARM::VLD3DUPdWB_register_Asm_16:
9973 case ARM::VLD3DUPdWB_register_Asm_32:
9974 case ARM::VLD3DUPqWB_register_Asm_8:
9975 case ARM::VLD3DUPqWB_register_Asm_16:
9976 case ARM::VLD3DUPqWB_register_Asm_32: {
9996 case ARM::VLD3dAsm_8:
9997 case ARM::VLD3dAsm_16:
9998 case ARM::VLD3dAsm_32:
9999 case ARM::VLD3qAsm_8:
10000 case ARM::VLD3qAsm_16:
10001 case ARM::VLD3qAsm_32: {
10018 case ARM::VLD3dWB_fixed_Asm_8:
10019 case ARM::VLD3dWB_fixed_Asm_16:
10020 case ARM::VLD3dWB_fixed_Asm_32:
10021 case ARM::VLD3qWB_fixed_Asm_8:
10022 case ARM::VLD3qWB_fixed_Asm_16:
10023 case ARM::VLD3qWB_fixed_Asm_32: {
10042 case ARM::VLD3dWB_register_Asm_8:
10043 case ARM::VLD3dWB_register_Asm_16:
10044 case ARM::VLD3dWB_register_Asm_32:
10045 case ARM::VLD3qWB_register_Asm_8:
10046 case ARM::VLD3qWB_register_Asm_16:
10047 case ARM::VLD3qWB_register_Asm_32: {
10067 case ARM::VLD4DUPdAsm_8:
10068 case ARM::VLD4DUPdAsm_16:
10069 case ARM::VLD4DUPdAsm_32:
10070 case ARM::VLD4DUPqAsm_8:
10071 case ARM::VLD4DUPqAsm_16:
10072 case ARM::VLD4DUPqAsm_32: {
10091 case ARM::VLD4DUPdWB_fixed_Asm_8:
10092 case ARM::VLD4DUPdWB_fixed_Asm_16:
10093 case ARM::VLD4DUPdWB_fixed_Asm_32:
10094 case ARM::VLD4DUPqWB_fixed_Asm_8:
10095 case ARM::VLD4DUPqWB_fixed_Asm_16:
10096 case ARM::VLD4DUPqWB_fixed_Asm_32: {
10117 case ARM::VLD4DUPdWB_register_Asm_8:
10118 case ARM::VLD4DUPdWB_register_Asm_16:
10119 case ARM::VLD4DUPdWB_register_Asm_32:
10120 case ARM::VLD4DUPqWB_register_Asm_8:
10121 case ARM::VLD4DUPqWB_register_Asm_16:
10122 case ARM::VLD4DUPqWB_register_Asm_32: {
10144 case ARM::VLD4dAsm_8:
10145 case ARM::VLD4dAsm_16:
10146 case ARM::VLD4dAsm_32:
10147 case ARM::VLD4qAsm_8:
10148 case ARM::VLD4qAsm_16:
10149 case ARM::VLD4qAsm_32: {
10168 case ARM::VLD4dWB_fixed_Asm_8:
10169 case ARM::VLD4dWB_fixed_Asm_16:
10170 case ARM::VLD4dWB_fixed_Asm_32:
10171 case ARM::VLD4qWB_fixed_Asm_8:
10172 case ARM::VLD4qWB_fixed_Asm_16:
10173 case ARM::VLD4qWB_fixed_Asm_32: {
10194 case ARM::VLD4dWB_register_Asm_8:
10195 case ARM::VLD4dWB_register_Asm_16:
10196 case ARM::VLD4dWB_register_Asm_32:
10197 case ARM::VLD4qWB_register_Asm_8:
10198 case ARM::VLD4qWB_register_Asm_16:
10199 case ARM::VLD4qWB_register_Asm_32: {
10221 case ARM::VST3dAsm_8:
10222 case ARM::VST3dAsm_16:
10223 case ARM::VST3dAsm_32:
10224 case ARM::VST3qAsm_8:
10225 case ARM::VST3qAsm_16:
10226 case ARM::VST3qAsm_32: {
10243 case ARM::VST3dWB_fixed_Asm_8:
10244 case ARM::VST3dWB_fixed_Asm_16:
10245 case ARM::VST3dWB_fixed_Asm_32:
10246 case ARM::VST3qWB_fixed_Asm_8:
10247 case ARM::VST3qWB_fixed_Asm_16:
10248 case ARM::VST3qWB_fixed_Asm_32: {
10267 case ARM::VST3dWB_register_Asm_8:
10268 case ARM::VST3dWB_register_Asm_16:
10269 case ARM::VST3dWB_register_Asm_32:
10270 case ARM::VST3qWB_register_Asm_8:
10271 case ARM::VST3qWB_register_Asm_16:
10272 case ARM::VST3qWB_register_Asm_32: {
10292 case ARM::VST4dAsm_8:
10293 case ARM::VST4dAsm_16:
10294 case ARM::VST4dAsm_32:
10295 case ARM::VST4qAsm_8:
10296 case ARM::VST4qAsm_16:
10297 case ARM::VST4qAsm_32: {
10316 case ARM::VST4dWB_fixed_Asm_8:
10317 case ARM::VST4dWB_fixed_Asm_16:
10318 case ARM::VST4dWB_fixed_Asm_32:
10319 case ARM::VST4qWB_fixed_Asm_8:
10320 case ARM::VST4qWB_fixed_Asm_16:
10321 case ARM::VST4qWB_fixed_Asm_32: {
10342 case ARM::VST4dWB_register_Asm_8:
10343 case ARM::VST4dWB_register_Asm_16:
10344 case ARM::VST4dWB_register_Asm_32:
10345 case ARM::VST4qWB_register_Asm_8:
10346 case ARM::VST4qWB_register_Asm_16:
10347 case ARM::VST4qWB_register_Asm_32: {
10375 !HasWideQualifier) {
10379 case ARM::t2LSLri: NewOpc = ARM::tLSLri;
break;
10380 case ARM::t2LSRri: NewOpc = ARM::tLSRri;
break;
10381 case ARM::t2ASRri: NewOpc = ARM::tASRri;
break;
10399 case ARM::t2MOVSsr: {
10403 bool isNarrow =
false;
10408 inITBlock() == (Inst.
getOpcode() == ARM::t2MOVsr) &&
10415 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr;
break;
10416 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr;
break;
10417 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr;
break;
10418 case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr;
break;
10424 Inst.
getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
10431 Inst.
getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
10436 case ARM::t2MOVSsi: {
10440 bool isNarrow =
false;
10443 inITBlock() == (Inst.
getOpcode() == ARM::t2MOVsi) &&
10450 bool isMov =
false;
10461 newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr;
10465 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri;
break;
10466 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri;
break;
10467 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri;
break;
10468 case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow =
false;
break;
10469 case ARM_AM::rrx: isNarrow =
false; newOpc = ARM::t2RRX;
break;
10472 if (Amount == 32) Amount = 0;
10475 if (isNarrow && !isMov)
10477 Inst.
getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
10479 if (newOpc != ARM::t2RRX && !isMov)
10485 Inst.
getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
10529 unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
10538 if (Opc == ARM::MOVsi)
10559 case ARM::t2LDMIA_UPD: {
10575 case ARM::t2STMDB_UPD: {
10591 case ARM::LDMIA_UPD:
10594 if (
static_cast<ARMOperand &
>(*
Operands[0]).getToken() ==
"pop" &&
10609 case ARM::STMDB_UPD:
10612 if (
static_cast<ARMOperand &
>(*
Operands[0]).getToken() ==
"push" &&
10625 case ARM::t2ADDri12:
10626 case ARM::t2SUBri12:
10627 case ARM::t2ADDspImm12:
10628 case ARM::t2SUBspImm12: {
10632 if ((Token !=
"add" && Token !=
"sub") ||
10636 case ARM::t2ADDri12:
10639 case ARM::t2SUBri12:
10642 case ARM::t2ADDspImm12:
10645 case ARM::t2SUBspImm12:
10660 Operands.size() == MnemonicOpsEndInd + 3) {
10671 Operands.size() == MnemonicOpsEndInd + 3) {
10677 case ARM::t2SUBri: {
10691 ARM::tADDi8 : ARM::tSUBi8);
10701 case ARM::t2ADDspImm:
10702 case ARM::t2SUBspImm: {
10707 if (V & 3 || V > ((1 << 7) - 1) << 2)
10720 case ARM::t2ADDrr: {
10784 case ARM::tLDMIA: {
10790 bool hasWritebackToken =
10791 (
static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
10793 static_cast<ARMOperand &
>(*
Operands[MnemonicOpsEndInd + 1])
10794 .getToken() ==
"!");
10795 bool listContainsBase;
10797 (!listContainsBase && !hasWritebackToken) ||
10798 (listContainsBase && hasWritebackToken)) {
10801 Inst.
setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
10804 if (hasWritebackToken)
10811 case ARM::tSTMIA_UPD: {
10816 bool listContainsBase;
10826 bool listContainsBase;
10840 bool listContainsBase;
10857 !HasWideQualifier) {
10878 !HasWideQualifier) {
10885 if (
Op == ARM::tMOVr) {
10903 !HasWideQualifier) {
10907 case ARM::t2SXTH: NewOpc = ARM::tSXTH;
break;
10908 case ARM::t2SXTB: NewOpc = ARM::tSXTB;
break;
10909 case ARM::t2UXTH: NewOpc = ARM::tUXTH;
break;
10910 case ARM::t2UXTB: NewOpc = ARM::tUXTB;
break;
10948 case ARM::ADDrsi: {
10954 case ARM::ANDrsi: newOpc = ARM::ANDrr;
break;
10955 case ARM::ORRrsi: newOpc = ARM::ORRrr;
break;
10956 case ARM::EORrsi: newOpc = ARM::EORrr;
break;
10957 case ARM::BICrsi: newOpc = ARM::BICrr;
break;
10958 case ARM::SUBrsi: newOpc = ARM::SUBrr;
break;
10959 case ARM::ADDrsi: newOpc = ARM::ADDrr;
break;
10982 assert(!inITBlock() &&
"nested IT blocks?!");
10998 !HasWideQualifier) {
11002 case ARM::t2LSLrr: NewOpc = ARM::tLSLrr;
break;
11003 case ARM::t2LSRrr: NewOpc = ARM::tLSRrr;
break;
11004 case ARM::t2ASRrr: NewOpc = ARM::tASRrr;
break;
11005 case ARM::t2SBCrr: NewOpc = ARM::tSBC;
break;
11006 case ARM::t2RORrr: NewOpc = ARM::tROR;
break;
11007 case ARM::t2BICrr: NewOpc = ARM::tBIC;
break;
11034 !HasWideQualifier) {
11038 case ARM::t2ADCrr: NewOpc = ARM::tADC;
break;
11039 case ARM::t2ANDrr: NewOpc = ARM::tAND;
break;
11040 case ARM::t2EORrr: NewOpc = ARM::tEOR;
break;
11041 case ARM::t2ORRrr: NewOpc = ARM::tORR;
break;
11060 case ARM::MVE_VPST:
11061 case ARM::MVE_VPTv16i8:
11062 case ARM::MVE_VPTv8i16:
11063 case ARM::MVE_VPTv4i32:
11064 case ARM::MVE_VPTv16u8:
11065 case ARM::MVE_VPTv8u16:
11066 case ARM::MVE_VPTv4u32:
11067 case ARM::MVE_VPTv16s8:
11068 case ARM::MVE_VPTv8s16:
11069 case ARM::MVE_VPTv4s32:
11070 case ARM::MVE_VPTv4f32:
11071 case ARM::MVE_VPTv8f16:
11072 case ARM::MVE_VPTv16i8r:
11073 case ARM::MVE_VPTv8i16r:
11074 case ARM::MVE_VPTv4i32r:
11075 case ARM::MVE_VPTv16u8r:
11076 case ARM::MVE_VPTv8u16r:
11077 case ARM::MVE_VPTv4u32r:
11078 case ARM::MVE_VPTv16s8r:
11079 case ARM::MVE_VPTv8s16r:
11080 case ARM::MVE_VPTv4s32r:
11081 case ARM::MVE_VPTv4f32r:
11082 case ARM::MVE_VPTv8f16r: {
11083 assert(!inVPTBlock() &&
"Nested VPT blocks are not allowed");
11085 VPTState.Mask = MO.
getImm();
11086 VPTState.CurPosition = 0;
11094ARMAsmParser::checkEarlyTargetMatchPredicate(
MCInst &Inst,
11102 static_cast<ARMOperand &
>(*
Operands[0]).getToken() ==
"nop" &&
11103 ((
isThumb() && !isThumbOne()) || hasV6MOps())) {
11104 return Match_MnemonicFail;
11109 return Match_Success;
11113unsigned ARMAsmParser::checkTargetMatchPredicate(
MCInst &Inst) {
11120 "optionally flag setting instruction missing optional def operand");
11122 "operand count mismatch!");
11123 bool IsCPSR =
false;
11125 for (
unsigned OpNo = 0; OpNo < MCID.
NumOperands; ++OpNo) {
11126 if (MCID.
operands()[OpNo].isOptionalDef() &&
11133 if (isThumbOne() && !IsCPSR)
11134 return Match_RequiresFlagSetting;
11137 if (isThumbTwo() && !IsCPSR && !inITBlock())
11138 return Match_RequiresITBlock;
11139 if (isThumbTwo() && IsCPSR && inITBlock())
11140 return Match_RequiresNotITBlock;
11142 if (Opc == ARM::tLSLri && Inst.
getOperand(3).
getImm() == 0 && inITBlock())
11143 return Match_RequiresNotITBlock;
11144 }
else if (isThumbOne()) {
11147 if (Opc == ARM::tADDhirr && !hasV6MOps() &&
11150 return Match_RequiresThumb2;
11152 else if (Opc == ARM::tMOVr && !hasV6Ops() &&
11155 return Match_RequiresV6;
11161 if (Opc == ARM::t2MOVr && !hasV8Ops())
11166 return Match_RequiresV8;
11171 return Match_RequiresV8;
11177 case ARM::VMRS_FPCXTS:
11178 case ARM::VMRS_FPCXTNS:
11179 case ARM::VMSR_FPCXTS:
11180 case ARM::VMSR_FPCXTNS:
11181 case ARM::VMRS_FPSCR_NZCVQC:
11182 case ARM::VMSR_FPSCR_NZCVQC:
11184 case ARM::VMRS_VPR:
11186 case ARM::VMSR_VPR:
11192 return Match_InvalidOperand;
11198 return Match_RequiresV8;
11206 return Match_InvalidTiedOperand;
11213 if (MCID.
operands()[
I].RegClass == ARM::rGPRRegClassID) {
11228 unsigned Reg =
Op.getReg();
11229 if ((Reg == ARM::SP) && !hasV8Ops())
11230 return Match_RequiresV8;
11231 else if (Reg == ARM::PC)
11232 return Match_InvalidOperand;
11235 return Match_Success;
11248bool ARMAsmParser::isITBlockTerminator(
MCInst &Inst)
const {
11267 bool MatchingInlineAsm,
11268 bool &EmitInITBlock,
11271 if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
11272 return MatchInstructionImpl(
Operands, Inst, &NearMisses, MatchingInlineAsm);
11276 if (inImplicitITBlock()) {
11277 extendImplicitITBlock(ITState.Cond);
11278 if (MatchInstructionImpl(
Operands, Inst,
nullptr, MatchingInlineAsm) ==
11288 if (InstCond == ITCond) {
11289 EmitInITBlock =
true;
11290 return Match_Success;
11292 invertCurrentITCondition();
11293 EmitInITBlock =
true;
11294 return Match_Success;
11298 rewindImplicitITPosition();
11302 flushPendingInstructions(Out);
11303 unsigned PlainMatchResult =
11304 MatchInstructionImpl(
Operands, Inst, &NearMisses, MatchingInlineAsm);
11305 if (PlainMatchResult == Match_Success) {
11314 EmitInITBlock =
false;
11315 return Match_Success;
11318 EmitInITBlock =
false;
11319 return Match_Success;
11322 EmitInITBlock =
false;
11323 return Match_Success;
11330 startImplicitITBlock();
11331 if (MatchInstructionImpl(
Operands, Inst,
nullptr, MatchingInlineAsm) ==
11338 EmitInITBlock =
true;
11339 return Match_Success;
11342 discardImplicitITBlock();
11346 EmitInITBlock =
false;
11347 return PlainMatchResult;
11351 unsigned VariantID = 0);
11354bool ARMAsmParser::MatchAndEmitInstruction(
SMLoc IDLoc,
unsigned &Opcode,
11357 bool MatchingInlineAsm) {
11359 unsigned MatchResult;
11360 bool PendConditionalInstruction =
false;
11363 MatchResult = MatchInstruction(
Operands, Inst, NearMisses, MatchingInlineAsm,
11364 PendConditionalInstruction, Out);
11369 switch (MatchResult) {
11370 case Match_Success:
11377 if (validateInstruction(Inst,
Operands, MnemonicOpsEndInd)) {
11380 forwardITPosition();
11381 forwardVPTPosition();
11390 while (processInstruction(Inst,
Operands, MnemonicOpsEndInd, Out))
11399 forwardITPosition();
11400 forwardVPTPosition();
11408 if (PendConditionalInstruction) {
11409 PendingConditionalInsts.
push_back(Inst);
11410 if (isITBlockFull() || isITBlockTerminator(Inst))
11411 flushPendingInstructions(Out);
11416 case Match_NearMisses:
11417 ReportNearMisses(NearMisses, IDLoc,
Operands);
11419 case Match_MnemonicFail: {
11420 FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
11422 ((ARMOperand &)*
Operands[0]).getToken(), FBS);
11423 return Error(IDLoc,
"invalid instruction" + Suggestion,
11424 ((ARMOperand &)*
Operands[0]).getLocRange());
11432bool ARMAsmParser::ParseDirective(
AsmToken DirectiveID) {
11438 if (IDVal ==
".word")
11439 parseLiteralValues(4, DirectiveID.
getLoc());
11440 else if (IDVal ==
".short" || IDVal ==
".hword")
11441 parseLiteralValues(2, DirectiveID.
getLoc());
11442 else if (IDVal ==
".thumb")
11443 parseDirectiveThumb(DirectiveID.
getLoc());
11444 else if (IDVal ==
".arm")
11445 parseDirectiveARM(DirectiveID.
getLoc());
11446 else if (IDVal ==
".thumb_func")
11447 parseDirectiveThumbFunc(DirectiveID.
getLoc());
11448 else if (IDVal ==
".code")
11449 parseDirectiveCode(DirectiveID.
getLoc());
11450 else if (IDVal ==
".syntax")
11451 parseDirectiveSyntax(DirectiveID.
getLoc());
11452 else if (IDVal ==
".unreq")
11453 parseDirectiveUnreq(DirectiveID.
getLoc());
11454 else if (IDVal ==
".fnend")
11455 parseDirectiveFnEnd(DirectiveID.
getLoc());
11456 else if (IDVal ==
".cantunwind")
11457 parseDirectiveCantUnwind(DirectiveID.
getLoc());
11458 else if (IDVal ==
".personality")
11459 parseDirectivePersonality(DirectiveID.
getLoc());
11460 else if (IDVal ==
".handlerdata")
11461 parseDirectiveHandlerData(DirectiveID.
getLoc());
11462 else if (IDVal ==
".setfp")
11463 parseDirectiveSetFP(DirectiveID.
getLoc());
11464 else if (IDVal ==
".pad")
11465 parseDirectivePad(DirectiveID.
getLoc());
11466 else if (IDVal ==
".save")
11467 parseDirectiveRegSave(DirectiveID.
getLoc(),
false);
11468 else if (IDVal ==
".vsave")
11469 parseDirectiveRegSave(DirectiveID.
getLoc(),
true);
11470 else if (IDVal ==
".ltorg" || IDVal ==
".pool")
11471 parseDirectiveLtorg(DirectiveID.
getLoc());
11472 else if (IDVal ==
".even")
11473 parseDirectiveEven(DirectiveID.
getLoc());
11474 else if (IDVal ==
".personalityindex")
11475 parseDirectivePersonalityIndex(DirectiveID.
getLoc());
11476 else if (IDVal ==
".unwind_raw")
11477 parseDirectiveUnwindRaw(DirectiveID.
getLoc());
11478 else if (IDVal ==
".movsp")
11479 parseDirectiveMovSP(DirectiveID.
getLoc());
11480 else if (IDVal ==
".arch_extension")
11481 parseDirectiveArchExtension(DirectiveID.
getLoc());
11482 else if (IDVal ==
".align")
11483 return parseDirectiveAlign(DirectiveID.
getLoc());
11484 else if (IDVal ==
".thumb_set")
11485 parseDirectiveThumbSet(DirectiveID.
getLoc());
11486 else if (IDVal ==
".inst")
11487 parseDirectiveInst(DirectiveID.
getLoc());
11488 else if (IDVal ==
".inst.n")
11489 parseDirectiveInst(DirectiveID.
getLoc(),
'n');
11490 else if (IDVal ==
".inst.w")
11491 parseDirectiveInst(DirectiveID.
getLoc(),
'w');
11492 else if (!IsMachO && !IsCOFF) {
11493 if (IDVal ==
".arch")
11494 parseDirectiveArch(DirectiveID.
getLoc());
11495 else if (IDVal ==
".cpu")
11496 parseDirectiveCPU(DirectiveID.
getLoc());
11497 else if (IDVal ==
".eabi_attribute")
11498 parseDirectiveEabiAttr(DirectiveID.
getLoc());
11499 else if (IDVal ==
".fpu")
11500 parseDirectiveFPU(DirectiveID.
getLoc());
11501 else if (IDVal ==
".fnstart")
11502 parseDirectiveFnStart(DirectiveID.
getLoc());
11503 else if (IDVal ==
".object_arch")
11504 parseDirectiveObjectArch(DirectiveID.
getLoc());
11505 else if (IDVal ==
".tlsdescseq")
11506 parseDirectiveTLSDescSeq(DirectiveID.
getLoc());
11509 }
else if (IsCOFF) {
11510 if (IDVal ==
".seh_stackalloc")
11511 parseDirectiveSEHAllocStack(DirectiveID.
getLoc(),
false);
11512 else if (IDVal ==
".seh_stackalloc_w")
11513 parseDirectiveSEHAllocStack(DirectiveID.
getLoc(),
true);
11514 else if (IDVal ==
".seh_save_regs")
11515 parseDirectiveSEHSaveRegs(DirectiveID.
getLoc(),
false);
11516 else if (IDVal ==
".seh_save_regs_w")
11517 parseDirectiveSEHSaveRegs(DirectiveID.
getLoc(),
true);
11518 else if (IDVal ==
".seh_save_sp")
11519 parseDirectiveSEHSaveSP(DirectiveID.
getLoc());
11520 else if (IDVal ==
".seh_save_fregs")
11521 parseDirectiveSEHSaveFRegs(DirectiveID.
getLoc());
11522 else if (IDVal ==
".seh_save_lr")
11523 parseDirectiveSEHSaveLR(DirectiveID.
getLoc());
11524 else if (IDVal ==
".seh_endprologue")
11525 parseDirectiveSEHPrologEnd(DirectiveID.
getLoc(),
false);
11526 else if (IDVal ==
".seh_endprologue_fragment")
11527 parseDirectiveSEHPrologEnd(DirectiveID.
getLoc(),
true);
11528 else if (IDVal ==
".seh_nop")
11529 parseDirectiveSEHNop(DirectiveID.
getLoc(),
false);
11530 else if (IDVal ==
".seh_nop_w")
11531 parseDirectiveSEHNop(DirectiveID.
getLoc(),
true);
11532 else if (IDVal ==
".seh_startepilogue")
11533 parseDirectiveSEHEpilogStart(DirectiveID.
getLoc(),
false);
11534 else if (IDVal ==
".seh_startepilogue_cond")
11535 parseDirectiveSEHEpilogStart(DirectiveID.
getLoc(),
true);
11536 else if (IDVal ==
".seh_endepilogue")
11537 parseDirectiveSEHEpilogEnd(DirectiveID.
getLoc());
11538 else if (IDVal ==
".seh_custom")
11539 parseDirectiveSEHCustom(DirectiveID.
getLoc());
11551bool ARMAsmParser::parseLiteralValues(
unsigned Size,
SMLoc L) {
11552 auto parseOne = [&]() ->
bool {
11554 if (getParser().parseExpression(
Value))
11556 getParser().getStreamer().emitValue(
Value,
Size, L);
11559 return (parseMany(parseOne));
11564bool ARMAsmParser::parseDirectiveThumb(
SMLoc L) {
11565 if (parseEOL() || check(!hasThumb(), L,
"target does not support Thumb mode"))
11571 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code16);
11572 getParser().getStreamer().emitCodeAlignment(
Align(2), &getSTI(), 0);
11578bool ARMAsmParser::parseDirectiveARM(
SMLoc L) {
11579 if (parseEOL() || check(!hasARM(), L,
"target does not support ARM mode"))
11584 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code32);
11585 getParser().getStreamer().emitCodeAlignment(
Align(4), &getSTI(), 0);
11620void ARMAsmParser::doBeforeLabelEmit(
MCSymbol *Symbol,
SMLoc IDLoc) {
11623 flushPendingInstructions(getStreamer());
11626void ARMAsmParser::onLabelParsed(
MCSymbol *Symbol) {
11627 if (NextSymbolIsThumb) {
11628 getParser().getStreamer().emitThumbFunc(Symbol);
11629 NextSymbolIsThumb =
false;
11635bool ARMAsmParser::parseDirectiveThumbFunc(
SMLoc L) {
11637 const auto Format = getContext().getObjectFileType();
11646 MCSymbol *
Func = getParser().getContext().getOrCreateSymbol(
11648 getParser().getStreamer().emitThumbFunc(Func);
11663 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code16);
11665 NextSymbolIsThumb =
true;
11671bool ARMAsmParser::parseDirectiveSyntax(
SMLoc L) {
11675 Error(L,
"unexpected token in .syntax directive");
11681 if (check(Mode ==
"divided" || Mode ==
"DIVIDED", L,
11682 "'.syntax divided' arm assembly not supported") ||
11683 check(Mode !=
"unified" && Mode !=
"UNIFIED", L,
11684 "unrecognized syntax mode in .syntax directive") ||
11695bool ARMAsmParser::parseDirectiveCode(
SMLoc L) {
11699 return Error(L,
"unexpected token in .code directive");
11701 if (Val != 16 && Val != 32) {
11702 Error(L,
"invalid operand to .code directive");
11712 return Error(L,
"target does not support Thumb mode");
11716 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code16);
11719 return Error(L,
"target does not support ARM mode");
11723 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code32);
11735 SMLoc SRegLoc, ERegLoc;
11736 if (check(parseRegister(Reg, SRegLoc, ERegLoc), SRegLoc,
11737 "register name expected") ||
11741 if (RegisterReqs.
insert(std::make_pair(
Name, Reg)).first->second != Reg)
11742 return Error(SRegLoc,
11743 "redefinition of '" +
Name +
"' does not match original.");
11750bool ARMAsmParser::parseDirectiveUnreq(
SMLoc L) {
11753 return Error(L,
"unexpected input in .unreq directive.");
11762void ARMAsmParser::FixModeAfterArchChange(
bool WasThumb,
SMLoc Loc) {
11764 if (WasThumb && hasThumb()) {
11767 }
else if (!WasThumb && hasARM()) {
11778 (WasThumb ?
"thumb" :
"arm") +
" mode, switching to " +
11779 (!WasThumb ?
"thumb" :
"arm") +
" mode");
11786bool ARMAsmParser::parseDirectiveArch(
SMLoc L) {
11787 StringRef Arch = getParser().parseStringToEndOfStatement().
trim();
11790 if (
ID == ARM::ArchKind::INVALID)
11791 return Error(L,
"Unknown arch name");
11798 setAvailableFeatures(ComputeAvailableFeatures(STI.
getFeatureBits()));
11799 FixModeAfterArchChange(WasThumb, L);
11801 getTargetStreamer().emitArch(
ID);
11808bool ARMAsmParser::parseDirectiveEabiAttr(
SMLoc L) {
11818 Error(TagLoc,
"attribute name not recognised: " +
Name);
11831 if (check(!CE, TagLoc,
"expected numeric constant"))
11834 Tag =
CE->getValue();
11841 bool IsStringValue =
false;
11843 int64_t IntegerValue = 0;
11844 bool IsIntegerValue =
false;
11847 IsStringValue =
true;
11849 IsStringValue =
true;
11850 IsIntegerValue =
true;
11851 }
else if (
Tag < 32 ||
Tag % 2 == 0)
11852 IsIntegerValue =
true;
11853 else if (
Tag % 2 == 1)
11854 IsStringValue =
true;
11858 if (IsIntegerValue) {
11859 const MCExpr *ValueExpr;
11866 return Error(ValueExprLoc,
"expected numeric constant");
11867 IntegerValue =
CE->getValue();
11875 std::string EscapedValue;
11876 if (IsStringValue) {
11884 StringValue = EscapedValue;
11894 if (IsIntegerValue && IsStringValue) {
11896 getTargetStreamer().emitIntTextAttribute(
Tag, IntegerValue, StringValue);
11897 }
else if (IsIntegerValue)
11898 getTargetStreamer().emitAttribute(
Tag, IntegerValue);
11899 else if (IsStringValue)
11900 getTargetStreamer().emitTextAttribute(
Tag, StringValue);
11906bool ARMAsmParser::parseDirectiveCPU(
SMLoc L) {
11907 StringRef CPU = getParser().parseStringToEndOfStatement().
trim();
11912 if (!getSTI().isCPUStringValid(CPU))
11913 return Error(L,
"Unknown CPU name");
11918 setAvailableFeatures(ComputeAvailableFeatures(STI.
getFeatureBits()));
11919 FixModeAfterArchChange(WasThumb, L);
11926bool ARMAsmParser::parseDirectiveFPU(
SMLoc L) {
11927 SMLoc FPUNameLoc = getTok().getLoc();
11928 StringRef FPU = getParser().parseStringToEndOfStatement().
trim();
11931 std::vector<StringRef> Features;
11933 return Error(FPUNameLoc,
"Unknown FPU name");
11936 for (
auto Feature : Features)
11938 setAvailableFeatures(ComputeAvailableFeatures(STI.
getFeatureBits()));
11940 getTargetStreamer().emitFPU(
ID);
11946bool ARMAsmParser::parseDirectiveFnStart(
SMLoc L) {
11950 if (UC.hasFnStart()) {
11951 Error(L,
".fnstart starts before the end of previous one");
11952 UC.emitFnStartLocNotes();
11959 getTargetStreamer().emitFnStart();
11961 UC.recordFnStart(L);
11967bool ARMAsmParser::parseDirectiveFnEnd(
SMLoc L) {
11971 if (!UC.hasFnStart())
11972 return Error(L,
".fnstart must precede .fnend directive");
11975 getTargetStreamer().emitFnEnd();
11983bool ARMAsmParser::parseDirectiveCantUnwind(
SMLoc L) {
11987 UC.recordCantUnwind(L);
11989 if (check(!UC.hasFnStart(), L,
".fnstart must precede .cantunwind directive"))
11992 if (UC.hasHandlerData()) {
11993 Error(L,
".cantunwind can't be used with .handlerdata directive");
11994 UC.emitHandlerDataLocNotes();
11997 if (UC.hasPersonality()) {
11998 Error(L,
".cantunwind can't be used with .personality directive");
11999 UC.emitPersonalityLocNotes();
12003 getTargetStreamer().emitCantUnwind();
12009bool ARMAsmParser::parseDirectivePersonality(
SMLoc L) {
12011 bool HasExistingPersonality = UC.hasPersonality();
12015 return Error(L,
"unexpected input in .personality directive.");
12022 UC.recordPersonality(L);
12025 if (!UC.hasFnStart())
12026 return Error(L,
".fnstart must precede .personality directive");
12027 if (UC.cantUnwind()) {
12028 Error(L,
".personality can't be used with .cantunwind directive");
12029 UC.emitCantUnwindLocNotes();
12032 if (UC.hasHandlerData()) {
12033 Error(L,
".personality must precede .handlerdata directive");
12034 UC.emitHandlerDataLocNotes();
12037 if (HasExistingPersonality) {
12038 Error(L,
"multiple personality directives");
12039 UC.emitPersonalityLocNotes();
12043 MCSymbol *PR = getParser().getContext().getOrCreateSymbol(
Name);
12044 getTargetStreamer().emitPersonality(PR);
12050bool ARMAsmParser::parseDirectiveHandlerData(
SMLoc L) {
12054 UC.recordHandlerData(L);
12056 if (!UC.hasFnStart())
12057 return Error(L,
".fnstart must precede .personality directive");
12058 if (UC.cantUnwind()) {
12059 Error(L,
".handlerdata can't be used with .cantunwind directive");
12060 UC.emitCantUnwindLocNotes();
12064 getTargetStreamer().emitHandlerData();
12070bool ARMAsmParser::parseDirectiveSetFP(
SMLoc L) {
12073 if (check(!UC.hasFnStart(), L,
".fnstart must precede .setfp directive") ||
12074 check(UC.hasHandlerData(), L,
12075 ".setfp must precede .handlerdata directive"))
12080 int FPReg = tryParseRegister();
12082 if (check(FPReg == -1, FPRegLoc,
"frame pointer register expected") ||
12088 int SPReg = tryParseRegister();
12089 if (check(SPReg == -1, SPRegLoc,
"stack pointer register expected") ||
12090 check(SPReg != ARM::SP && SPReg != UC.getFPReg(), SPRegLoc,
12091 "register should be either $sp or the latest fp register"))
12095 UC.saveFPReg(FPReg);
12105 const MCExpr *OffsetExpr;
12108 if (getParser().parseExpression(OffsetExpr, EndLoc))
12109 return Error(ExLoc,
"malformed setfp offset");
12111 if (check(!CE, ExLoc,
"setfp offset must be an immediate"))
12119 getTargetStreamer().emitSetFP(
static_cast<unsigned>(FPReg),
12120 static_cast<unsigned>(SPReg),
Offset);
12126bool ARMAsmParser::parseDirectivePad(
SMLoc L) {
12129 if (!UC.hasFnStart())
12130 return Error(L,
".fnstart must precede .pad directive");
12131 if (UC.hasHandlerData())
12132 return Error(L,
".pad must precede .handlerdata directive");
12140 const MCExpr *OffsetExpr;
12143 if (getParser().parseExpression(OffsetExpr, EndLoc))
12144 return Error(ExLoc,
"malformed pad offset");
12147 return Error(ExLoc,
"pad offset must be an immediate");
12152 getTargetStreamer().emitPad(
CE->getValue());
12159bool ARMAsmParser::parseDirectiveRegSave(
SMLoc L,
bool IsVector) {
12161 if (!UC.hasFnStart())
12162 return Error(L,
".fnstart must precede .save or .vsave directives");
12163 if (UC.hasHandlerData())
12164 return Error(L,
".save or .vsave must precede .handlerdata directive");
12170 if (parseRegisterList(
Operands,
true,
true) || parseEOL())
12172 ARMOperand &
Op = (ARMOperand &)*
Operands[0];
12173 if (!IsVector && !
Op.isRegList())
12174 return Error(L,
".save expects GPR registers");
12175 if (IsVector && !
Op.isDPRRegList())
12176 return Error(L,
".vsave expects DPR registers");
12178 getTargetStreamer().emitRegSave(
Op.getRegList(), IsVector);
12186bool ARMAsmParser::parseDirectiveInst(
SMLoc Loc,
char Suffix) {
12202 return Error(Loc,
"width suffixes are invalid in ARM mode");
12205 auto parseOne = [&]() ->
bool {
12207 if (getParser().parseExpression(Expr))
12211 return Error(Loc,
"expected constant expression");
12214 char CurSuffix = Suffix;
12217 if (
Value->getValue() > 0xffff)
12218 return Error(Loc,
"inst.n operand is too big, use inst.w instead");
12221 if (
Value->getValue() > 0xffffffff)
12223 " operand is too big");
12227 if (
Value->getValue() < 0xe800)
12229 else if (
Value->getValue() >= 0xe8000000)
12232 return Error(Loc,
"cannot determine Thumb instruction size, "
12233 "use inst.n/inst.w instead");
12239 getTargetStreamer().emitInst(
Value->getValue(), CurSuffix);
12240 forwardITPosition();
12241 forwardVPTPosition();
12246 return Error(Loc,
"expected expression following directive");
12247 if (parseMany(parseOne))
12254bool ARMAsmParser::parseDirectiveLtorg(
SMLoc L) {
12257 getTargetStreamer().emitCurrentConstantPool();
12261bool ARMAsmParser::parseDirectiveEven(
SMLoc L) {
12268 getStreamer().initSections(
false, getSTI());
12269 Section = getStreamer().getCurrentSectionOnly();
12272 assert(Section &&
"must have section to emit alignment");
12274 getStreamer().emitCodeAlignment(
Align(2), &getSTI());
12276 getStreamer().emitValueToAlignment(
Align(2));
12283bool ARMAsmParser::parseDirectivePersonalityIndex(
SMLoc L) {
12285 bool HasExistingPersonality = UC.hasPersonality();
12287 const MCExpr *IndexExpression;
12293 UC.recordPersonalityIndex(L);
12295 if (!UC.hasFnStart()) {
12296 return Error(L,
".fnstart must precede .personalityindex directive");
12298 if (UC.cantUnwind()) {
12299 Error(L,
".personalityindex cannot be used with .cantunwind");
12300 UC.emitCantUnwindLocNotes();
12303 if (UC.hasHandlerData()) {
12304 Error(L,
".personalityindex must precede .handlerdata directive");
12305 UC.emitHandlerDataLocNotes();
12308 if (HasExistingPersonality) {
12309 Error(L,
"multiple personality directives");
12310 UC.emitPersonalityLocNotes();
12316 return Error(IndexLoc,
"index must be a constant number");
12318 return Error(IndexLoc,
12319 "personality routine index should be in range [0-3]");
12321 getTargetStreamer().emitPersonalityIndex(
CE->getValue());
12327bool ARMAsmParser::parseDirectiveUnwindRaw(
SMLoc L) {
12330 const MCExpr *OffsetExpr;
12331 SMLoc OffsetLoc = getLexer().getLoc();
12333 if (!UC.hasFnStart())
12334 return Error(L,
".fnstart must precede .unwind_raw directives");
12335 if (getParser().parseExpression(OffsetExpr))
12336 return Error(OffsetLoc,
"expected expression");
12340 return Error(OffsetLoc,
"offset must be a constant");
12349 auto parseOne = [&]() ->
bool {
12350 const MCExpr *OE =
nullptr;
12351 SMLoc OpcodeLoc = getLexer().getLoc();
12354 OpcodeLoc,
"expected opcode expression"))
12358 return Error(OpcodeLoc,
"opcode value must be a constant");
12359 const int64_t Opcode =
OC->getValue();
12360 if (Opcode & ~0xff)
12361 return Error(OpcodeLoc,
"invalid opcode");
12367 SMLoc OpcodeLoc = getLexer().getLoc();
12369 return Error(OpcodeLoc,
"expected opcode expression");
12370 if (parseMany(parseOne))
12373 getTargetStreamer().emitUnwindRaw(
StackOffset, Opcodes);
12379bool ARMAsmParser::parseDirectiveTLSDescSeq(
SMLoc L) {
12383 return TokError(
"expected variable after '.tlsdescseq' directive");
12393 getTargetStreamer().annotateTLSDescriptorSequence(SRE);
12399bool ARMAsmParser::parseDirectiveMovSP(
SMLoc L) {
12401 if (!UC.hasFnStart())
12402 return Error(L,
".fnstart must precede .movsp directives");
12403 if (UC.getFPReg() != ARM::SP)
12404 return Error(L,
"unexpected .movsp directive");
12407 int SPReg = tryParseRegister();
12409 return Error(SPRegLoc,
"register expected");
12410 if (SPReg == ARM::SP || SPReg == ARM::PC)
12411 return Error(SPRegLoc,
"sp and pc are not permitted in .movsp directive");
12418 const MCExpr *OffsetExpr;
12422 return Error(OffsetLoc,
"malformed offset expression");
12426 return Error(OffsetLoc,
"offset must be an immediate constant");
12434 getTargetStreamer().emitMovSP(SPReg,
Offset);
12435 UC.saveFPReg(SPReg);
12442bool ARMAsmParser::parseDirectiveObjectArch(
SMLoc L) {
12445 return Error(getLexer().getLoc(),
"unexpected token");
12453 if (
ID == ARM::ArchKind::INVALID)
12454 return Error(ArchLoc,
"unknown architecture '" + Arch +
"'");
12458 getTargetStreamer().emitObjectArch(
ID);
12464bool ARMAsmParser::parseDirectiveAlign(
SMLoc L) {
12470 assert(Section &&
"must have section to emit alignment");
12472 getStreamer().emitCodeAlignment(
Align(4), &getSTI(), 0);
12474 getStreamer().emitValueToAlignment(
Align(4), 0, 1, 0);
12482bool ARMAsmParser::parseDirectiveThumbSet(
SMLoc L) {
12487 "expected identifier after '.thumb_set'") ||
12497 getTargetStreamer().emitThumbSet(
Sym,
Value);
12504bool ARMAsmParser::parseDirectiveSEHAllocStack(
SMLoc L,
bool Wide) {
12506 if (parseImmExpr(
Size))
12508 getTargetStreamer().emitARMWinCFIAllocStack(
Size, Wide);
12515bool ARMAsmParser::parseDirectiveSEHSaveRegs(
SMLoc L,
bool Wide) {
12518 if (parseRegisterList(
Operands) || parseEOL())
12520 ARMOperand &
Op = (ARMOperand &)*
Operands[0];
12521 if (!
Op.isRegList())
12522 return Error(L,
".seh_save_regs{_w} expects GPR registers");
12525 for (
size_t i = 0; i < RegList.
size(); ++i) {
12526 unsigned Reg =
MRI->getEncodingValue(RegList[i]);
12530 return Error(L,
".seh_save_regs{_w} can't include SP");
12531 assert(Reg < 16U &&
"Register out of range");
12532 unsigned Bit = (1u <<
Reg);
12535 if (!Wide && (Mask & 0x1f00) != 0)
12537 ".seh_save_regs cannot save R8-R12, needs .seh_save_regs_w");
12538 getTargetStreamer().emitARMWinCFISaveRegMask(Mask, Wide);
12544bool ARMAsmParser::parseDirectiveSEHSaveSP(
SMLoc L) {
12545 int Reg = tryParseRegister();
12546 if (Reg == -1 || !
MRI->getRegClass(ARM::GPRRegClassID).contains(Reg))
12547 return Error(L,
"expected GPR");
12548 unsigned Index =
MRI->getEncodingValue(Reg);
12550 return Error(L,
"invalid register for .seh_save_sp");
12551 getTargetStreamer().emitARMWinCFISaveSP(
Index);
12557bool ARMAsmParser::parseDirectiveSEHSaveFRegs(
SMLoc L) {
12560 if (parseRegisterList(
Operands) || parseEOL())
12562 ARMOperand &
Op = (ARMOperand &)*
Operands[0];
12563 if (!
Op.isDPRRegList())
12564 return Error(L,
".seh_save_fregs expects DPR registers");
12567 for (
size_t i = 0; i < RegList.
size(); ++i) {
12568 unsigned Reg =
MRI->getEncodingValue(RegList[i]);
12569 assert(Reg < 32U &&
"Register out of range");
12570 unsigned Bit = (1u <<
Reg);
12575 return Error(L,
".seh_save_fregs missing registers");
12577 unsigned First = 0;
12578 while ((Mask & 1) == 0) {
12582 if (((Mask + 1) & Mask) != 0)
12584 ".seh_save_fregs must take a contiguous range of registers");
12586 while ((Mask & 2) != 0) {
12590 if (First < 16 && Last >= 16)
12591 return Error(L,
".seh_save_fregs must be all d0-d15 or d16-d31");
12592 getTargetStreamer().emitARMWinCFISaveFRegs(
First,
Last);
12598bool ARMAsmParser::parseDirectiveSEHSaveLR(
SMLoc L) {
12600 if (parseImmExpr(
Offset))
12602 getTargetStreamer().emitARMWinCFISaveLR(
Offset);
12609bool ARMAsmParser::parseDirectiveSEHPrologEnd(
SMLoc L,
bool Fragment) {
12610 getTargetStreamer().emitARMWinCFIPrologEnd(Fragment);
12617bool ARMAsmParser::parseDirectiveSEHNop(
SMLoc L,
bool Wide) {
12618 getTargetStreamer().emitARMWinCFINop(Wide);
12625bool ARMAsmParser::parseDirectiveSEHEpilogStart(
SMLoc L,
bool Condition) {
12632 return Error(S,
".seh_startepilogue_cond missing condition");
12635 return Error(S,
"invalid condition");
12639 getTargetStreamer().emitARMWinCFIEpilogStart(
CC);
12645bool ARMAsmParser::parseDirectiveSEHEpilogEnd(
SMLoc L) {
12646 getTargetStreamer().emitARMWinCFIEpilogEnd();
12652bool ARMAsmParser::parseDirectiveSEHCustom(
SMLoc L) {
12653 unsigned Opcode = 0;
12656 if (parseImmExpr(Byte))
12658 if (Byte > 0xff || Byte < 0)
12659 return Error(L,
"Invalid byte value in .seh_custom");
12660 if (Opcode > 0x00ffffff)
12661 return Error(L,
"Too many bytes in .seh_custom");
12664 Opcode = (Opcode << 8) | Byte;
12666 getTargetStreamer().emitARMWinCFICustom(Opcode);
12678#define GET_REGISTER_MATCHER
12679#define GET_SUBTARGET_FEATURE_NAME
12680#define GET_MATCHER_IMPLEMENTATION
12681#define GET_MNEMONIC_SPELL_CHECKER
12682#include "ARMGenAsmMatcher.inc"
12688ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
12689 switch (MatchError) {
12692 return hasV8Ops() ?
"operand must be a register in range [r0, r14]"
12693 :
"operand must be a register in range [r0, r12] or r14";
12696 return hasD32() ?
"operand must be a register in range [d0, d31]"
12697 :
"operand must be a register in range [d0, d15]";
12698 case Match_DPR_RegList:
12699 return hasD32() ?
"operand must be a list of registers in range [d0, d31]"
12700 :
"operand must be a list of registers in range [d0, d15]";
12704 return getMatchKindDiag(MatchError);
12727 std::multimap<unsigned, unsigned> OperandMissesSeen;
12729 bool ReportedTooFewOperands =
false;
12736 switch (
I.getKind()) {
12739 ((ARMOperand &)*
Operands[
I.getOperandIndex()]).getStartLoc();
12740 const char *OperandDiag =
12741 getCustomOperandDiag((ARMMatchResultTy)
I.getOperandError());
12748 unsigned DupCheckMatchClass = OperandDiag ?
I.getOperandClass() : ~0
U;
12749 auto PrevReports = OperandMissesSeen.equal_range(
I.getOperandIndex());
12750 if (std::any_of(PrevReports.first, PrevReports.second,
12751 [DupCheckMatchClass](
12752 const std::pair<unsigned, unsigned> Pair) {
12753 if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
12754 return Pair.second == DupCheckMatchClass;
12756 return isSubclass((MatchClassKind)DupCheckMatchClass,
12757 (MatchClassKind)Pair.second);
12760 OperandMissesSeen.insert(
12761 std::make_pair(
I.getOperandIndex(), DupCheckMatchClass));
12763 NearMissMessage Message;
12764 Message.Loc = OperandLoc;
12766 Message.Message = OperandDiag;
12767 }
else if (
I.getOperandClass() == InvalidMatchClass) {
12768 Message.Message =
"too many operands for instruction";
12770 Message.Message =
"invalid operand for instruction";
12772 dbgs() <<
"Missing diagnostic string for operand class "
12773 << getMatchClassName((MatchClassKind)
I.getOperandClass())
12774 <<
I.getOperandClass() <<
", error " <<
I.getOperandError()
12775 <<
", opcode " << MII.getName(
I.getOpcode()) <<
"\n");
12783 if (FeatureMissesSeen.
count(MissingFeatures))
12785 FeatureMissesSeen.
insert(MissingFeatures);
12789 if (MissingFeatures.
test(Feature_IsARMBit) && !hasARM())
12793 if (
isThumb() && MissingFeatures.
test(Feature_IsARMBit) &&
12794 MissingFeatures.
count() > 1)
12796 if (!
isThumb() && MissingFeatures.
test(Feature_IsThumbBit) &&
12797 MissingFeatures.
count() > 1)
12799 if (!
isThumb() && MissingFeatures.
test(Feature_IsThumb2Bit) &&
12801 Feature_IsThumbBit})).
any())
12803 if (isMClass() && MissingFeatures.
test(Feature_HasNEONBit))
12806 NearMissMessage Message;
12807 Message.Loc = IDLoc;
12810 OS <<
"instruction requires:";
12811 for (
unsigned i = 0, e = MissingFeatures.
size(); i != e; ++i)
12812 if (MissingFeatures.
test(i))
12820 NearMissMessage Message;
12821 Message.Loc = IDLoc;
12822 switch (
I.getPredicateError()) {
12823 case Match_RequiresNotITBlock:
12824 Message.Message =
"flag setting instruction only valid outside IT block";
12826 case Match_RequiresITBlock:
12827 Message.Message =
"instruction only valid inside IT block";
12829 case Match_RequiresV6:
12830 Message.Message =
"instruction variant requires ARMv6 or later";
12832 case Match_RequiresThumb2:
12833 Message.Message =
"instruction variant requires Thumb2";
12835 case Match_RequiresV8:
12836 Message.Message =
"instruction variant requires ARMv8 or later";
12838 case Match_RequiresFlagSetting:
12839 Message.Message =
"no flag-preserving variant of this instruction available";
12841 case Match_InvalidTiedOperand: {
12842 ARMOperand &
Op =
static_cast<ARMOperand &
>(*
Operands[0]);
12843 if (
Op.isToken() &&
Op.getToken() ==
"mul") {
12844 Message.Message =
"destination register must match a source register";
12845 Message.Loc =
Operands[MnemonicOpsEndInd]->getStartLoc();
12851 case Match_InvalidOperand:
12852 Message.Message =
"invalid operand for instruction";
12862 if (!ReportedTooFewOperands) {
12863 SMLoc EndLoc = ((ARMOperand &)*
Operands.back()).getEndLoc();
12865 EndLoc,
StringRef(
"too few operands for instruction")});
12866 ReportedTooFewOperands =
true;
12881 FilterNearMisses(NearMisses, Messages, IDLoc,
Operands);
12883 if (Messages.
size() == 0) {
12886 Error(IDLoc,
"invalid instruction");
12887 }
else if (Messages.
size() == 1) {
12889 Error(Messages[0].Loc, Messages[0].Message);
12893 Error(IDLoc,
"invalid instruction, any one of the following would fix this:");
12894 for (
auto &M : Messages) {
12904 static const struct {
12909 {
ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC}},
12911 {Feature_HasV8Bit},
12912 {ARM::FeatureAES, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12914 {Feature_HasV8Bit},
12915 {ARM::FeatureSHA2, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12917 {Feature_HasV8Bit},
12918 {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12920 {Feature_HasV8_1MMainlineBit},
12921 {ARM::HasMVEFloatOps}},
12923 {Feature_HasV8Bit},
12924 {ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12926 {Feature_HasV7Bit, Feature_IsNotMClassBit},
12927 {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM}},
12929 {Feature_HasV7Bit, Feature_IsNotMClassBit},
12932 {Feature_HasV8Bit},
12933 {ARM::FeatureNEON, ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12934 {
ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone}},
12936 {
ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization}},
12938 {Feature_HasV8_2aBit},
12939 {ARM::FeatureFPARMv8, ARM::FeatureFullFP16}},
12940 {
ARM::AEK_RAS, {Feature_HasV8Bit}, {ARM::FeatureRAS}},
12941 {
ARM::AEK_LOB, {Feature_HasV8_1MMainlineBit}, {ARM::FeatureLOB}},
12942 {
ARM::AEK_PACBTI, {Feature_HasV8_1MMainlineBit}, {ARM::FeaturePACBTI}},
12950 bool EnableFeature = !
Name.consume_front_insensitive(
"no");
12953 return Error(ExtLoc,
"unknown architectural extension: " +
Name);
12960 return Error(ExtLoc,
"unsupported architectural extension: " +
Name);
12963 return Error(ExtLoc,
"architectural extension '" +
Name +
12965 "allowed for the current base architecture");
12968 if (EnableFeature) {
12974 setAvailableFeatures(Features);
12982bool ARMAsmParser::parseDirectiveArchExtension(
SMLoc L) {
12987 return Error(getLexer().getLoc(),
"expected architecture extension name");
12996 if (
Name ==
"nocrypto") {
12997 enableArchExtFeature(
"nosha2", ExtLoc);
12998 enableArchExtFeature(
"noaes", ExtLoc);
13001 if (enableArchExtFeature(
Name, ExtLoc))
13004 return Error(ExtLoc,
"unknown architectural extension: " +
Name);
13011 ARMOperand &
Op =
static_cast<ARMOperand &
>(AsmOp);
13020 if (
CE->getValue() == 0)
13021 return Match_Success;
13026 if (
CE->getValue() == 8)
13027 return Match_Success;
13032 if (
CE->getValue() == 16)
13033 return Match_Success;
13037 const MCExpr *SOExpr =
Op.getImm();
13039 if (!SOExpr->evaluateAsAbsolute(
Value))
13040 return Match_Success;
13041 assert((
Value >= std::numeric_limits<int32_t>::min() &&
13042 Value <= std::numeric_limits<uint32_t>::max()) &&
13043 "expression value must be representable in 32 bits");
13047 if (hasV8Ops() &&
Op.isReg() &&
Op.getReg() == ARM::SP)
13048 return Match_Success;
13051 return Match_InvalidOperand;
13054bool ARMAsmParser::isMnemonicVPTPredicable(
StringRef Mnemonic,
13059 if (MS.isVPTPredicableCDEInstr(Mnemonic) ||
13060 (Mnemonic.
starts_with(
"vldrh") && Mnemonic !=
"vldrhi") ||
13062 !(ExtraToken ==
".f16" || ExtraToken ==
".32" || ExtraToken ==
".16" ||
13063 ExtraToken ==
".8")) ||
13064 (Mnemonic.
starts_with(
"vrint") && Mnemonic !=
"vrintr") ||
13065 (Mnemonic.
starts_with(
"vstrh") && Mnemonic !=
"vstrhi"))
13068 const char *predicable_prefixes[] = {
13069 "vabav",
"vabd",
"vabs",
"vadc",
"vadd",
13070 "vaddlv",
"vaddv",
"vand",
"vbic",
"vbrsr",
13071 "vcadd",
"vcls",
"vclz",
"vcmla",
"vcmp",
13072 "vcmul",
"vctp",
"vcvt",
"vddup",
"vdup",
13073 "vdwdup",
"veor",
"vfma",
"vfmas",
"vfms",
13074 "vhadd",
"vhcadd",
"vhsub",
"vidup",
"viwdup",
13075 "vldrb",
"vldrd",
"vldrw",
"vmax",
"vmaxa",
13076 "vmaxav",
"vmaxnm",
"vmaxnma",
"vmaxnmav",
"vmaxnmv",
13077 "vmaxv",
"vmin",
"vminav",
"vminnm",
"vminnmav",
13078 "vminnmv",
"vminv",
"vmla",
"vmladav",
"vmlaldav",
13079 "vmlalv",
"vmlas",
"vmlav",
"vmlsdav",
"vmlsldav",
13080 "vmovlb",
"vmovlt",
"vmovnb",
"vmovnt",
"vmul",
13081 "vmvn",
"vneg",
"vorn",
"vorr",
"vpnot",
13082 "vpsel",
"vqabs",
"vqadd",
"vqdmladh",
"vqdmlah",
13083 "vqdmlash",
"vqdmlsdh",
"vqdmulh",
"vqdmull",
"vqmovn",
13084 "vqmovun",
"vqneg",
"vqrdmladh",
"vqrdmlah",
"vqrdmlash",
13085 "vqrdmlsdh",
"vqrdmulh",
"vqrshl",
"vqrshrn",
"vqrshrun",
13086 "vqshl",
"vqshrn",
"vqshrun",
"vqsub",
"vrev16",
13087 "vrev32",
"vrev64",
"vrhadd",
"vrmlaldavh",
"vrmlalvh",
13088 "vrmlsldavh",
"vrmulh",
"vrshl",
"vrshr",
"vrshrn",
13089 "vsbc",
"vshl",
"vshlc",
"vshll",
"vshr",
13090 "vshrn",
"vsli",
"vsri",
"vstrb",
"vstrd",
13093 return any_of(predicable_prefixes, [&Mnemonic](
const char *prefix) {
13098std::unique_ptr<ARMOperand> ARMAsmParser::defaultCondCodeOp() {
13102std::unique_ptr<ARMOperand> ARMAsmParser::defaultCCOutOp() {
13103 return ARMOperand::CreateCCOut(0,
SMLoc(), *
this);
13106std::unique_ptr<ARMOperand> ARMAsmParser::defaultVPTPredOp() {
unsigned const MachineRegisterInfo * MRI
This file defines the StringMap class.
static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static unsigned getNextRegister(unsigned Reg)
static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS, unsigned VariantID=0)
static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing)
static bool instIsBreakpoint(const MCInst &Inst)
unsigned findCCOutInd(const OperandVector &Operands, unsigned MnemonicOpsEndInd)
static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo, unsigned Reg, unsigned HiReg, bool &containsReg)
static bool isDataTypeToken(StringRef Tok)
}
static MCRegister MatchRegisterName(StringRef Name)
static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing)
unsigned getRegListInd(const OperandVector &Operands, unsigned MnemonicOpsEndInd)
static const char * getSubtargetFeatureName(uint64_t Val)
static bool isVectorPredicable(const MCInstrDesc &MCID)
static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp)
MatchCoprocessorOperandName - Try to parse an coprocessor related instruction with a symbolic operand...
static void applyMnemonicAliases(StringRef &Mnemonic, const FeatureBitset &Features, unsigned VariantID)
void removeCCOut(OperandVector &Operands, unsigned &MnemonicOpsEndInd)
static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT)
static bool insertNoDuplicates(SmallVectorImpl< std::pair< unsigned, unsigned > > &Regs, unsigned Enc, unsigned Reg)
static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID)
static bool isThumbI8Relocation(MCParsedAsmOperand &MCOp)
bool operandsContainWide(OperandVector &Operands, unsigned MnemonicOpsEndInd)
static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg)
void removeCondCode(OperandVector &Operands, unsigned &MnemonicOpsEndInd)
LLVM_EXTERNAL_VISIBILITY void LLVMInitializeARMAsmParser()
Force static initialization.
static unsigned getMnemonicOpsEndInd(const OperandVector &Operands)
static bool isARMMCExpr(MCParsedAsmOperand &MCOp)
unsigned findCondCodeInd(const OperandVector &Operands, unsigned MnemonicOpsEndInd)
void removeVPTCondCode(OperandVector &Operands, unsigned &MnemonicOpsEndInd)
static bool isThumb(const MCSubtargetInfo &STI)
static uint64_t scale(uint64_t Num, uint32_t N, uint32_t D)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static Register getFPReg(const CSKYSubtarget &STI)
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define LLVM_EXTERNAL_VISIBILITY
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static cl::opt< bool > AddBuildAttributes("hexagon-add-build-attributes")
mir Rename Register Operands
static MSP430CC::CondCodes getCondCode(unsigned Cond)
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI Pre allocate WWM Registers
static cl::opt< std::set< SPIRV::Extension::Extension >, false, SPIRVExtensionsParser > Extensions("spirv-ext", cl::desc("Specify list of enabled SPIR-V extensions"))
This file implements the SmallBitVector class.
This file defines the SmallSet class.
This file defines the SmallVector class.
StringSet - A set-like wrapper for the StringMap.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=ARM::NoRegAltName)
VariantKind getKind() const
getOpcode - Get the kind of this expression.
static const ARMMCExpr * create(VariantKind Kind, const MCExpr *Expr, MCContext &Ctx)
Target independent representation for an assembler token.
int64_t getIntVal() const
bool isNot(TokenKind K) const
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on string...
StringRef getStringContents() const
Get the contents of a string token (without quotes).
bool is(TokenKind K) const
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string.
This class represents an Operation in the Expression.
Implements a dense probed hash-table based set.
Base class for user error types.
Lightweight error class with error context and mandatory checking.
Container class for subtarget features.
constexpr bool test(unsigned I) const
constexpr size_t size() const
Generic assembler lexer interface, for use by target specific assembly lexers.
const AsmToken peekTok(bool ShouldSkipSpace=true)
Look ahead at the next token to be lexed.
virtual void Initialize(MCAsmParser &Parser)
Initialize the extension for parsing using the given Parser.
MCStreamer & getStreamer()
MCAsmParser & getParser()
Generic assembler parser interface, for use by target specific assembly parsers.
bool parseToken(AsmToken::TokenKind T, const Twine &Msg="unexpected token")
virtual bool parseEscapedString(std::string &Data)=0
Parse the current token as a string which may include escaped characters and return the string conten...
virtual MCStreamer & getStreamer()=0
Return the output streamer for the assembler.
virtual void Note(SMLoc L, const Twine &Msg, SMRange Range=std::nullopt)=0
Emit a note at the location L, with the message Msg.
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
const AsmToken & getTok() const
Get the current AsmToken from the stream.
virtual bool parseIdentifier(StringRef &Res)=0
Parse an identifier or string (as a quoted identifier) and set Res to the identifier contents.
bool parseOptionalToken(AsmToken::TokenKind T)
Attempt to parse and consume token, returning true on success.
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
virtual bool Warning(SMLoc L, const Twine &Msg, SMRange Range=std::nullopt)=0
Emit a warning at the location L, with the message Msg.
bool Error(SMLoc L, const Twine &Msg, SMRange Range=std::nullopt)
Return an error at the location L, with the message Msg.
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
static const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Context object for machine code objects.
const MCRegisterInfo * getRegisterInfo() const
Base class for the full range of assembler expressions which are needed for parsing.
@ Constant
Constant expressions.
Instances of this class represent a single low-level machine instruction.
void dump_pretty(raw_ostream &OS, const MCInstPrinter *Printer=nullptr, StringRef Separator=" ", const MCRegisterInfo *RegInfo=nullptr) const
Dump the MCInst as prettily as possible using the additional MC structures, if given.
unsigned getNumOperands() const
unsigned getOpcode() const
iterator insert(iterator I, const MCOperand &Op)
void addOperand(const MCOperand Op)
void setOpcode(unsigned Op)
const MCOperand & getOperand(unsigned i) const
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool isIndirectBranch() const
Return true if this is an indirect branch, such as a branch through a register.
int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
bool hasDefOfPhysReg(const MCInst &MI, unsigned Reg, const MCRegisterInfo &RI) const
Return true if this instruction defines the specified physical register, either explicitly or implici...
bool hasOptionalDef() const
Set if this instruction has an optional definition, e.g.
unsigned short NumOperands
bool isBranch() const
Returns true if this is a conditional, unconditional, or indirect branch.
bool isPredicable() const
Return true if this instruction has a predicate operand that controls execution.
bool isCall() const
Return true if the instruction is a call.
bool isTerminator() const
Returns true if this instruction is part of the terminator for a basic block.
bool isReturn() const
Return true if the instruction is a return.
Interface to description of machine instruction set.
Instances of this class represent operands of the MCInst class.
static MCOperand createReg(unsigned Reg)
static MCOperand createExpr(const MCExpr *Val)
static MCOperand createImm(int64_t Val)
unsigned getReg() const
Returns the register number.
const MCExpr * getExpr() const
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand.
virtual SMLoc getStartLoc() const =0
getStartLoc - Get the location of the first token of this operand.
virtual bool isReg() const =0
isReg - Is this a register operand?
virtual bool isMem() const =0
isMem - Is this a memory operand?
virtual MCRegister getReg() const =0
virtual void print(raw_ostream &OS) const =0
print - Print a debug representation of the operand to the given stream.
virtual bool isToken() const =0
isToken - Is this a token operand?
virtual bool isImm() const =0
isImm - Is this an immediate operand?
virtual SMLoc getEndLoc() const =0
getEndLoc - Get the location of the last token of this operand.
MCRegisterClass - Base class of TargetRegisterClass.
unsigned getID() const
getID() - Return the register class ID number.
unsigned getNumRegs() const
getNumRegs - Return the number of registers in this class.
unsigned getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
Instances of this class represent a uniqued identifier for a section in the current translation unit.
Streaming machine code generation interface.
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
virtual void emitLabel(MCSymbol *Symbol, SMLoc Loc=SMLoc())
Emit a label for Symbol into the current section.
MCTargetStreamer * getTargetStreamer()
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const FeatureBitset & getFeatureBits() const
FeatureBitset ApplyFeatureFlag(StringRef FS)
Apply a feature flag and return the re-computed feature bits, including all feature bits implied by t...
FeatureBitset SetFeatureBitsTransitively(const FeatureBitset &FB)
Set/clear additional feature bits, including all other bits they imply.
void setDefaultFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS)
Set the features to the default for the given CPU and TuneCPU, with an appended feature string.
FeatureBitset ToggleFeature(uint64_t FB)
Toggle a feature and return the re-computed feature bits.
FeatureBitset ClearFeatureBitsTransitively(const FeatureBitset &FB)
Represent a reference to a symbol from inside an expression.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
MCTargetAsmParser - Generic interface to target specific assembly parsers.
virtual void onLabelParsed(MCSymbol *Symbol)
MCSubtargetInfo & copySTI()
Create a copy of STI and return a non-const reference to it.
@ FIRST_TARGET_MATCH_RESULT_TY
virtual bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc)=0
virtual bool ParseDirective(AsmToken DirectiveID)
ParseDirective - Parse a target specific assembler directive This method is deprecated,...
virtual unsigned checkEarlyTargetMatchPredicate(MCInst &Inst, const OperandVector &Operands)
Validate the instruction match against any complex target predicates before rendering any operands to...
virtual ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc)=0
tryParseRegister - parse one register if possible
virtual void flushPendingInstructions(MCStreamer &Out)
Ensure that all previously parsed instructions have been emitted to the output streamer,...
void setAvailableFeatures(const FeatureBitset &Value)
virtual MCSymbolRefExpr::VariantKind getVariantKindForName(StringRef Name) const
const MCSubtargetInfo & getSTI() const
virtual void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc)
virtual unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, unsigned Kind)
Allow a target to add special case operand matching for things that tblgen doesn't/can't handle effec...
virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, OperandVector &Operands)=0
ParseInstruction - Parse one assembly instruction.
virtual unsigned checkTargetMatchPredicate(MCInst &Inst)
checkTargetMatchPredicate - Validate the instruction match against any complex target predicates not ...
virtual bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, OperandVector &Operands, MCStreamer &Out, uint64_t &ErrorInfo, bool MatchingInlineAsm)=0
MatchAndEmitInstruction - Recognize a series of operands of a parsed instruction as an actual MCInst ...
Target specific streamer interface.
Ternary parse status returned by various parse* methods.
constexpr bool isFailure() const
static constexpr StatusTy Failure
constexpr bool isSuccess() const
static constexpr StatusTy Success
static constexpr StatusTy NoMatch
Represents a location in source code.
static SMLoc getFromPointer(const char *Ptr)
constexpr const char * getPointer() const
Represents a range in source code.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
typename SuperClass::const_iterator const_iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
StringMap - This is an unconventional map that is specialized for handling keys that are "strings",...
iterator find(StringRef Key)
size_type count(StringRef Key) const
count - Return 1 if the element is in the map, 0 otherwise.
bool insert(MapEntryTy *KeyValue)
insert - Insert the specified key/value pair into the map.
StringRef - Represent a constant reference to a string, i.e.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
constexpr size_t size() const
size - Get the string size.
StringRef trim(char Char) const
Return string with consecutive Char characters starting from the left and right removed.
std::string lower() const
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
static constexpr size_t npos
bool equals_insensitive(StringRef RHS) const
Check for string equality, ignoring case.
StringSet - A wrapper for StringMap that provides set-like functionality.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
Triple - Helper class for working with autoconf configuration names.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
LLVM Value Representation.
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
This class implements an extremely fast bulk output stream that can only output to a stream.
A raw_ostream that writes to an SmallVector or SmallString.
This class provides various memory handling functions that manipulate MemoryBlock instances.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const TagNameMap & getARMAttributeTags()
static CondCodes getOppositeCondition(CondCodes CC)
unsigned getSORegOffset(unsigned Op)
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into a shifter_operand immed...
int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
unsigned encodeNEONi16splat(unsigned Value)
float getFPImmFloat(unsigned Imm)
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
unsigned getAM2Opc(AddrOpc Opc, unsigned Imm12, ShiftOpc SO, unsigned IdxMode=0)
unsigned getAM5Opc(AddrOpc Opc, unsigned char Offset)
getAM5Opc - This function encodes the addrmode5 opc field.
ShiftOpc getSORegShOp(unsigned Op)
bool isNEONi16splat(unsigned Value)
Checks if Value is a correct immediate for instructions like VBIC/VORR.
unsigned getAM5FP16Opc(AddrOpc Opc, unsigned char Offset)
getAM5FP16Opc - This function encodes the addrmode5fp16 opc field.
unsigned getAM3Opc(AddrOpc Opc, unsigned char Offset, unsigned IdxMode=0)
getAM3Opc - This function encodes the addrmode3 opc field.
bool isNEONi32splat(unsigned Value)
Checks if Value is a correct immediate for instructions like VBIC/VORR.
unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
unsigned encodeNEONi32splat(unsigned Value)
Encode NEON 32 bits Splat immediate for instructions like VBIC/VORR.
const StringRef getShiftOpcStr(ShiftOpc Op)
static const char * IFlagsToString(unsigned val)
bool getFPUFeatures(FPUKind FPUKind, std::vector< StringRef > &Features)
StringRef getArchName(ArchKind AK)
uint64_t parseArchExt(StringRef ArchExt)
ArchKind parseArch(StringRef Arch)
bool isVpred(OperandType op)
FPUKind parseFPU(StringRef FPU)
bool isCDECoproc(size_t Coproc, const MCSubtargetInfo &STI)
@ D16
Only 16 D registers.
constexpr bool any(E Val)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
std::optional< unsigned > attrTypeFromString(StringRef tag, TagNameMap tagNameMap)
Flag
These should be considered private to the implementation of the MCInstrDesc class.
bool parseAssignmentExpression(StringRef Name, bool allow_redef, MCAsmParser &Parser, MCSymbol *&Symbol, const MCExpr *&Value)
Parse a value expression and return whether it can be assigned to a symbol with the given name.
@ CE
Windows NT (Windows on ARM)
Reg
All possible values of the reg field in the ModR/M byte.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
NodeAddr< FuncNode * > Func
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
static const char * ARMVPTPredToString(ARMVCC::VPTCodes CC)
int popcount(T Value) noexcept
Count the number of set bits in a value.
Target & getTheThumbBETarget()
static unsigned ARMCondCodeFromString(StringRef CC)
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
static bool isARMLowRegister(unsigned Reg)
isARMLowRegister - Returns true if the register is a low register (r0-r7).
auto reverse(ContainerTy &&C)
@ Never
Never set the bit.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool is_sorted(R &&Range, Compare C)
Wrapper function around std::is_sorted to check if elements in a range R are sorted with respect to a...
bool IsCPSRDead< MCInst >(const MCInst *Instr)
static bool isValidCoprocessorNumber(unsigned Num, const FeatureBitset &featureBits)
isValidCoprocessorNumber - decide whether an explicit coprocessor number is legal in generic instruct...
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ MCAF_Code16
.code16 (X86) / .code 16 (ARM)
@ MCAF_Code32
.code32 (X86) / .code 32 (ARM)
DWARFExpression::Operation Op
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
static unsigned ARMVectorCondCodeFromString(StringRef CC)
static const char * ARMCondCodeToString(ARMCC::CondCodes CC)
Target & getTheARMLETarget()
Target & getTheARMBETarget()
Target & getTheThumbLETarget()
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
const FeatureBitset Features
This struct is a compact representation of a valid (non-zero power of two) alignment.
Holds functions to get, set or test bitfields.
RegisterMCAsmParser - Helper template for registering a target specific assembly parser,...