68#define DEBUG_TYPE "asm-parser"
// Policy for handling conditional (predicated) instructions that appear
// outside of an IT block; selected via the "arm-implicit-it" option below.
enum class ImplicitItModeTy {
  Always,    // Accept in both ISAs, emit implicit ITs in Thumb.
  Never,     // Warn in ARM, reject in Thumb.
  ARMOnly,   // Accept in ARM, reject in Thumb.
  ThumbOnly  // Warn in ARM, emit implicit ITs in Thumb.
};
86 "arm-implicit-it",
cl::init(ImplicitItModeTy::ARMOnly),
87 cl::desc(
"Allow conditional instructions outdside of an IT block"),
89 "Accept in both ISAs, emit implicit ITs in Thumb"),
91 "Warn in ARM, reject in Thumb"),
93 "Accept in ARM, reject in Thumb"),
94 clEnumValN(ImplicitItModeTy::ThumbOnly,
"thumb",
95 "Warn in ARM, emit implicit ITs in Thumb")));
100enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
// Pull out the single IT-mask bit describing the block slot at the given
// 1-based Position: slot P's bit lives at bit index (5 - P) of the mask.
static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
  unsigned BitIndex = 5 - Position;
  return (Mask >> BitIndex) & 1u;
}
118 Locs PersonalityLocs;
119 Locs PersonalityIndexLocs;
120 Locs HandlerDataLocs;
126 bool hasFnStart()
const {
return !FnStartLocs.empty(); }
127 bool cantUnwind()
const {
return !CantUnwindLocs.empty(); }
128 bool hasHandlerData()
const {
return !HandlerDataLocs.empty(); }
130 bool hasPersonality()
const {
131 return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
134 void recordFnStart(
SMLoc L) { FnStartLocs.push_back(L); }
135 void recordCantUnwind(
SMLoc L) { CantUnwindLocs.push_back(L); }
136 void recordPersonality(
SMLoc L) { PersonalityLocs.push_back(L); }
137 void recordHandlerData(
SMLoc L) { HandlerDataLocs.push_back(L); }
138 void recordPersonalityIndex(
SMLoc L) { PersonalityIndexLocs.push_back(L); }
140 void saveFPReg(
int Reg) { FPReg =
Reg; }
141 int getFPReg()
const {
return FPReg; }
143 void emitFnStartLocNotes()
const {
144 for (
const SMLoc &Loc : FnStartLocs)
145 Parser.
Note(Loc,
".fnstart was specified here");
148 void emitCantUnwindLocNotes()
const {
149 for (
const SMLoc &Loc : CantUnwindLocs)
150 Parser.
Note(Loc,
".cantunwind was specified here");
153 void emitHandlerDataLocNotes()
const {
154 for (
const SMLoc &Loc : HandlerDataLocs)
155 Parser.
Note(Loc,
".handlerdata was specified here");
158 void emitPersonalityLocNotes()
const {
160 PE = PersonalityLocs.end(),
161 PII = PersonalityIndexLocs.begin(),
162 PIE = PersonalityIndexLocs.end();
163 PI != PE || PII != PIE;) {
164 if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
165 Parser.
Note(*PI++,
".personality was specified here");
166 else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
167 Parser.
Note(*PII++,
".personalityindex was specified here");
170 "at the same location");
175 FnStartLocs = Locs();
176 CantUnwindLocs = Locs();
177 PersonalityLocs = Locs();
178 HandlerDataLocs = Locs();
179 PersonalityIndexLocs = Locs();
185class ARMMnemonicSets {
196 return CDE.
count(Mnemonic);
201 bool isVPTPredicableCDEInstr(
StringRef Mnemonic) {
204 return CDEWithVPTSuffix.
count(Mnemonic);
209 bool isITPredicableCDEInstr(
StringRef Mnemonic) {
219 bool isCDEDualRegInstr(
StringRef Mnemonic) {
222 return Mnemonic ==
"cx1d" || Mnemonic ==
"cx1da" ||
223 Mnemonic ==
"cx2d" || Mnemonic ==
"cx2da" ||
224 Mnemonic ==
"cx3d" || Mnemonic ==
"cx3da";
229 for (
StringRef Mnemonic: {
"cx1",
"cx1a",
"cx1d",
"cx1da",
230 "cx2",
"cx2a",
"cx2d",
"cx2da",
231 "cx3",
"cx3a",
"cx3d",
"cx3da", })
234 {
"vcx1",
"vcx1a",
"vcx2",
"vcx2a",
"vcx3",
"vcx3a"}) {
236 CDEWithVPTSuffix.
insert(Mnemonic);
237 CDEWithVPTSuffix.
insert(std::string(Mnemonic) +
"t");
238 CDEWithVPTSuffix.
insert(std::string(Mnemonic) +
"e");
249 "do not have a target streamer");
257 bool NextSymbolIsThumb;
259 bool useImplicitITThumb()
const {
260 return ImplicitItMode == ImplicitItModeTy::Always ||
261 ImplicitItMode == ImplicitItModeTy::ThumbOnly;
264 bool useImplicitITARM()
const {
265 return ImplicitItMode == ImplicitItModeTy::Always ||
266 ImplicitItMode == ImplicitItModeTy::ARMOnly;
281 unsigned CurPosition;
297 if (!inImplicitITBlock()) {
311 for (
const MCInst &Inst : PendingConditionalInsts) {
314 PendingConditionalInsts.clear();
318 ITState.CurPosition = ~0
U;
321 bool inITBlock() {
return ITState.CurPosition != ~0
U; }
322 bool inExplicitITBlock() {
return inITBlock() && ITState.IsExplicit; }
323 bool inImplicitITBlock() {
return inITBlock() && !ITState.IsExplicit; }
325 bool lastInITBlock() {
  // Advance the IT-block state to the next instruction slot, closing an
  // explicit block once its final predicated slot has been consumed.
  void forwardITPosition() {
    if (!inITBlock())
      return;
    // NOTE(review): TZ is defined on a line not visible in this chunk --
    // presumably the trailing-zero count of ITState.Mask, making (5 - TZ)
    // one past the block's last slot; confirm against the full source.
    if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
      ITState.CurPosition = ~0U;
  // Step an implicit IT block back one slot and rebuild the mask for the
  // shortened block; only legal past the first slot of an implicit block.
  void rewindImplicitITPosition() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition > 1);
    ITState.CurPosition--;
    // NOTE(review): TZ comes from a line outside this chunk (presumably
    // trailing zeros of the old mask) -- confirm before relying on the
    // shift amounts below.
    unsigned NewMask = 0;
    NewMask |= ITState.Mask & (0xC << TZ); // keep remaining condition bits
    NewMask |= 0x2 << TZ;                  // re-terminate the mask
    ITState.Mask = NewMask;
  // Abandon an implicit IT block before it is committed; only valid while
  // still positioned at the first slot of an assembler-generated block.
  void discardImplicitITBlock() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition == 1);
    ITState.CurPosition = ~0U;
360 unsigned getDRegFromQReg(
unsigned QReg)
const {
361 return MRI->getSubReg(QReg, ARM::dsub_0);
366 unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
372 void invertCurrentITCondition() {
373 if (ITState.CurPosition == 1) {
376 ITState.Mask ^= 1 << (5 - ITState.CurPosition);
381 bool isITBlockFull() {
382 return inITBlock() && (ITState.Mask & 1);
388 assert(inImplicitITBlock());
393 unsigned NewMask = 0;
395 NewMask |= ITState.Mask & (0xE << TZ);
397 NewMask |= (
Cond != ITState.Cond) << TZ;
399 NewMask |= 1 << (TZ - 1);
400 ITState.Mask = NewMask;
404 void startImplicitITBlock() {
408 ITState.CurPosition = 1;
409 ITState.IsExplicit =
false;
420 ITState.CurPosition = 0;
421 ITState.IsExplicit =
true;
426 unsigned CurPosition;
428 bool inVPTBlock() {
return VPTState.CurPosition != ~0
U; }
  // Step the VPT-block state to the next slot, closing the block after the
  // final slot has been consumed.
  void forwardVPTPosition() {
    if (!inVPTBlock())
      return;
    // NOTE(review): TZ is defined on a line not visible in this chunk --
    // presumably the trailing-zero count of the VPT mask; confirm upstream.
    if (++VPTState.CurPosition == 5 - TZ)
      VPTState.CurPosition = ~0U;
449 unsigned ListNo,
bool IsARPop =
false);
456 bool parseRegisterList(
OperandVector &,
bool EnforceOrder =
true,
457 bool AllowRAAC =
false,
458 bool AllowOutOfBoundReg =
false);
461 bool parseImmExpr(int64_t &Out);
464 unsigned &ShiftAmount);
465 bool parseLiteralValues(
unsigned Size,
SMLoc L);
466 bool parseDirectiveThumb(
SMLoc L);
467 bool parseDirectiveARM(
SMLoc L);
468 bool parseDirectiveThumbFunc(
SMLoc L);
469 bool parseDirectiveCode(
SMLoc L);
470 bool parseDirectiveSyntax(
SMLoc L);
472 bool parseDirectiveUnreq(
SMLoc L);
473 bool parseDirectiveArch(
SMLoc L);
474 bool parseDirectiveEabiAttr(
SMLoc L);
475 bool parseDirectiveCPU(
SMLoc L);
476 bool parseDirectiveFPU(
SMLoc L);
477 bool parseDirectiveFnStart(
SMLoc L);
478 bool parseDirectiveFnEnd(
SMLoc L);
479 bool parseDirectiveCantUnwind(
SMLoc L);
480 bool parseDirectivePersonality(
SMLoc L);
481 bool parseDirectiveHandlerData(
SMLoc L);
482 bool parseDirectiveSetFP(
SMLoc L);
483 bool parseDirectivePad(
SMLoc L);
484 bool parseDirectiveRegSave(
SMLoc L,
bool IsVector);
485 bool parseDirectiveInst(
SMLoc L,
char Suffix =
'\0');
486 bool parseDirectiveLtorg(
SMLoc L);
487 bool parseDirectiveEven(
SMLoc L);
488 bool parseDirectivePersonalityIndex(
SMLoc L);
489 bool parseDirectiveUnwindRaw(
SMLoc L);
490 bool parseDirectiveTLSDescSeq(
SMLoc L);
491 bool parseDirectiveMovSP(
SMLoc L);
492 bool parseDirectiveObjectArch(
SMLoc L);
493 bool parseDirectiveArchExtension(
SMLoc L);
494 bool parseDirectiveAlign(
SMLoc L);
495 bool parseDirectiveThumbSet(
SMLoc L);
497 bool parseDirectiveSEHAllocStack(
SMLoc L,
bool Wide);
498 bool parseDirectiveSEHSaveRegs(
SMLoc L,
bool Wide);
499 bool parseDirectiveSEHSaveSP(
SMLoc L);
500 bool parseDirectiveSEHSaveFRegs(
SMLoc L);
501 bool parseDirectiveSEHSaveLR(
SMLoc L);
502 bool parseDirectiveSEHPrologEnd(
SMLoc L,
bool Fragment);
503 bool parseDirectiveSEHNop(
SMLoc L,
bool Wide);
504 bool parseDirectiveSEHEpilogStart(
SMLoc L,
bool Condition);
505 bool parseDirectiveSEHEpilogEnd(
SMLoc L);
506 bool parseDirectiveSEHCustom(
SMLoc L);
512 bool &CarrySetting,
unsigned &ProcessorIMod,
515 StringRef FullInst,
bool &CanAcceptCarrySet,
516 bool &CanAcceptPredicationCode,
517 bool &CanAcceptVPTPredicationCode);
520 void tryConvertingToTwoOperandForm(
StringRef Mnemonic,
bool CarrySetting,
529 bool isThumbOne()
const {
533 bool isThumbTwo()
const {
537 bool hasThumb()
const {
541 bool hasThumb2()
const {
545 bool hasV6Ops()
const {
549 bool hasV6T2Ops()
const {
553 bool hasV6MOps()
const {
557 bool hasV7Ops()
const {
561 bool hasV8Ops()
const {
565 bool hasV8MBaseline()
const {
569 bool hasV8MMainline()
const {
572 bool hasV8_1MMainline()
const {
575 bool hasMVE()
const {
578 bool hasMVEFloat()
const {
581 bool hasCDE()
const {
584 bool has8MSecExt()
const {
588 bool hasARM()
const {
592 bool hasDSP()
const {
596 bool hasD32()
const {
600 bool hasV8_1aOps()
const {
604 bool hasRAS()
const {
610 auto FB = ComputeAvailableFeatures(STI.
ToggleFeature(ARM::ModeThumb));
614 void FixModeAfterArchChange(
bool WasThumb,
SMLoc Loc);
616 bool isMClass()
const {
623#define GET_ASSEMBLER_HEADER
624#include "ARMGenAsmMatcher.inc"
640 return parsePKHImm(O,
"lsl", 0, 31);
643 return parsePKHImm(O,
"asr", 1, 32);
667 bool isITBlockTerminator(
MCInst &Inst)
const;
670 bool Load,
bool ARMMode,
bool Writeback);
673 enum ARMMatchResultTy {
675 Match_RequiresNotITBlock,
677 Match_RequiresThumb2,
679 Match_RequiresFlagSetting,
680#define GET_OPERAND_DIAGNOSTIC_TYPES
681#include "ARMGenAsmMatcher.inc"
698 getTargetStreamer().emitTargetAttributes(STI);
701 ITState.CurPosition = ~0
U;
703 VPTState.CurPosition = ~0
U;
705 NextSymbolIsThumb =
false;
711 SMLoc &EndLoc)
override;
717 unsigned Kind)
override;
723 bool MatchingInlineAsm)
override;
726 bool MatchingInlineAsm,
bool &EmitInITBlock,
729 struct NearMissMessage {
734 const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
763 k_InstSyncBarrierOpt,
764 k_TraceSyncBarrierOpt,
773 k_RegisterListWithAPSR,
776 k_FPSRegisterListWithVPR,
777 k_FPDRegisterListWithVPR,
779 k_VectorListAllLanes,
786 k_ConstantPoolImmediate,
787 k_BitfieldDescriptor,
791 SMLoc StartLoc, EndLoc, AlignmentLoc;
806 struct CoprocOptionOp {
848 struct VectorListOp {
855 struct VectorIndexOp {
869 unsigned OffsetRegNum;
874 unsigned isNegative : 1;
877 struct PostIdxRegOp {
884 struct ShifterImmOp {
889 struct RegShiftedRegOp {
896 struct RegShiftedImmOp {
920 struct CoprocOptionOp CoprocOption;
921 struct MBOptOp MBOpt;
922 struct ISBOptOp ISBOpt;
923 struct TSBOptOp TSBOpt;
924 struct ITMaskOp ITMask;
926 struct MMaskOp MMask;
927 struct BankedRegOp BankedReg;
930 struct VectorListOp VectorList;
931 struct VectorIndexOp VectorIndex;
934 struct PostIdxRegOp PostIdxReg;
935 struct ShifterImmOp ShifterImm;
936 struct RegShiftedRegOp RegShiftedReg;
937 struct RegShiftedImmOp RegShiftedImm;
938 struct RotImmOp RotImm;
939 struct ModImmOp ModImm;
944 ARMOperand(KindTy K) :
Kind(
K) {}
957 SMLoc getAlignmentLoc()
const {
958 assert(Kind == k_Memory &&
"Invalid access!");
963 assert(Kind == k_CondCode &&
"Invalid access!");
968 assert(isVPTPred() &&
"Invalid access!");
972 unsigned getCoproc()
const {
973 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) &&
"Invalid access!");
978 assert(Kind == k_Token &&
"Invalid access!");
982 unsigned getReg()
const override {
983 assert((Kind == k_Register || Kind == k_CCOut) &&
"Invalid access!");
988 assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
989 Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
990 Kind == k_FPSRegisterListWithVPR ||
991 Kind == k_FPDRegisterListWithVPR) &&
996 const MCExpr *getImm()
const {
1001 const MCExpr *getConstantPoolImm()
const {
1002 assert(isConstantPoolImm() &&
"Invalid access!");
1006 unsigned getVectorIndex()
const {
1007 assert(Kind == k_VectorIndex &&
"Invalid access!");
1008 return VectorIndex.Val;
1012 assert(Kind == k_MemBarrierOpt &&
"Invalid access!");
1017 assert(Kind == k_InstSyncBarrierOpt &&
"Invalid access!");
1022 assert(Kind == k_TraceSyncBarrierOpt &&
"Invalid access!");
1027 assert(Kind == k_ProcIFlags &&
"Invalid access!");
1031 unsigned getMSRMask()
const {
1032 assert(Kind == k_MSRMask &&
"Invalid access!");
1036 unsigned getBankedReg()
const {
1037 assert(Kind == k_BankedReg &&
"Invalid access!");
1038 return BankedReg.Val;
1041 bool isCoprocNum()
const {
return Kind == k_CoprocNum; }
1042 bool isCoprocReg()
const {
return Kind == k_CoprocReg; }
1043 bool isCoprocOption()
const {
return Kind == k_CoprocOption; }
1044 bool isCondCode()
const {
return Kind == k_CondCode; }
1045 bool isVPTPred()
const {
return Kind == k_VPTPred; }
1046 bool isCCOut()
const {
return Kind == k_CCOut; }
1047 bool isITMask()
const {
return Kind == k_ITCondMask; }
1048 bool isITCondCode()
const {
return Kind == k_CondCode; }
1049 bool isImm()
const override {
1050 return Kind == k_Immediate;
1053 bool isARMBranchTarget()
const {
1054 if (!
isImm())
return false;
1056 if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1057 return CE->getValue() % 4 == 0;
1062 bool isThumbBranchTarget()
const {
1063 if (!
isImm())
return false;
1065 if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
1066 return CE->getValue() % 2 == 0;
1072 template<
unsigned w
idth,
unsigned scale>
1073 bool isUnsignedOffset()
const {
1074 if (!
isImm())
return false;
1075 if (isa<MCSymbolRefExpr>(
Imm.Val))
return true;
1077 int64_t Val =
CE->getValue();
1079 int64_t
Max =
Align * ((1LL << width) - 1);
1080 return ((Val %
Align) == 0) && (Val >= 0) && (Val <= Max);
1087 template<
unsigned w
idth,
unsigned scale>
1088 bool isSignedOffset()
const {
1089 if (!
isImm())
return false;
1090 if (isa<MCSymbolRefExpr>(
Imm.Val))
return true;
1092 int64_t Val =
CE->getValue();
1094 int64_t
Max =
Align * ((1LL << (width-1)) - 1);
1095 int64_t Min = -
Align * (1LL << (width-1));
1096 return ((Val %
Align) == 0) && (Val >= Min) && (Val <= Max);
1103 bool isLEOffset()
const {
1104 if (!
isImm())
return false;
1105 if (isa<MCSymbolRefExpr>(
Imm.Val))
return true;
1107 int64_t Val =
CE->getValue();
1108 return Val < 0 && Val >= -4094 && (Val & 1) == 0;
1117 bool isThumbMemPC()
const {
1120 if (isa<MCSymbolRefExpr>(
Imm.Val))
return true;
1122 if (!CE)
return false;
1123 Val =
CE->getValue();
1125 else if (isGPRMem()) {
1126 if(!
Memory.OffsetImm ||
Memory.OffsetRegNum)
return false;
1127 if(
Memory.BaseRegNum != ARM::PC)
return false;
1128 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
1129 Val =
CE->getValue();
1134 return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
1137 bool isFPImm()
const {
1138 if (!
isImm())
return false;
1140 if (!CE)
return false;
1145 template<
int64_t N,
int64_t M>
1146 bool isImmediate()
const {
1147 if (!
isImm())
return false;
1149 if (!CE)
return false;
1150 int64_t
Value =
CE->getValue();
1154 template<
int64_t N,
int64_t M>
1155 bool isImmediateS4()
const {
1156 if (!
isImm())
return false;
1158 if (!CE)
return false;
1159 int64_t
Value =
CE->getValue();
1162 template<
int64_t N,
int64_t M>
1163 bool isImmediateS2()
const {
1164 if (!
isImm())
return false;
1166 if (!CE)
return false;
1167 int64_t
Value =
CE->getValue();
1170 bool isFBits16()
const {
1171 return isImmediate<0, 17>();
1173 bool isFBits32()
const {
1174 return isImmediate<1, 33>();
1176 bool isImm8s4()
const {
1177 return isImmediateS4<-1020, 1020>();
1179 bool isImm7s4()
const {
1180 return isImmediateS4<-508, 508>();
1182 bool isImm7Shift0()
const {
1183 return isImmediate<-127, 127>();
1185 bool isImm7Shift1()
const {
1186 return isImmediateS2<-255, 255>();
1188 bool isImm7Shift2()
const {
1189 return isImmediateS4<-511, 511>();
1191 bool isImm7()
const {
1192 return isImmediate<-127, 127>();
1194 bool isImm0_1020s4()
const {
1195 return isImmediateS4<0, 1020>();
1197 bool isImm0_508s4()
const {
1198 return isImmediateS4<0, 508>();
1200 bool isImm0_508s4Neg()
const {
1201 if (!
isImm())
return false;
1203 if (!CE)
return false;
1204 int64_t
Value = -
CE->getValue();
1209 bool isImm0_4095Neg()
const {
1210 if (!
isImm())
return false;
1212 if (!CE)
return false;
1217 if ((
CE->getValue() >> 32) > 0)
return false;
1222 bool isImm0_7()
const {
1223 return isImmediate<0, 7>();
1226 bool isImm1_16()
const {
1227 return isImmediate<1, 16>();
1230 bool isImm1_32()
const {
1231 return isImmediate<1, 32>();
1234 bool isImm8_255()
const {
1235 return isImmediate<8, 255>();
1238 bool isImm0_255Expr()
const {
1246 int64_t
Value =
CE->getValue();
1247 return isUInt<8>(
Value);
1250 bool isImm256_65535Expr()
const {
1251 if (!
isImm())
return false;
1255 if (!CE)
return true;
1256 int64_t
Value =
CE->getValue();
1260 bool isImm0_65535Expr()
const {
1261 if (!
isImm())
return false;
1265 if (!CE)
return true;
1266 int64_t
Value =
CE->getValue();
1270 bool isImm24bit()
const {
1271 return isImmediate<0, 0xffffff + 1>();
1274 bool isImmThumbSR()
const {
1275 return isImmediate<1, 33>();
1278 bool isPKHLSLImm()
const {
1279 return isImmediate<0, 32>();
1282 bool isPKHASRImm()
const {
1283 return isImmediate<0, 33>();
1286 bool isAdrLabel()
const {
1289 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1293 if (!
isImm())
return false;
1295 if (!CE)
return false;
1296 int64_t
Value =
CE->getValue();
1301 bool isT2SOImm()
const {
1304 if (
isImm() && !isa<MCConstantExpr>(getImm())) {
1307 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
1311 if (!
isImm())
return false;
1313 if (!CE)
return false;
1314 int64_t
Value =
CE->getValue();
1318 bool isT2SOImmNot()
const {
1319 if (!
isImm())
return false;
1321 if (!CE)
return false;
1322 int64_t
Value =
CE->getValue();
1327 bool isT2SOImmNeg()
const {
1328 if (!
isImm())
return false;
1330 if (!CE)
return false;
1331 int64_t
Value =
CE->getValue();
1337 bool isSetEndImm()
const {
1338 if (!
isImm())
return false;
1340 if (!CE)
return false;
1341 int64_t
Value =
CE->getValue();
1345 bool isReg()
const override {
return Kind == k_Register; }
1346 bool isRegList()
const {
return Kind == k_RegisterList; }
1347 bool isRegListWithAPSR()
const {
1348 return Kind == k_RegisterListWithAPSR ||
Kind == k_RegisterList;
1350 bool isDPRRegList()
const {
return Kind == k_DPRRegisterList; }
1351 bool isSPRRegList()
const {
return Kind == k_SPRRegisterList; }
1352 bool isFPSRegListWithVPR()
const {
return Kind == k_FPSRegisterListWithVPR; }
1353 bool isFPDRegListWithVPR()
const {
return Kind == k_FPDRegisterListWithVPR; }
1354 bool isToken()
const override {
return Kind == k_Token; }
1355 bool isMemBarrierOpt()
const {
return Kind == k_MemBarrierOpt; }
1356 bool isInstSyncBarrierOpt()
const {
return Kind == k_InstSyncBarrierOpt; }
1357 bool isTraceSyncBarrierOpt()
const {
return Kind == k_TraceSyncBarrierOpt; }
1358 bool isMem()
const override {
1359 return isGPRMem() || isMVEMem();
1361 bool isMVEMem()
const {
1362 if (Kind != k_Memory)
1365 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
Memory.BaseRegNum) &&
1366 !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
Memory.BaseRegNum))
1368 if (
Memory.OffsetRegNum &&
1369 !ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
1374 bool isGPRMem()
const {
1375 if (Kind != k_Memory)
1378 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
Memory.BaseRegNum))
1380 if (
Memory.OffsetRegNum &&
1381 !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
Memory.OffsetRegNum))
1385 bool isShifterImm()
const {
return Kind == k_ShifterImmediate; }
1386 bool isRegShiftedReg()
const {
1387 return Kind == k_ShiftedRegister &&
1388 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1389 RegShiftedReg.SrcReg) &&
1390 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1391 RegShiftedReg.ShiftReg);
1393 bool isRegShiftedImm()
const {
1394 return Kind == k_ShiftedImmediate &&
1395 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1396 RegShiftedImm.SrcReg);
1398 bool isRotImm()
const {
return Kind == k_RotateImmediate; }
1400 template<
unsigned Min,
unsigned Max>
1401 bool isPowerTwoInRange()
const {
1402 if (!
isImm())
return false;
1404 if (!CE)
return false;
1405 int64_t
Value =
CE->getValue();
1409 bool isModImm()
const {
return Kind == k_ModifiedImmediate; }
1411 bool isModImmNot()
const {
1412 if (!
isImm())
return false;
1414 if (!CE)
return false;
1415 int64_t
Value =
CE->getValue();
1419 bool isModImmNeg()
const {
1420 if (!
isImm())
return false;
1422 if (!CE)
return false;
1423 int64_t
Value =
CE->getValue();
1428 bool isThumbModImmNeg1_7()
const {
1429 if (!
isImm())
return false;
1431 if (!CE)
return false;
1432 int32_t
Value = -(int32_t)
CE->getValue();
1436 bool isThumbModImmNeg8_255()
const {
1437 if (!
isImm())
return false;
1439 if (!CE)
return false;
1440 int32_t
Value = -(int32_t)
CE->getValue();
1444 bool isConstantPoolImm()
const {
return Kind == k_ConstantPoolImmediate; }
1445 bool isBitfield()
const {
return Kind == k_BitfieldDescriptor; }
1446 bool isPostIdxRegShifted()
const {
1447 return Kind == k_PostIndexRegister &&
1448 ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
1450 bool isPostIdxReg()
const {
1453 bool isMemNoOffset(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1457 return Memory.OffsetRegNum == 0 &&
Memory.OffsetImm ==
nullptr &&
1458 (alignOK ||
Memory.Alignment == Alignment);
1460 bool isMemNoOffsetT2(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1464 if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].
contains(
1469 return Memory.OffsetRegNum == 0 &&
Memory.OffsetImm ==
nullptr &&
1470 (alignOK ||
Memory.Alignment == Alignment);
1472 bool isMemNoOffsetT2NoSp(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1476 if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].
contains(
1481 return Memory.OffsetRegNum == 0 &&
Memory.OffsetImm ==
nullptr &&
1482 (alignOK ||
Memory.Alignment == Alignment);
1484 bool isMemNoOffsetT(
bool alignOK =
false,
unsigned Alignment = 0)
const {
1488 if (!ARMMCRegisterClasses[ARM::tGPRRegClassID].
contains(
1493 return Memory.OffsetRegNum == 0 &&
Memory.OffsetImm ==
nullptr &&
1494 (alignOK ||
Memory.Alignment == Alignment);
1496 bool isMemPCRelImm12()
const {
1497 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1500 if (
Memory.BaseRegNum != ARM::PC)
1503 if (!
Memory.OffsetImm)
return true;
1504 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1505 int64_t Val =
CE->getValue();
1506 return (Val > -4096 && Val < 4096) ||
1507 (Val == std::numeric_limits<int32_t>::min());
1512 bool isAlignedMemory()
const {
1513 return isMemNoOffset(
true);
1516 bool isAlignedMemoryNone()
const {
1517 return isMemNoOffset(
false, 0);
1520 bool isDupAlignedMemoryNone()
const {
1521 return isMemNoOffset(
false, 0);
1524 bool isAlignedMemory16()
const {
1525 if (isMemNoOffset(
false, 2))
1527 return isMemNoOffset(
false, 0);
1530 bool isDupAlignedMemory16()
const {
1531 if (isMemNoOffset(
false, 2))
1533 return isMemNoOffset(
false, 0);
1536 bool isAlignedMemory32()
const {
1537 if (isMemNoOffset(
false, 4))
1539 return isMemNoOffset(
false, 0);
1542 bool isDupAlignedMemory32()
const {
1543 if (isMemNoOffset(
false, 4))
1545 return isMemNoOffset(
false, 0);
1548 bool isAlignedMemory64()
const {
1549 if (isMemNoOffset(
false, 8))
1551 return isMemNoOffset(
false, 0);
1554 bool isDupAlignedMemory64()
const {
1555 if (isMemNoOffset(
false, 8))
1557 return isMemNoOffset(
false, 0);
1560 bool isAlignedMemory64or128()
const {
1561 if (isMemNoOffset(
false, 8))
1563 if (isMemNoOffset(
false, 16))
1565 return isMemNoOffset(
false, 0);
1568 bool isDupAlignedMemory64or128()
const {
1569 if (isMemNoOffset(
false, 8))
1571 if (isMemNoOffset(
false, 16))
1573 return isMemNoOffset(
false, 0);
1576 bool isAlignedMemory64or128or256()
const {
1577 if (isMemNoOffset(
false, 8))
1579 if (isMemNoOffset(
false, 16))
1581 if (isMemNoOffset(
false, 32))
1583 return isMemNoOffset(
false, 0);
1586 bool isAddrMode2()
const {
1587 if (!isGPRMem() ||
Memory.Alignment != 0)
return false;
1589 if (
Memory.OffsetRegNum)
return true;
1591 if (!
Memory.OffsetImm)
return true;
1592 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1593 int64_t Val =
CE->getValue();
1594 return Val > -4096 && Val < 4096;
1599 bool isAM2OffsetImm()
const {
1600 if (!
isImm())
return false;
1603 if (!CE)
return false;
1604 int64_t Val =
CE->getValue();
1605 return (Val == std::numeric_limits<int32_t>::min()) ||
1606 (Val > -4096 && Val < 4096);
1609 bool isAddrMode3()
const {
1613 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1615 if (!isGPRMem() ||
Memory.Alignment != 0)
return false;
1619 if (
Memory.OffsetRegNum)
return true;
1621 if (!
Memory.OffsetImm)
return true;
1622 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1623 int64_t Val =
CE->getValue();
1626 return (Val > -256 && Val < 256) ||
1627 Val == std::numeric_limits<int32_t>::min();
1632 bool isAM3Offset()
const {
1639 if (!CE)
return false;
1640 int64_t Val =
CE->getValue();
1642 return (Val > -256 && Val < 256) ||
1643 Val == std::numeric_limits<int32_t>::min();
1646 bool isAddrMode5()
const {
1650 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1652 if (!isGPRMem() ||
Memory.Alignment != 0)
return false;
1654 if (
Memory.OffsetRegNum)
return false;
1656 if (!
Memory.OffsetImm)
return true;
1657 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1658 int64_t Val =
CE->getValue();
1659 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1660 Val == std::numeric_limits<int32_t>::min();
1665 bool isAddrMode5FP16()
const {
1669 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1671 if (!isGPRMem() ||
Memory.Alignment != 0)
return false;
1673 if (
Memory.OffsetRegNum)
return false;
1675 if (!
Memory.OffsetImm)
return true;
1676 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1677 int64_t Val =
CE->getValue();
1678 return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1679 Val == std::numeric_limits<int32_t>::min();
1684 bool isMemTBB()
const {
1685 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.isNegative ||
1691 bool isMemTBH()
const {
1692 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.isNegative ||
1699 bool isMemRegOffset()
const {
1700 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.Alignment != 0)
1705 bool isT2MemRegOffset()
const {
1706 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.isNegative ||
1717 bool isMemThumbRR()
const {
1720 if (!isGPRMem() || !
Memory.OffsetRegNum ||
Memory.isNegative ||
1727 bool isMemThumbRIs4()
const {
1728 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
1732 if (!
Memory.OffsetImm)
return true;
1733 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1734 int64_t Val =
CE->getValue();
1735 return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1740 bool isMemThumbRIs2()
const {
1741 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
1745 if (!
Memory.OffsetImm)
return true;
1746 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1747 int64_t Val =
CE->getValue();
1748 return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1753 bool isMemThumbRIs1()
const {
1754 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
1758 if (!
Memory.OffsetImm)
return true;
1759 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1760 int64_t Val =
CE->getValue();
1761 return Val >= 0 && Val <= 31;
1766 bool isMemThumbSPI()
const {
1767 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
1771 if (!
Memory.OffsetImm)
return true;
1772 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1773 int64_t Val =
CE->getValue();
1774 return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1779 bool isMemImm8s4Offset()
const {
1783 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1785 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1788 if (!
Memory.OffsetImm)
return true;
1789 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1790 int64_t Val =
CE->getValue();
1792 return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1793 Val == std::numeric_limits<int32_t>::min();
1798 bool isMemImm7s4Offset()
const {
1802 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1804 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0 ||
1805 !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1809 if (!
Memory.OffsetImm)
return true;
1810 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1811 int64_t Val =
CE->getValue();
1813 return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
1818 bool isMemImm0_1020s4Offset()
const {
1819 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1822 if (!
Memory.OffsetImm)
return true;
1823 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1824 int64_t Val =
CE->getValue();
1825 return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1830 bool isMemImm8Offset()
const {
1831 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1834 if (
Memory.BaseRegNum == ARM::PC)
return false;
1836 if (!
Memory.OffsetImm)
return true;
1837 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1838 int64_t Val =
CE->getValue();
1839 return (Val == std::numeric_limits<int32_t>::min()) ||
1840 (Val > -256 && Val < 256);
1845 template<
unsigned Bits,
unsigned RegClassID>
1846 bool isMemImm7ShiftedOffset()
const {
1847 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0 ||
1848 !ARMMCRegisterClasses[RegClassID].contains(
Memory.BaseRegNum))
1854 if (!
Memory.OffsetImm)
return true;
1855 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1856 int64_t Val =
CE->getValue();
1860 if (Val == INT32_MIN)
1863 unsigned Divisor = 1U <<
Bits;
1866 if (Val % Divisor != 0)
1871 return (Val >= -127 && Val <= 127);
1876 template <
int shift>
bool isMemRegRQOffset()
const {
1877 if (!isMVEMem() ||
Memory.OffsetImm !=
nullptr ||
Memory.Alignment != 0)
1880 if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].
contains(
1883 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
1897 template <
int shift>
bool isMemRegQOffset()
const {
1898 if (!isMVEMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1901 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
1907 static_assert(shift < 56,
1908 "Such that we dont shift by a value higher than 62");
1909 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1910 int64_t Val =
CE->getValue();
1913 if ((Val & ((1U << shift) - 1)) != 0)
1919 int64_t
Range = (1U << (7 + shift)) - 1;
1920 return (Val == INT32_MIN) || (Val > -
Range && Val <
Range);
1925 bool isMemPosImm8Offset()
const {
1926 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1929 if (!
Memory.OffsetImm)
return true;
1930 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1931 int64_t Val =
CE->getValue();
1932 return Val >= 0 && Val < 256;
1937 bool isMemNegImm8Offset()
const {
1938 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1941 if (
Memory.BaseRegNum == ARM::PC)
return false;
1943 if (!
Memory.OffsetImm)
return false;
1944 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1945 int64_t Val =
CE->getValue();
1946 return (Val == std::numeric_limits<int32_t>::min()) ||
1947 (Val > -256 && Val < 0);
1952 bool isMemUImm12Offset()
const {
1953 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1956 if (!
Memory.OffsetImm)
return true;
1957 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1958 int64_t Val =
CE->getValue();
1959 return (Val >= 0 && Val < 4096);
1964 bool isMemImm12Offset()
const {
1969 if (
isImm() && !isa<MCConstantExpr>(getImm()))
1972 if (!isGPRMem() ||
Memory.OffsetRegNum != 0 ||
Memory.Alignment != 0)
1975 if (!
Memory.OffsetImm)
return true;
1976 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
1977 int64_t Val =
CE->getValue();
1978 return (Val > -4096 && Val < 4096) ||
1979 (Val == std::numeric_limits<int32_t>::min());
1986 bool isConstPoolAsmImm()
const {
1989 return (isConstantPoolImm());
1992 bool isPostIdxImm8()
const {
1993 if (!
isImm())
return false;
1995 if (!CE)
return false;
1996 int64_t Val =
CE->getValue();
1997 return (Val > -256 && Val < 256) ||
1998 (Val == std::numeric_limits<int32_t>::min());
2001 bool isPostIdxImm8s4()
const {
2002 if (!
isImm())
return false;
2004 if (!CE)
return false;
2005 int64_t Val =
CE->getValue();
2006 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
2007 (Val == std::numeric_limits<int32_t>::min());
2010 bool isMSRMask()
const {
return Kind == k_MSRMask; }
2011 bool isBankedReg()
const {
return Kind == k_BankedReg; }
2012 bool isProcIFlags()
const {
return Kind == k_ProcIFlags; }
2015 bool isSingleSpacedVectorList()
const {
2016 return Kind == k_VectorList && !VectorList.isDoubleSpaced;
2019 bool isDoubleSpacedVectorList()
const {
2020 return Kind == k_VectorList && VectorList.isDoubleSpaced;
2023 bool isVecListOneD()
const {
2024 if (!isSingleSpacedVectorList())
return false;
2025 return VectorList.Count == 1;
2028 bool isVecListTwoMQ()
const {
2029 return isSingleSpacedVectorList() && VectorList.Count == 2 &&
2030 ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2034 bool isVecListDPair()
const {
2035 if (!isSingleSpacedVectorList())
return false;
2036 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2040 bool isVecListThreeD()
const {
2041 if (!isSingleSpacedVectorList())
return false;
2042 return VectorList.Count == 3;
2045 bool isVecListFourD()
const {
2046 if (!isSingleSpacedVectorList())
return false;
2047 return VectorList.Count == 4;
2050 bool isVecListDPairSpaced()
const {
2051 if (Kind != k_VectorList)
return false;
2052 if (isSingleSpacedVectorList())
return false;
2053 return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
2057 bool isVecListThreeQ()
const {
2058 if (!isDoubleSpacedVectorList())
return false;
2059 return VectorList.Count == 3;
2062 bool isVecListFourQ()
const {
2063 if (!isDoubleSpacedVectorList())
return false;
2064 return VectorList.Count == 4;
2067 bool isVecListFourMQ()
const {
2068 return isSingleSpacedVectorList() && VectorList.Count == 4 &&
2069 ARMMCRegisterClasses[ARM::MQPRRegClassID].contains(
2073 bool isSingleSpacedVectorAllLanes()
const {
2074 return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
2077 bool isDoubleSpacedVectorAllLanes()
const {
2078 return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
2081 bool isVecListOneDAllLanes()
const {
2082 if (!isSingleSpacedVectorAllLanes())
return false;
2083 return VectorList.Count == 1;
2086 bool isVecListDPairAllLanes()
const {
2087 if (!isSingleSpacedVectorAllLanes())
return false;
2088 return (ARMMCRegisterClasses[ARM::DPairRegClassID]
2092 bool isVecListDPairSpacedAllLanes()
const {
2093 if (!isDoubleSpacedVectorAllLanes())
return false;
2094 return VectorList.Count == 2;
2097 bool isVecListThreeDAllLanes()
const {
2098 if (!isSingleSpacedVectorAllLanes())
return false;
2099 return VectorList.Count == 3;
2102 bool isVecListThreeQAllLanes()
const {
2103 if (!isDoubleSpacedVectorAllLanes())
return false;
2104 return VectorList.Count == 3;
2107 bool isVecListFourDAllLanes()
const {
2108 if (!isSingleSpacedVectorAllLanes())
return false;
2109 return VectorList.Count == 4;
2112 bool isVecListFourQAllLanes()
const {
2113 if (!isDoubleSpacedVectorAllLanes())
return false;
2114 return VectorList.Count == 4;
2117 bool isSingleSpacedVectorIndexed()
const {
2118 return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
2121 bool isDoubleSpacedVectorIndexed()
const {
2122 return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
2125 bool isVecListOneDByteIndexed()
const {
2126 if (!isSingleSpacedVectorIndexed())
return false;
2127 return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
2130 bool isVecListOneDHWordIndexed()
const {
2131 if (!isSingleSpacedVectorIndexed())
return false;
2132 return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
2135 bool isVecListOneDWordIndexed()
const {
2136 if (!isSingleSpacedVectorIndexed())
return false;
2137 return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
2140 bool isVecListTwoDByteIndexed()
const {
2141 if (!isSingleSpacedVectorIndexed())
return false;
2142 return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
2145 bool isVecListTwoDHWordIndexed()
const {
2146 if (!isSingleSpacedVectorIndexed())
return false;
2147 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2150 bool isVecListTwoQWordIndexed()
const {
2151 if (!isDoubleSpacedVectorIndexed())
return false;
2152 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2155 bool isVecListTwoQHWordIndexed()
const {
2156 if (!isDoubleSpacedVectorIndexed())
return false;
2157 return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
2160 bool isVecListTwoDWordIndexed()
const {
2161 if (!isSingleSpacedVectorIndexed())
return false;
2162 return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
2165 bool isVecListThreeDByteIndexed()
const {
2166 if (!isSingleSpacedVectorIndexed())
return false;
2167 return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
2170 bool isVecListThreeDHWordIndexed()
const {
2171 if (!isSingleSpacedVectorIndexed())
return false;
2172 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2175 bool isVecListThreeQWordIndexed()
const {
2176 if (!isDoubleSpacedVectorIndexed())
return false;
2177 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2180 bool isVecListThreeQHWordIndexed()
const {
2181 if (!isDoubleSpacedVectorIndexed())
return false;
2182 return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
2185 bool isVecListThreeDWordIndexed()
const {
2186 if (!isSingleSpacedVectorIndexed())
return false;
2187 return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
2190 bool isVecListFourDByteIndexed()
const {
2191 if (!isSingleSpacedVectorIndexed())
return false;
2192 return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
2195 bool isVecListFourDHWordIndexed()
const {
2196 if (!isSingleSpacedVectorIndexed())
return false;
2197 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2200 bool isVecListFourQWordIndexed()
const {
2201 if (!isDoubleSpacedVectorIndexed())
return false;
2202 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2205 bool isVecListFourQHWordIndexed()
const {
2206 if (!isDoubleSpacedVectorIndexed())
return false;
2207 return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
2210 bool isVecListFourDWordIndexed()
const {
2211 if (!isSingleSpacedVectorIndexed())
return false;
2212 return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
2215 bool isVectorIndex()
const {
return Kind == k_VectorIndex; }
2217 template <
unsigned NumLanes>
2218 bool isVectorIndexInRange()
const {
2219 if (Kind != k_VectorIndex)
return false;
2220 return VectorIndex.Val < NumLanes;
2223 bool isVectorIndex8()
const {
return isVectorIndexInRange<8>(); }
2224 bool isVectorIndex16()
const {
return isVectorIndexInRange<4>(); }
2225 bool isVectorIndex32()
const {
return isVectorIndexInRange<2>(); }
2226 bool isVectorIndex64()
const {
return isVectorIndexInRange<1>(); }
2228 template<
int PermittedValue,
int OtherPermittedValue>
2229 bool isMVEPairVectorIndex()
const {
2230 if (Kind != k_VectorIndex)
return false;
2231 return VectorIndex.Val == PermittedValue ||
2232 VectorIndex.Val == OtherPermittedValue;
2235 bool isNEONi8splat()
const {
2236 if (!
isImm())
return false;
2239 if (!CE)
return false;
2240 int64_t
Value =
CE->getValue();
2247 if (isNEONByteReplicate(2))
2253 if (!CE)
return false;
2254 unsigned Value =
CE->getValue();
2258 bool isNEONi16splatNot()
const {
2263 if (!CE)
return false;
2264 unsigned Value =
CE->getValue();
2269 if (isNEONByteReplicate(4))
2275 if (!CE)
return false;
2276 unsigned Value =
CE->getValue();
2280 bool isNEONi32splatNot()
const {
2285 if (!CE)
return false;
2286 unsigned Value =
CE->getValue();
2290 static bool isValidNEONi32vmovImm(int64_t
Value) {
2293 return ((
Value & 0xffffffffffffff00) == 0) ||
2294 ((
Value & 0xffffffffffff00ff) == 0) ||
2295 ((
Value & 0xffffffffff00ffff) == 0) ||
2296 ((
Value & 0xffffffff00ffffff) == 0) ||
2297 ((
Value & 0xffffffffffff00ff) == 0xff) ||
2298 ((
Value & 0xffffffffff00ffff) == 0xffff);
2301 bool isNEONReplicate(
unsigned Width,
unsigned NumElems,
bool Inv)
const {
2302 assert((Width == 8 || Width == 16 || Width == 32) &&
2303 "Invalid element width");
2304 assert(NumElems * Width <= 64 &&
"Invalid result width");
2312 int64_t
Value =
CE->getValue();
2320 if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
2322 if (Width == 32 && !isValidNEONi32vmovImm(Elem))
2325 for (
unsigned i = 1; i < NumElems; ++i) {
2327 if ((
Value & Mask) != Elem)
2333 bool isNEONByteReplicate(
unsigned NumBytes)
const {
2334 return isNEONReplicate(8, NumBytes,
false);
2337 static void checkNeonReplicateArgs(
unsigned FromW,
unsigned ToW) {
2338 assert((FromW == 8 || FromW == 16 || FromW == 32) &&
2339 "Invalid source width");
2340 assert((ToW == 16 || ToW == 32 || ToW == 64) &&
2341 "Invalid destination width");
2342 assert(FromW < ToW &&
"ToW is not less than FromW");
2345 template<
unsigned FromW,
unsigned ToW>
2346 bool isNEONmovReplicate()
const {
2347 checkNeonReplicateArgs(FromW, ToW);
2348 if (ToW == 64 && isNEONi64splat())
2350 return isNEONReplicate(FromW, ToW / FromW,
false);
2353 template<
unsigned FromW,
unsigned ToW>
2354 bool isNEONinvReplicate()
const {
2355 checkNeonReplicateArgs(FromW, ToW);
2356 return isNEONReplicate(FromW, ToW / FromW,
true);
2359 bool isNEONi32vmov()
const {
2360 if (isNEONByteReplicate(4))
2368 return isValidNEONi32vmovImm(
CE->getValue());
2371 bool isNEONi32vmovNeg()
const {
2372 if (!
isImm())
return false;
2375 if (!CE)
return false;
2376 return isValidNEONi32vmovImm(~
CE->getValue());
2379 bool isNEONi64splat()
const {
2380 if (!
isImm())
return false;
2383 if (!CE)
return false;
2386 for (
unsigned i = 0; i < 8; ++i, Value >>= 8)
2387 if ((
Value & 0xff) != 0 && (
Value & 0xff) != 0xff)
return false;
2391 template<
int64_t Angle,
int64_t Remainder>
2392 bool isComplexRotation()
const {
2393 if (!
isImm())
return false;
2396 if (!CE)
return false;
2399 return (
Value % Angle == Remainder &&
Value <= 270);
2402 bool isMVELongShift()
const {
2403 if (!
isImm())
return false;
2406 if (!CE)
return false;
2411 bool isMveSaturateOp()
const {
2412 if (!
isImm())
return false;
2414 if (!CE)
return false;
2419 bool isITCondCodeNoAL()
const {
2420 if (!isITCondCode())
return false;
2425 bool isITCondCodeRestrictedI()
const {
2426 if (!isITCondCode())
2432 bool isITCondCodeRestrictedS()
const {
2433 if (!isITCondCode())
2440 bool isITCondCodeRestrictedU()
const {
2441 if (!isITCondCode())
2447 bool isITCondCodeRestrictedFP()
const {
2448 if (!isITCondCode())
2459 else if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2465 void addARMBranchTargetOperands(
MCInst &Inst,
unsigned N)
const {
2466 assert(
N == 1 &&
"Invalid number of operands!");
2467 addExpr(Inst, getImm());
2470 void addThumbBranchTargetOperands(
MCInst &Inst,
unsigned N)
const {
2471 assert(
N == 1 &&
"Invalid number of operands!");
2472 addExpr(Inst, getImm());
2475 void addCondCodeOperands(
MCInst &Inst,
unsigned N)
const {
2476 assert(
N == 2 &&
"Invalid number of operands!");
2482 void addVPTPredNOperands(
MCInst &Inst,
unsigned N)
const {
2483 assert(
N == 3 &&
"Invalid number of operands!");
2485 unsigned RegNum = getVPTPred() ==
ARMVCC::None ? 0: ARM::P0;
2490 void addVPTPredROperands(
MCInst &Inst,
unsigned N)
const {
2491 assert(
N == 4 &&
"Invalid number of operands!");
2492 addVPTPredNOperands(Inst,
N-1);
2502 "Inactive register in vpred_r is not tied to an output!");
2508 void addCoprocNumOperands(
MCInst &Inst,
unsigned N)
const {
2509 assert(
N == 1 &&
"Invalid number of operands!");
2513 void addCoprocRegOperands(
MCInst &Inst,
unsigned N)
const {
2514 assert(
N == 1 &&
"Invalid number of operands!");
2518 void addCoprocOptionOperands(
MCInst &Inst,
unsigned N)
const {
2519 assert(
N == 1 &&
"Invalid number of operands!");
2523 void addITMaskOperands(
MCInst &Inst,
unsigned N)
const {
2524 assert(
N == 1 &&
"Invalid number of operands!");
2528 void addITCondCodeOperands(
MCInst &Inst,
unsigned N)
const {
2529 assert(
N == 1 &&
"Invalid number of operands!");
2533 void addITCondCodeInvOperands(
MCInst &Inst,
unsigned N)
const {
2534 assert(
N == 1 &&
"Invalid number of operands!");
2538 void addCCOutOperands(
MCInst &Inst,
unsigned N)
const {
2539 assert(
N == 1 &&
"Invalid number of operands!");
2543 void addRegOperands(
MCInst &Inst,
unsigned N)
const {
2544 assert(
N == 1 &&
"Invalid number of operands!");
2548 void addRegShiftedRegOperands(
MCInst &Inst,
unsigned N)
const {
2549 assert(
N == 3 &&
"Invalid number of operands!");
2550 assert(isRegShiftedReg() &&
2551 "addRegShiftedRegOperands() on non-RegShiftedReg!");
2558 void addRegShiftedImmOperands(
MCInst &Inst,
unsigned N)
const {
2559 assert(
N == 2 &&
"Invalid number of operands!");
2560 assert(isRegShiftedImm() &&
2561 "addRegShiftedImmOperands() on non-RegShiftedImm!");
2564 unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2569 void addShifterImmOperands(
MCInst &Inst,
unsigned N)
const {
2570 assert(
N == 1 &&
"Invalid number of operands!");
2575 void addRegListOperands(
MCInst &Inst,
unsigned N)
const {
2576 assert(
N == 1 &&
"Invalid number of operands!");
2578 for (
unsigned Reg : RegList)
2582 void addRegListWithAPSROperands(
MCInst &Inst,
unsigned N)
const {
2583 assert(
N == 1 &&
"Invalid number of operands!");
2585 for (
unsigned Reg : RegList)
2589 void addDPRRegListOperands(
MCInst &Inst,
unsigned N)
const {
2590 addRegListOperands(Inst,
N);
2593 void addSPRRegListOperands(
MCInst &Inst,
unsigned N)
const {
2594 addRegListOperands(Inst,
N);
2597 void addFPSRegListWithVPROperands(
MCInst &Inst,
unsigned N)
const {
2598 addRegListOperands(Inst,
N);
2601 void addFPDRegListWithVPROperands(
MCInst &Inst,
unsigned N)
const {
2602 addRegListOperands(Inst,
N);
2605 void addRotImmOperands(
MCInst &Inst,
unsigned N)
const {
2606 assert(
N == 1 &&
"Invalid number of operands!");
2611 void addModImmOperands(
MCInst &Inst,
unsigned N)
const {
2612 assert(
N == 1 &&
"Invalid number of operands!");
2616 return addImmOperands(Inst,
N);
2621 void addModImmNotOperands(
MCInst &Inst,
unsigned N)
const {
2622 assert(
N == 1 &&
"Invalid number of operands!");
2628 void addModImmNegOperands(
MCInst &Inst,
unsigned N)
const {
2629 assert(
N == 1 &&
"Invalid number of operands!");
2635 void addThumbModImmNeg8_255Operands(
MCInst &Inst,
unsigned N)
const {
2636 assert(
N == 1 &&
"Invalid number of operands!");
2642 void addThumbModImmNeg1_7Operands(
MCInst &Inst,
unsigned N)
const {
2643 assert(
N == 1 &&
"Invalid number of operands!");
2649 void addBitfieldOperands(
MCInst &Inst,
unsigned N)
const {
2650 assert(
N == 1 &&
"Invalid number of operands!");
2656 (32 - (lsb + width)));
2660 void addImmOperands(
MCInst &Inst,
unsigned N)
const {
2661 assert(
N == 1 &&
"Invalid number of operands!");
2662 addExpr(Inst, getImm());
2665 void addFBits16Operands(
MCInst &Inst,
unsigned N)
const {
2666 assert(
N == 1 &&
"Invalid number of operands!");
2671 void addFBits32Operands(
MCInst &Inst,
unsigned N)
const {
2672 assert(
N == 1 &&
"Invalid number of operands!");
2677 void addFPImmOperands(
MCInst &Inst,
unsigned N)
const {
2678 assert(
N == 1 &&
"Invalid number of operands!");
2684 void addImm8s4Operands(
MCInst &Inst,
unsigned N)
const {
2685 assert(
N == 1 &&
"Invalid number of operands!");
2692 void addImm7s4Operands(
MCInst &Inst,
unsigned N)
const {
2693 assert(
N == 1 &&
"Invalid number of operands!");
2700 void addImm7Shift0Operands(
MCInst &Inst,
unsigned N)
const {
2701 assert(
N == 1 &&
"Invalid number of operands!");
2706 void addImm7Shift1Operands(
MCInst &Inst,
unsigned N)
const {
2707 assert(
N == 1 &&
"Invalid number of operands!");
2712 void addImm7Shift2Operands(
MCInst &Inst,
unsigned N)
const {
2713 assert(
N == 1 &&
"Invalid number of operands!");
2718 void addImm7Operands(
MCInst &Inst,
unsigned N)
const {
2719 assert(
N == 1 &&
"Invalid number of operands!");
2724 void addImm0_1020s4Operands(
MCInst &Inst,
unsigned N)
const {
2725 assert(
N == 1 &&
"Invalid number of operands!");
2732 void addImm0_508s4NegOperands(
MCInst &Inst,
unsigned N)
const {
2733 assert(
N == 1 &&
"Invalid number of operands!");
2740 void addImm0_508s4Operands(
MCInst &Inst,
unsigned N)
const {
2741 assert(
N == 1 &&
"Invalid number of operands!");
2748 void addImm1_16Operands(
MCInst &Inst,
unsigned N)
const {
2749 assert(
N == 1 &&
"Invalid number of operands!");
2756 void addImm1_32Operands(
MCInst &Inst,
unsigned N)
const {
2757 assert(
N == 1 &&
"Invalid number of operands!");
2764 void addImmThumbSROperands(
MCInst &Inst,
unsigned N)
const {
2765 assert(
N == 1 &&
"Invalid number of operands!");
2769 unsigned Imm =
CE->getValue();
2773 void addPKHASRImmOperands(
MCInst &Inst,
unsigned N)
const {
2774 assert(
N == 1 &&
"Invalid number of operands!");
2778 int Val =
CE->getValue();
2782 void addT2SOImmNotOperands(
MCInst &Inst,
unsigned N)
const {
2783 assert(
N == 1 &&
"Invalid number of operands!");
2790 void addT2SOImmNegOperands(
MCInst &Inst,
unsigned N)
const {
2791 assert(
N == 1 &&
"Invalid number of operands!");
2798 void addImm0_4095NegOperands(
MCInst &Inst,
unsigned N)
const {
2799 assert(
N == 1 &&
"Invalid number of operands!");
2806 void addUnsignedOffset_b8s2Operands(
MCInst &Inst,
unsigned N)
const {
2807 if(
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2815 void addThumbMemPCOperands(
MCInst &Inst,
unsigned N)
const {
2816 assert(
N == 1 &&
"Invalid number of operands!");
2828 assert(isGPRMem() &&
"Unknown value type!");
2829 assert(isa<MCConstantExpr>(
Memory.OffsetImm) &&
"Unknown value type!");
2830 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
2836 void addMemBarrierOptOperands(
MCInst &Inst,
unsigned N)
const {
2837 assert(
N == 1 &&
"Invalid number of operands!");
2841 void addInstSyncBarrierOptOperands(
MCInst &Inst,
unsigned N)
const {
2842 assert(
N == 1 &&
"Invalid number of operands!");
2846 void addTraceSyncBarrierOptOperands(
MCInst &Inst,
unsigned N)
const {
2847 assert(
N == 1 &&
"Invalid number of operands!");
2851 void addMemNoOffsetOperands(
MCInst &Inst,
unsigned N)
const {
2852 assert(
N == 1 &&
"Invalid number of operands!");
2856 void addMemNoOffsetT2Operands(
MCInst &Inst,
unsigned N)
const {
2857 assert(
N == 1 &&
"Invalid number of operands!");
2861 void addMemNoOffsetT2NoSpOperands(
MCInst &Inst,
unsigned N)
const {
2862 assert(
N == 1 &&
"Invalid number of operands!");
2866 void addMemNoOffsetTOperands(
MCInst &Inst,
unsigned N)
const {
2867 assert(
N == 1 &&
"Invalid number of operands!");
2871 void addMemPCRelImm12Operands(
MCInst &Inst,
unsigned N)
const {
2872 assert(
N == 1 &&
"Invalid number of operands!");
2873 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
2879 void addAdrLabelOperands(
MCInst &Inst,
unsigned N)
const {
2880 assert(
N == 1 &&
"Invalid number of operands!");
2885 if (!isa<MCConstantExpr>(getImm())) {
2891 int Val =
CE->getValue();
2895 void addAlignedMemoryOperands(
MCInst &Inst,
unsigned N)
const {
2896 assert(
N == 2 &&
"Invalid number of operands!");
2901 void addDupAlignedMemoryNoneOperands(
MCInst &Inst,
unsigned N)
const {
2902 addAlignedMemoryOperands(Inst,
N);
2905 void addAlignedMemoryNoneOperands(
MCInst &Inst,
unsigned N)
const {
2906 addAlignedMemoryOperands(Inst,
N);
2909 void addAlignedMemory16Operands(
MCInst &Inst,
unsigned N)
const {
2910 addAlignedMemoryOperands(Inst,
N);
2913 void addDupAlignedMemory16Operands(
MCInst &Inst,
unsigned N)
const {
2914 addAlignedMemoryOperands(Inst,
N);
2917 void addAlignedMemory32Operands(
MCInst &Inst,
unsigned N)
const {
2918 addAlignedMemoryOperands(Inst,
N);
2921 void addDupAlignedMemory32Operands(
MCInst &Inst,
unsigned N)
const {
2922 addAlignedMemoryOperands(Inst,
N);
2925 void addAlignedMemory64Operands(
MCInst &Inst,
unsigned N)
const {
2926 addAlignedMemoryOperands(Inst,
N);
2929 void addDupAlignedMemory64Operands(
MCInst &Inst,
unsigned N)
const {
2930 addAlignedMemoryOperands(Inst,
N);
2933 void addAlignedMemory64or128Operands(
MCInst &Inst,
unsigned N)
const {
2934 addAlignedMemoryOperands(Inst,
N);
2937 void addDupAlignedMemory64or128Operands(
MCInst &Inst,
unsigned N)
const {
2938 addAlignedMemoryOperands(Inst,
N);
2941 void addAlignedMemory64or128or256Operands(
MCInst &Inst,
unsigned N)
const {
2942 addAlignedMemoryOperands(Inst,
N);
2945 void addAddrMode2Operands(
MCInst &Inst,
unsigned N)
const {
2946 assert(
N == 3 &&
"Invalid number of operands!");
2949 if (!
Memory.OffsetRegNum) {
2952 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
2953 int32_t Val =
CE->getValue();
2956 if (Val == std::numeric_limits<int32_t>::min())
2974 void addAM2OffsetImmOperands(
MCInst &Inst,
unsigned N)
const {
2975 assert(
N == 2 &&
"Invalid number of operands!");
2977 assert(CE &&
"non-constant AM2OffsetImm operand!");
2978 int32_t Val =
CE->getValue();
2981 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2982 if (Val < 0) Val = -Val;
2988 void addAddrMode3Operands(
MCInst &Inst,
unsigned N)
const {
2989 assert(
N == 3 &&
"Invalid number of operands!");
3002 if (!
Memory.OffsetRegNum) {
3005 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
3006 int32_t Val =
CE->getValue();
3009 if (Val == std::numeric_limits<int32_t>::min())
3026 void addAM3OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3027 assert(
N == 2 &&
"Invalid number of operands!");
3028 if (Kind == k_PostIndexRegister) {
3038 int32_t Val =
CE->getValue();
3041 if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
3042 if (Val < 0) Val = -Val;
3048 void addAddrMode5Operands(
MCInst &Inst,
unsigned N)
const {
3049 assert(
N == 2 &&
"Invalid number of operands!");
3062 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
3064 int32_t Val =
CE->getValue() / 4;
3067 if (Val == std::numeric_limits<int32_t>::min())
3077 void addAddrMode5FP16Operands(
MCInst &Inst,
unsigned N)
const {
3078 assert(
N == 2 &&
"Invalid number of operands!");
3092 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm)) {
3093 int32_t Val =
CE->getValue() / 2;
3096 if (Val == std::numeric_limits<int32_t>::min())
3106 void addMemImm8s4OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3107 assert(
N == 2 &&
"Invalid number of operands!");
3118 addExpr(Inst,
Memory.OffsetImm);
3121 void addMemImm7s4OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3122 assert(
N == 2 &&
"Invalid number of operands!");
3133 addExpr(Inst,
Memory.OffsetImm);
3136 void addMemImm0_1020s4OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3137 assert(
N == 2 &&
"Invalid number of operands!");
3141 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
3148 void addMemImmOffsetOperands(
MCInst &Inst,
unsigned N)
const {
3149 assert(
N == 2 &&
"Invalid number of operands!");
3151 addExpr(Inst,
Memory.OffsetImm);
3154 void addMemRegRQOffsetOperands(
MCInst &Inst,
unsigned N)
const {
3155 assert(
N == 2 &&
"Invalid number of operands!");
3160 void addMemUImm12OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3161 assert(
N == 2 &&
"Invalid number of operands!");
3164 addExpr(Inst, getImm());
3171 addExpr(Inst,
Memory.OffsetImm);
3174 void addMemImm12OffsetOperands(
MCInst &Inst,
unsigned N)
const {
3175 assert(
N == 2 &&
"Invalid number of operands!");
3178 addExpr(Inst, getImm());
3185 addExpr(Inst,
Memory.OffsetImm);
3188 void addConstPoolAsmImmOperands(
MCInst &Inst,
unsigned N)
const {
3189 assert(
N == 1 &&
"Invalid number of operands!");
3192 addExpr(Inst, getConstantPoolImm());
3195 void addMemTBBOperands(
MCInst &Inst,
unsigned N)
const {
3196 assert(
N == 2 &&
"Invalid number of operands!");
3201 void addMemTBHOperands(
MCInst &Inst,
unsigned N)
const {
3202 assert(
N == 2 &&
"Invalid number of operands!");
3207 void addMemRegOffsetOperands(
MCInst &Inst,
unsigned N)
const {
3208 assert(
N == 3 &&
"Invalid number of operands!");
3217 void addT2MemRegOffsetOperands(
MCInst &Inst,
unsigned N)
const {
3218 assert(
N == 3 &&
"Invalid number of operands!");
3224 void addMemThumbRROperands(
MCInst &Inst,
unsigned N)
const {
3225 assert(
N == 2 &&
"Invalid number of operands!");
3230 void addMemThumbRIs4Operands(
MCInst &Inst,
unsigned N)
const {
3231 assert(
N == 2 &&
"Invalid number of operands!");
3235 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
3242 void addMemThumbRIs2Operands(
MCInst &Inst,
unsigned N)
const {
3243 assert(
N == 2 &&
"Invalid number of operands!");
3247 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
3253 void addMemThumbRIs1Operands(
MCInst &Inst,
unsigned N)
const {
3254 assert(
N == 2 &&
"Invalid number of operands!");
3256 addExpr(Inst,
Memory.OffsetImm);
3259 void addMemThumbSPIOperands(
MCInst &Inst,
unsigned N)
const {
3260 assert(
N == 2 &&
"Invalid number of operands!");
3264 else if (
const auto *CE = dyn_cast<MCConstantExpr>(
Memory.OffsetImm))
3271 void addPostIdxImm8Operands(
MCInst &Inst,
unsigned N)
const {
3272 assert(
N == 1 &&
"Invalid number of operands!");
3274 assert(CE &&
"non-constant post-idx-imm8 operand!");
3275 int Imm =
CE->getValue();
3276 bool isAdd =
Imm >= 0;
3277 if (Imm == std::numeric_limits<int32_t>::min())
Imm = 0;
3282 void addPostIdxImm8s4Operands(
MCInst &Inst,
unsigned N)
const {
3283 assert(
N == 1 &&
"Invalid number of operands!");
3285 assert(CE &&
"non-constant post-idx-imm8s4 operand!");
3286 int Imm =
CE->getValue();
3287 bool isAdd =
Imm >= 0;
3288 if (Imm == std::numeric_limits<int32_t>::min())
Imm = 0;
3294 void addPostIdxRegOperands(
MCInst &Inst,
unsigned N)
const {
3295 assert(
N == 2 &&
"Invalid number of operands!");
3300 void addPostIdxRegShiftedOperands(
MCInst &Inst,
unsigned N)
const {
3301 assert(
N == 2 &&
"Invalid number of operands!");
3307 PostIdxReg.ShiftTy);
3311 void addPowerTwoOperands(
MCInst &Inst,
unsigned N)
const {
3312 assert(
N == 1 &&
"Invalid number of operands!");
3317 void addMSRMaskOperands(
MCInst &Inst,
unsigned N)
const {
3318 assert(
N == 1 &&
"Invalid number of operands!");
3322 void addBankedRegOperands(
MCInst &Inst,
unsigned N)
const {
3323 assert(
N == 1 &&
"Invalid number of operands!");
3327 void addProcIFlagsOperands(
MCInst &Inst,
unsigned N)
const {
3328 assert(
N == 1 &&
"Invalid number of operands!");
3332 void addVecListOperands(
MCInst &Inst,
unsigned N)
const {
3333 assert(
N == 1 &&
"Invalid number of operands!");
3337 void addMVEVecListOperands(
MCInst &Inst,
unsigned N)
const {
3338 assert(
N == 1 &&
"Invalid number of operands!");
3354 const MCRegisterClass *RC_in = &ARMMCRegisterClasses[ARM::MQPRRegClassID];
3356 (VectorList.Count == 2) ? &ARMMCRegisterClasses[ARM::MQQPRRegClassID]
3357 : &ARMMCRegisterClasses[ARM::MQQQQPRRegClassID];
3360 for (
I = 0;
I <
E;
I++)
3363 assert(
I <
E &&
"Invalid vector list start register!");
3368 void addVecListIndexedOperands(
MCInst &Inst,
unsigned N)
const {
3369 assert(
N == 2 &&
"Invalid number of operands!");
3374 void addVectorIndex8Operands(
MCInst &Inst,
unsigned N)
const {
3375 assert(
N == 1 &&
"Invalid number of operands!");
3379 void addVectorIndex16Operands(
MCInst &Inst,
unsigned N)
const {
3380 assert(
N == 1 &&
"Invalid number of operands!");
3384 void addVectorIndex32Operands(
MCInst &Inst,
unsigned N)
const {
3385 assert(
N == 1 &&
"Invalid number of operands!");
3389 void addVectorIndex64Operands(
MCInst &Inst,
unsigned N)
const {
3390 assert(
N == 1 &&
"Invalid number of operands!");
3394 void addMVEVectorIndexOperands(
MCInst &Inst,
unsigned N)
const {
3395 assert(
N == 1 &&
"Invalid number of operands!");
3399 void addMVEPairVectorIndexOperands(
MCInst &Inst,
unsigned N)
const {
3400 assert(
N == 1 &&
"Invalid number of operands!");
3404 void addNEONi8splatOperands(
MCInst &Inst,
unsigned N)
const {
3405 assert(
N == 1 &&
"Invalid number of operands!");
3412 void addNEONi16splatOperands(
MCInst &Inst,
unsigned N)
const {
3413 assert(
N == 1 &&
"Invalid number of operands!");
3416 unsigned Value =
CE->getValue();
3421 void addNEONi16splatNotOperands(
MCInst &Inst,
unsigned N)
const {
3422 assert(
N == 1 &&
"Invalid number of operands!");
3425 unsigned Value =
CE->getValue();
3430 void addNEONi32splatOperands(
MCInst &Inst,
unsigned N)
const {
3431 assert(
N == 1 &&
"Invalid number of operands!");
3434 unsigned Value =
CE->getValue();
3439 void addNEONi32splatNotOperands(
MCInst &Inst,
unsigned N)
const {
3440 assert(
N == 1 &&
"Invalid number of operands!");
3443 unsigned Value =
CE->getValue();
3448 void addNEONi8ReplicateOperands(
MCInst &Inst,
bool Inv)
const {
3453 "All instructions that wants to replicate non-zero byte "
3454 "always must be replaced with VMOVv8i8 or VMOVv16i8.");
3455 unsigned Value =
CE->getValue();
3458 unsigned B =
Value & 0xff;
3463 void addNEONinvi8ReplicateOperands(
MCInst &Inst,
unsigned N)
const {
3464 assert(
N == 1 &&
"Invalid number of operands!");
3465 addNEONi8ReplicateOperands(Inst,
true);
3468 static unsigned encodeNeonVMOVImmediate(
unsigned Value) {
3471 else if (
Value > 0xffff &&
Value <= 0xffffff)
3473 else if (
Value > 0xffffff)
3478 void addNEONi32vmovOperands(
MCInst &Inst,
unsigned N)
const {
3479 assert(
N == 1 &&
"Invalid number of operands!");
3482 unsigned Value = encodeNeonVMOVImmediate(
CE->getValue());
3486 void addNEONvmovi8ReplicateOperands(
MCInst &Inst,
unsigned N)
const {
3487 assert(
N == 1 &&
"Invalid number of operands!");
3488 addNEONi8ReplicateOperands(Inst,
false);
3491 void addNEONvmovi16ReplicateOperands(
MCInst &Inst,
unsigned N)
const {
3492 assert(
N == 1 &&
"Invalid number of operands!");
3498 "All instructions that want to replicate non-zero half-word "
3499 "always must be replaced with V{MOV,MVN}v{4,8}i16.");
3501 unsigned Elem =
Value & 0xffff;
3503 Elem = (Elem >> 8) | 0x200;
3507 void addNEONi32vmovNegOperands(
MCInst &Inst,
unsigned N)
const {
3508 assert(
N == 1 &&
"Invalid number of operands!");
3511 unsigned Value = encodeNeonVMOVImmediate(~
CE->getValue());
3515 void addNEONvmovi32ReplicateOperands(
MCInst &Inst,
unsigned N)
const {
3516 assert(
N == 1 &&
"Invalid number of operands!");
3522 "All instructions that want to replicate non-zero word "
3523 "always must be replaced with V{MOV,MVN}v{2,4}i32.");
3525 unsigned Elem = encodeNeonVMOVImmediate(
Value & 0xffffffff);
3529 void addNEONi64splatOperands(
MCInst &Inst,
unsigned N)
const {
3530 assert(
N == 1 &&
"Invalid number of operands!");
3535 for (
unsigned i = 0; i < 8; ++i, Value >>= 8) {
3541 void addComplexRotationEvenOperands(
MCInst &Inst,
unsigned N)
const {
3542 assert(
N == 1 &&
"Invalid number of operands!");
3547 void addComplexRotationOddOperands(
MCInst &Inst,
unsigned N)
const {
3548 assert(
N == 1 &&
"Invalid number of operands!");
3553 void addMveSaturateOperands(
MCInst &Inst,
unsigned N)
const {
3554 assert(
N == 1 &&
"Invalid number of operands!");
3556 unsigned Imm =
CE->getValue();
3557 assert((Imm == 48 || Imm == 64) &&
"Invalid saturate operand");
3563 static std::unique_ptr<ARMOperand> CreateITMask(
unsigned Mask,
SMLoc S) {
3564 auto Op = std::make_unique<ARMOperand>(k_ITCondMask);
3573 auto Op = std::make_unique<ARMOperand>(k_CondCode);
3582 auto Op = std::make_unique<ARMOperand>(k_VPTPred);
3589 static std::unique_ptr<ARMOperand> CreateCoprocNum(
unsigned CopVal,
SMLoc S) {
3590 auto Op = std::make_unique<ARMOperand>(k_CoprocNum);
3591 Op->Cop.Val = CopVal;
3597 static std::unique_ptr<ARMOperand> CreateCoprocReg(
unsigned CopVal,
SMLoc S) {
3598 auto Op = std::make_unique<ARMOperand>(k_CoprocReg);
3599 Op->Cop.Val = CopVal;
3605 static std::unique_ptr<ARMOperand> CreateCoprocOption(
unsigned Val,
SMLoc S,
3607 auto Op = std::make_unique<ARMOperand>(k_CoprocOption);
3614 static std::unique_ptr<ARMOperand> CreateCCOut(
unsigned RegNum,
SMLoc S) {
3615 auto Op = std::make_unique<ARMOperand>(k_CCOut);
3616 Op->Reg.RegNum = RegNum;
3622 static std::unique_ptr<ARMOperand> CreateToken(
StringRef Str,
SMLoc S) {
3623 auto Op = std::make_unique<ARMOperand>(k_Token);
3624 Op->Tok.Data = Str.data();
3625 Op->Tok.Length = Str.size();
3631 static std::unique_ptr<ARMOperand> CreateReg(
unsigned RegNum,
SMLoc S,
3633 auto Op = std::make_unique<ARMOperand>(k_Register);
3634 Op->Reg.RegNum = RegNum;
3640 static std::unique_ptr<ARMOperand>
3642 unsigned ShiftReg,
unsigned ShiftImm,
SMLoc S,
3644 auto Op = std::make_unique<ARMOperand>(k_ShiftedRegister);
3645 Op->RegShiftedReg.ShiftTy = ShTy;
3646 Op->RegShiftedReg.SrcReg = SrcReg;
3647 Op->RegShiftedReg.ShiftReg = ShiftReg;
3648 Op->RegShiftedReg.ShiftImm = ShiftImm;
3654 static std::unique_ptr<ARMOperand>
3657 auto Op = std::make_unique<ARMOperand>(k_ShiftedImmediate);
3658 Op->RegShiftedImm.ShiftTy = ShTy;
3659 Op->RegShiftedImm.SrcReg = SrcReg;
3660 Op->RegShiftedImm.ShiftImm = ShiftImm;
3666 static std::unique_ptr<ARMOperand> CreateShifterImm(
bool isASR,
unsigned Imm,
3668 auto Op = std::make_unique<ARMOperand>(k_ShifterImmediate);
3669 Op->ShifterImm.isASR = isASR;
3670 Op->ShifterImm.Imm =
Imm;
3676 static std::unique_ptr<ARMOperand> CreateRotImm(
unsigned Imm,
SMLoc S,
3678 auto Op = std::make_unique<ARMOperand>(k_RotateImmediate);
3679 Op->RotImm.Imm =
Imm;
3685 static std::unique_ptr<ARMOperand> CreateModImm(
unsigned Bits,
unsigned Rot,
3687 auto Op = std::make_unique<ARMOperand>(k_ModifiedImmediate);
3689 Op->ModImm.Rot = Rot;
3695 static std::unique_ptr<ARMOperand>
3697 auto Op = std::make_unique<ARMOperand>(k_ConstantPoolImmediate);
3704 static std::unique_ptr<ARMOperand>
3705 CreateBitfield(
unsigned LSB,
unsigned Width,
SMLoc S,
SMLoc E) {
3706 auto Op = std::make_unique<ARMOperand>(k_BitfieldDescriptor);
3707 Op->Bitfield.LSB = LSB;
3708 Op->Bitfield.Width = Width;
3714 static std::unique_ptr<ARMOperand>
3717 assert(Regs.size() > 0 &&
"RegList contains no registers?");
3718 KindTy
Kind = k_RegisterList;
3720 if (ARMMCRegisterClasses[ARM::DPRRegClassID].
contains(
3721 Regs.front().second)) {
3722 if (Regs.back().second == ARM::VPR)
3723 Kind = k_FPDRegisterListWithVPR;
3725 Kind = k_DPRRegisterList;
3726 }
else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
contains(
3727 Regs.front().second)) {
3728 if (Regs.back().second == ARM::VPR)
3729 Kind = k_FPSRegisterListWithVPR;
3731 Kind = k_SPRRegisterList;
3734 if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
3735 Kind = k_RegisterListWithAPSR;
3739 auto Op = std::make_unique<ARMOperand>(Kind);
3740 for (
const auto &
P : Regs)
3741 Op->Registers.push_back(
P.second);
3743 Op->StartLoc = StartLoc;
3744 Op->EndLoc = EndLoc;
3748 static std::unique_ptr<ARMOperand> CreateVectorList(
unsigned RegNum,
3750 bool isDoubleSpaced,
3752 auto Op = std::make_unique<ARMOperand>(k_VectorList);
3753 Op->VectorList.RegNum = RegNum;
3754 Op->VectorList.Count = Count;
3755 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3761 static std::unique_ptr<ARMOperand>
3762 CreateVectorListAllLanes(
unsigned RegNum,
unsigned Count,
bool isDoubleSpaced,
3764 auto Op = std::make_unique<ARMOperand>(k_VectorListAllLanes);
3765 Op->VectorList.RegNum = RegNum;
3766 Op->VectorList.Count = Count;
3767 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3773 static std::unique_ptr<ARMOperand>
3774 CreateVectorListIndexed(
unsigned RegNum,
unsigned Count,
unsigned Index,
3776 auto Op = std::make_unique<ARMOperand>(k_VectorListIndexed);
3777 Op->VectorList.RegNum = RegNum;
3778 Op->VectorList.Count = Count;
3779 Op->VectorList.LaneIndex =
Index;
3780 Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3786 static std::unique_ptr<ARMOperand>
3788 auto Op = std::make_unique<ARMOperand>(k_VectorIndex);
3789 Op->VectorIndex.Val =
Idx;
3795 static std::unique_ptr<ARMOperand> CreateImm(
const MCExpr *Val,
SMLoc S,
3797 auto Op = std::make_unique<ARMOperand>(k_Immediate);
3804 static std::unique_ptr<ARMOperand>
3805 CreateMem(
unsigned BaseRegNum,
const MCExpr *OffsetImm,
unsigned OffsetRegNum,
3808 auto Op = std::make_unique<ARMOperand>(k_Memory);
3809 Op->Memory.BaseRegNum = BaseRegNum;
3810 Op->Memory.OffsetImm = OffsetImm;
3811 Op->Memory.OffsetRegNum = OffsetRegNum;
3812 Op->Memory.ShiftType = ShiftType;
3813 Op->Memory.ShiftImm = ShiftImm;
3814 Op->Memory.Alignment = Alignment;
3815 Op->Memory.isNegative = isNegative;
3818 Op->AlignmentLoc = AlignmentLoc;
3822 static std::unique_ptr<ARMOperand>
3825 auto Op = std::make_unique<ARMOperand>(k_PostIndexRegister);
3826 Op->PostIdxReg.RegNum = RegNum;
3827 Op->PostIdxReg.isAdd = isAdd;
3828 Op->PostIdxReg.ShiftTy = ShiftTy;
3829 Op->PostIdxReg.ShiftImm = ShiftImm;
3835 static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(
ARM_MB::MemBOpt Opt,
3837 auto Op = std::make_unique<ARMOperand>(k_MemBarrierOpt);
3838 Op->MBOpt.Val = Opt;
3844 static std::unique_ptr<ARMOperand>
3846 auto Op = std::make_unique<ARMOperand>(k_InstSyncBarrierOpt);
3847 Op->ISBOpt.Val = Opt;
3853 static std::unique_ptr<ARMOperand>
3855 auto Op = std::make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
3856 Op->TSBOpt.Val = Opt;
3862 static std::unique_ptr<ARMOperand> CreateProcIFlags(
ARM_PROC::IFlags IFlags,
3864 auto Op = std::make_unique<ARMOperand>(k_ProcIFlags);
3871 static std::unique_ptr<ARMOperand> CreateMSRMask(
unsigned MMask,
SMLoc S) {
3872 auto Op = std::make_unique<ARMOperand>(k_MSRMask);
3873 Op->MMask.Val = MMask;
3879 static std::unique_ptr<ARMOperand> CreateBankedReg(
unsigned Reg,
SMLoc S) {
3880 auto Op = std::make_unique<ARMOperand>(k_BankedReg);
3881 Op->BankedReg.Val =
Reg;
3908 case k_ITCondMask: {
3909 static const char *
const MaskStr[] = {
3910 "(invalid)",
"(tttt)",
"(ttt)",
"(ttte)",
3911 "(tt)",
"(ttet)",
"(tte)",
"(ttee)",
3912 "(t)",
"(tett)",
"(tet)",
"(tete)",
3913 "(te)",
"(teet)",
"(tee)",
"(teee)",
3915 assert((ITMask.Mask & 0xf) == ITMask.Mask);
3916 OS <<
"<it-mask " << MaskStr[ITMask.Mask] <<
">";
3920 OS <<
"<coprocessor number: " << getCoproc() <<
">";
3923 OS <<
"<coprocessor register: " << getCoproc() <<
">";
3925 case k_CoprocOption:
3926 OS <<
"<coprocessor option: " << CoprocOption.Val <<
">";
3929 OS <<
"<mask: " << getMSRMask() <<
">";
3932 OS <<
"<banked reg: " << getBankedReg() <<
">";
3937 case k_MemBarrierOpt:
3938 OS <<
"<ARM_MB::" << MemBOptToString(getMemBarrierOpt(),
false) <<
">";
3940 case k_InstSyncBarrierOpt:
3941 OS <<
"<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) <<
">";
3943 case k_TraceSyncBarrierOpt:
3944 OS <<
"<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) <<
">";
3951 OS <<
" offset-imm:" << *
Memory.OffsetImm;
3953 OS <<
" offset-reg:" << (
Memory.isNegative ?
"-" :
"")
3957 OS <<
" shift-imm:" <<
Memory.ShiftImm;
3960 OS <<
" alignment:" <<
Memory.Alignment;
3963 case k_PostIndexRegister:
3964 OS <<
"post-idx register " << (PostIdxReg.isAdd ?
"" :
"-")
3965 <<
RegName(PostIdxReg.RegNum);
3968 << PostIdxReg.ShiftImm;
3971 case k_ProcIFlags: {
3972 OS <<
"<ARM_PROC::";
3973 unsigned IFlags = getProcIFlags();
3974 for (
int i=2; i >= 0; --i)
3975 if (IFlags & (1 << i))
3983 case k_ShifterImmediate:
3984 OS <<
"<shift " << (ShifterImm.isASR ?
"asr" :
"lsl")
3985 <<
" #" << ShifterImm.Imm <<
">";
3987 case k_ShiftedRegister:
3988 OS <<
"<so_reg_reg " <<
RegName(RegShiftedReg.SrcReg) <<
" "
3990 <<
RegName(RegShiftedReg.ShiftReg) <<
">";
3992 case k_ShiftedImmediate:
3993 OS <<
"<so_reg_imm " <<
RegName(RegShiftedImm.SrcReg) <<
" "
3995 << RegShiftedImm.ShiftImm <<
">";
3997 case k_RotateImmediate:
3998 OS <<
"<ror " <<
" #" << (RotImm.Imm * 8) <<
">";
4000 case k_ModifiedImmediate:
4001 OS <<
"<mod_imm #" << ModImm.Bits <<
", #"
4002 << ModImm.Rot <<
")>";
4004 case k_ConstantPoolImmediate:
4005 OS <<
"<constant_pool_imm #" << *getConstantPoolImm();
4007 case k_BitfieldDescriptor:
4008 OS <<
"<bitfield " <<
"lsb: " <<
Bitfield.LSB
4009 <<
", width: " <<
Bitfield.Width <<
">";
4011 case k_RegisterList:
4012 case k_RegisterListWithAPSR:
4013 case k_DPRRegisterList:
4014 case k_SPRRegisterList:
4015 case k_FPSRegisterListWithVPR:
4016 case k_FPDRegisterListWithVPR: {
4017 OS <<
"<register_list ";
4023 if (++
I <
E)
OS <<
", ";
4030 OS <<
"<vector_list " << VectorList.Count <<
" * "
4031 <<
RegName(VectorList.RegNum) <<
">";
4033 case k_VectorListAllLanes:
4034 OS <<
"<vector_list(all lanes) " << VectorList.Count <<
" * "
4035 <<
RegName(VectorList.RegNum) <<
">";
4037 case k_VectorListIndexed:
4038 OS <<
"<vector_list(lane " << VectorList.LaneIndex <<
") "
4039 << VectorList.Count <<
" * " <<
RegName(VectorList.RegNum) <<
">";
4042 OS <<
"'" << getToken() <<
"'";
4045 OS <<
"<vectorindex " << getVectorIndex() <<
">";
4059 const AsmToken &Tok = getParser().getTok();
4062 Reg = tryParseRegister();
4069 if (parseRegister(
Reg, StartLoc, EndLoc))
4077int ARMAsmParser::tryParseRegister(
bool AllowOutOfBoundReg) {
4086 .
Case(
"r13", ARM::SP)
4087 .
Case(
"r14", ARM::LR)
4088 .
Case(
"r15", ARM::PC)
4089 .
Case(
"ip", ARM::R12)
4091 .
Case(
"a1", ARM::R0)
4092 .
Case(
"a2", ARM::R1)
4093 .
Case(
"a3", ARM::R2)
4094 .
Case(
"a4", ARM::R3)
4095 .
Case(
"v1", ARM::R4)
4096 .
Case(
"v2", ARM::R5)
4097 .
Case(
"v3", ARM::R6)
4098 .
Case(
"v4", ARM::R7)
4099 .
Case(
"v5", ARM::R8)
4100 .
Case(
"v6", ARM::R9)
4101 .
Case(
"v7", ARM::R10)
4102 .
Case(
"v8", ARM::R11)
4103 .
Case(
"sb", ARM::R9)
4104 .
Case(
"sl", ARM::R10)
4105 .
Case(
"fp", ARM::R11)
4114 if (Entry == RegisterReqs.
end())
4117 return Entry->getValue();
4121 if (!AllowOutOfBoundReg && !hasD32() && RegNum >=
ARM::D16 &&
4160 std::unique_ptr<ARMOperand> PrevOp(
4161 (ARMOperand *)
Operands.pop_back_val().release());
4162 if (!PrevOp->isReg())
4163 return Error(PrevOp->getStartLoc(),
"shift must be of a register");
4164 int SrcReg = PrevOp->getReg();
4180 const MCExpr *ShiftExpr =
nullptr;
4181 if (getParser().parseExpression(ShiftExpr, EndLoc)) {
4182 Error(ImmLoc,
"invalid immediate shift value");
4188 Error(ImmLoc,
"invalid immediate shift value");
4194 Imm =
CE->getValue();
4198 Error(ImmLoc,
"immediate shift value out of range");
4208 ShiftReg = tryParseRegister();
4209 if (ShiftReg == -1) {
4210 Error(L,
"expected immediate or register in shift operand");
4215 "expected immediate or register in shift operand");
4221 Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
4225 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
4241 int RegNo = tryParseRegister();
4245 Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));
4263 if (getParser().parseExpression(ImmVal))
4267 return TokError(
"immediate value expected for vector index");
4295 if (
Name.size() < 2 ||
Name[0] != CoprocOp)
4299 switch (
Name.size()) {
4322 case '0':
return 10;
4323 case '1':
return 11;
4324 case '2':
return 12;
4325 case '3':
return 13;
4326 case '4':
return 14;
4327 case '5':
return 15;
4366 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
4385 Operands.push_back(ARMOperand::CreateCoprocReg(
Reg, S));
4402 if (getParser().parseExpression(Expr))
4403 return Error(Loc,
"illegal expression");
4405 if (!CE ||
CE->getValue() < 0 ||
CE->getValue() > 255)
4407 "coprocessor option must be an immediate in range [0, 255]");
4408 int Val =
CE->getValue();
4416 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S,
E));
4427 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].
contains(
Reg))
4431 case ARM::R0:
return ARM::R1;
case ARM::R1:
return ARM::R2;
4432 case ARM::R2:
return ARM::R3;
case ARM::R3:
return ARM::R4;
4433 case ARM::R4:
return ARM::R5;
case ARM::R5:
return ARM::R6;
4434 case ARM::R6:
return ARM::R7;
case ARM::R7:
return ARM::R8;
4435 case ARM::R8:
return ARM::R9;
case ARM::R9:
return ARM::R10;
4436 case ARM::R10:
return ARM::R11;
case ARM::R11:
return ARM::R12;
4437 case ARM::R12:
return ARM::SP;
case ARM::SP:
return ARM::LR;
4438 case ARM::LR:
return ARM::PC;
case ARM::PC:
return ARM::R0;
4446 unsigned Enc,
unsigned Reg) {
4447 Regs.emplace_back(Enc,
Reg);
4448 for (
auto I = Regs.rbegin(), J =
I + 1,
E = Regs.rend(); J !=
E; ++
I, ++J) {
4449 if (J->first == Enc) {
4450 Regs.erase(J.base());
4462 bool AllowRAAC,
bool AllowOutOfBoundReg) {
4465 return TokError(
"Token is not a Left Curly Brace");
4472 int Reg = tryParseRegister();
4474 return Error(RegLoc,
"register expected");
4475 if (!AllowRAAC &&
Reg == ARM::RA_AUTH_CODE)
4476 return Error(RegLoc,
"pseudo-register not allowed");
4483 if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4484 Reg = getDRegFromQReg(
Reg);
4485 EReg =
MRI->getEncodingValue(
Reg);
4490 if (
Reg == ARM::RA_AUTH_CODE ||
4491 ARMMCRegisterClasses[ARM::GPRRegClassID].
contains(
Reg))
4492 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
4493 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].
contains(
Reg))
4494 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
4495 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
contains(
Reg))
4496 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
4497 else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].
contains(
Reg))
4498 RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4500 return Error(RegLoc,
"invalid register in register list");
4503 EReg =
MRI->getEncodingValue(
Reg);
4512 if (
Reg == ARM::RA_AUTH_CODE)
4513 return Error(RegLoc,
"pseudo-register not allowed");
4516 int EndReg = tryParseRegister(AllowOutOfBoundReg);
4518 return Error(AfterMinusLoc,
"register expected");
4519 if (EndReg == ARM::RA_AUTH_CODE)
4520 return Error(AfterMinusLoc,
"pseudo-register not allowed");
4522 if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(EndReg))
4523 EndReg = getDRegFromQReg(EndReg) + 1;
4530 return Error(AfterMinusLoc,
"invalid register in register list");
4532 if (
MRI->getEncodingValue(
Reg) >
MRI->getEncodingValue(EndReg))
4533 return Error(AfterMinusLoc,
"bad range in register list");
4536 while (
Reg != EndReg) {
4538 EReg =
MRI->getEncodingValue(
Reg);
4542 ") in register list");
4551 Reg = tryParseRegister(AllowOutOfBoundReg);
4553 return Error(RegLoc,
"register expected");
4554 if (!AllowRAAC &&
Reg == ARM::RA_AUTH_CODE)
4555 return Error(RegLoc,
"pseudo-register not allowed");
4557 bool isQReg =
false;
4558 if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4559 Reg = getDRegFromQReg(
Reg);
4563 RC->
getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
4564 ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
Reg)) {
4567 RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4569 if (
Reg == ARM::VPR &&
4570 (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
4571 RC == &ARMMCRegisterClasses[ARM::DPRRegClassID] ||
4572 RC == &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID])) {
4573 RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4574 EReg =
MRI->getEncodingValue(
Reg);
4577 ") in register list");
4582 if ((
Reg == ARM::RA_AUTH_CODE &&
4583 RC != &ARMMCRegisterClasses[ARM::GPRRegClassID]) ||
4585 return Error(RegLoc,
"invalid register in register list");
4591 MRI->getEncodingValue(
Reg) <
MRI->getEncodingValue(OldReg)) {
4592 if (ARMMCRegisterClasses[ARM::GPRRegClassID].
contains(
Reg))
4593 Warning(RegLoc,
"register list not in ascending order");
4594 else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].
contains(
Reg))
4595 return Error(RegLoc,
"register list not in ascending order");
4598 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
4599 RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
4601 return Error(RegLoc,
"non-contiguous register range");
4602 EReg =
MRI->getEncodingValue(
Reg);
4605 ") in register list");
4608 EReg =
MRI->getEncodingValue(++
Reg);
4631ParseStatus ARMAsmParser::parseVectorLane(VectorLaneTy &LaneKind,
4639 LaneKind = AllLanes;
4652 if (getParser().parseExpression(LaneIndex))
4653 return Error(Loc,
"illegal expression");
4656 return Error(Loc,
"lane index must be empty or an integer");
4661 int64_t Val =
CE->getValue();
4664 if (Val < 0 || Val > 7)
4667 LaneKind = IndexedLane;
4677 VectorLaneTy LaneKind;
4685 int Reg = tryParseRegister();
4688 if (ARMMCRegisterClasses[ARM::DPRRegClassID].
contains(
Reg)) {
4689 ParseStatus Res = parseVectorLane(LaneKind, LaneIndex,
E);
4694 Operands.push_back(ARMOperand::CreateVectorList(
Reg, 1,
false, S,
E));
4697 Operands.push_back(ARMOperand::CreateVectorListAllLanes(
Reg, 1,
false,
4701 Operands.push_back(ARMOperand::CreateVectorListIndexed(
Reg, 1,
4708 if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4709 Reg = getDRegFromQReg(
Reg);
4710 ParseStatus Res = parseVectorLane(LaneKind, LaneIndex,
E);
4715 Reg =
MRI->getMatchingSuperReg(
Reg, ARM::dsub_0,
4716 &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4717 Operands.push_back(ARMOperand::CreateVectorList(
Reg, 2,
false, S,
E));
4720 Reg =
MRI->getMatchingSuperReg(
Reg, ARM::dsub_0,
4721 &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4722 Operands.push_back(ARMOperand::CreateVectorListAllLanes(
Reg, 2,
false,
4726 Operands.push_back(ARMOperand::CreateVectorListIndexed(
Reg, 2,
4733 return Error(S,
"vector register expected");
4742 int Reg = tryParseRegister();
4744 return Error(RegLoc,
"register expected");
4747 unsigned FirstReg =
Reg;
4749 if (hasMVE() && !ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
Reg))
4751 "vector register in range Q0-Q7 expected");
4754 else if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4755 FirstReg =
Reg = getDRegFromQReg(
Reg);
4763 if (!parseVectorLane(LaneKind, LaneIndex,
E).isSuccess())
4771 else if (Spacing == 2)
4773 "sequential registers in double spaced list");
4776 int EndReg = tryParseRegister();
4778 return Error(AfterMinusLoc,
"register expected");
4780 if (!hasMVE() && ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(EndReg))
4781 EndReg = getDRegFromQReg(EndReg) + 1;
4788 !ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(EndReg)) ||
4790 !ARMMCRegisterClasses[ARM::DPRRegClassID].
contains(EndReg)))
4791 return Error(AfterMinusLoc,
"invalid register in register list");
4794 return Error(AfterMinusLoc,
"bad range in register list");
4796 VectorLaneTy NextLaneKind;
4797 unsigned NextLaneIndex;
4798 if (!parseVectorLane(NextLaneKind, NextLaneIndex,
E).isSuccess())
4800 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4801 return Error(AfterMinusLoc,
"mismatched lane index in register list");
4804 Count += EndReg -
Reg;
4811 Reg = tryParseRegister();
4813 return Error(RegLoc,
"register expected");
4816 if (!ARMMCRegisterClasses[ARM::MQPRRegClassID].
contains(
Reg))
4817 return Error(RegLoc,
"vector register in range Q0-Q7 expected");
4826 else if (ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
Reg)) {
4829 else if (Spacing == 2)
4832 "invalid register in double-spaced list (must be 'D' register')");
4833 Reg = getDRegFromQReg(
Reg);
4834 if (
Reg != OldReg + 1)
4835 return Error(RegLoc,
"non-contiguous register range");
4839 VectorLaneTy NextLaneKind;
4840 unsigned NextLaneIndex;
4842 if (!parseVectorLane(NextLaneKind, NextLaneIndex,
E).isSuccess())
4844 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4845 return Error(LaneLoc,
"mismatched lane index in register list");
4852 Spacing = 1 + (
Reg == OldReg + 2);
4855 if (
Reg != OldReg + Spacing)
4856 return Error(RegLoc,
"non-contiguous register range");
4859 VectorLaneTy NextLaneKind;
4860 unsigned NextLaneIndex;
4862 if (!parseVectorLane(NextLaneKind, NextLaneIndex,
E).isSuccess())
4864 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex)
4865 return Error(EndLoc,
"mismatched lane index in register list");
4878 if (Count == 2 && !hasMVE()) {
4880 &ARMMCRegisterClasses[ARM::DPairRegClassID] :
4881 &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
4882 FirstReg =
MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
4884 auto Create = (LaneKind == NoLanes ? ARMOperand::CreateVectorList :
4885 ARMOperand::CreateVectorListAllLanes);
4886 Operands.push_back(Create(FirstReg, Count, (Spacing == 2), S,
E));
4890 Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
4944 const MCExpr *MemBarrierID;
4945 if (getParser().parseExpression(MemBarrierID))
4946 return Error(Loc,
"illegal expression");
4950 return Error(Loc,
"constant expression expected");
4952 int Val =
CE->getValue();
4954 return Error(Loc,
"immediate value out of range");
5006 const MCExpr *ISBarrierID;
5007 if (getParser().parseExpression(ISBarrierID))
5008 return Error(Loc,
"illegal expression");
5012 return Error(Loc,
"constant expression expected");
5014 int Val =
CE->getValue();
5016 return Error(Loc,
"immediate value out of range");
5022 Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
5039 if (IFlagsStr !=
"none") {
5040 for (
int i = 0, e = IFlagsStr.
size(); i != e; ++i) {
5049 if (Flag == ~0U || (IFlags & Flag))
5069 if (Val > 255 || Val < 0) {
5072 unsigned SYSmvalue = Val & 0xFF;
5074 Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
5083 auto TheReg = ARMSysReg::lookupMClassSysRegByName(
Mask.lower());
5084 if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
5087 unsigned SYSmvalue = TheReg->Encoding & 0xFFF;
5090 Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
5095 size_t Start = 0, Next =
Mask.find(
'_');
5097 std::string SpecReg =
Mask.slice(Start, Next).lower();
5104 unsigned FlagsVal = 0;
5106 if (SpecReg ==
"apsr") {
5110 .
Case(
"nzcvqg", 0xc)
5113 if (FlagsVal == ~0U) {
5119 }
else if (SpecReg ==
"cpsr" || SpecReg ==
"spsr") {
5121 if (Flags ==
"all" || Flags ==
"")
5123 for (
int i = 0, e =
Flags.size(); i != e; ++i) {
5133 if (Flag == ~0U || (FlagsVal & Flag))
5149 if (SpecReg ==
"spsr")
5153 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
5167 auto TheReg = ARMBankedReg::lookupBankedRegByName(
RegName.lower());
5170 unsigned Encoding = TheReg->Encoding;
5173 Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
5184 std::string LowerOp =
Op.lower();
5185 std::string UpperOp =
Op.upper();
5186 if (ShiftName != LowerOp && ShiftName != UpperOp)
5196 const MCExpr *ShiftAmount;
5199 if (getParser().parseExpression(ShiftAmount, EndLoc))
5200 return Error(Loc,
"illegal expression");
5203 return Error(Loc,
"constant expression expected");
5204 int Val =
CE->getValue();
5205 if (Val < Low || Val >
High)
5206 return Error(Loc,
"immediate value out of range");
5208 Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
5218 return Error(S,
"'be' or 'le' operand expected");
5226 return Error(S,
"'be' or 'le' operand expected");
5243 return Error(S,
"shift operator 'asr' or 'lsl' expected");
5246 if (ShiftName ==
"lsl" || ShiftName ==
"LSL")
5248 else if (ShiftName ==
"asr" || ShiftName ==
"ASR")
5251 return Error(S,
"shift operator 'asr' or 'lsl' expected");
5261 const MCExpr *ShiftAmount;
5263 if (getParser().parseExpression(ShiftAmount, EndLoc))
5264 return Error(ExLoc,
"malformed shift expression");
5267 return Error(ExLoc,
"shift amount must be an immediate");
5269 int64_t Val =
CE->getValue();
5272 if (Val < 1 || Val > 32)
5273 return Error(ExLoc,
"'asr' shift amount must be in range [1,32]");
5276 return Error(ExLoc,
"'asr #32' shift amount not allowed in Thumb mode");
5277 if (Val == 32) Val = 0;
5280 if (Val < 0 || Val > 31)
5281 return Error(ExLoc,
"'lsr' shift amount must be in range [0,31]");
5284 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
5299 if (ShiftName !=
"ror" && ShiftName !=
"ROR")
5310 const MCExpr *ShiftAmount;
5312 if (getParser().parseExpression(ShiftAmount, EndLoc))
5313 return Error(ExLoc,
"malformed rotate expression");
5316 return Error(ExLoc,
"rotate amount must be an immediate");
5318 int64_t Val =
CE->getValue();
5322 if (Val != 8 && Val != 16 && Val != 24 && Val != 0)
5323 return Error(ExLoc,
"'ror' rotate amount must be 8, 16, or 24");
5325 Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
5364 if (getParser().parseExpression(Imm1Exp, Ex1))
5365 return Error(Sx1,
"malformed expression");
5371 Imm1 =
CE->getValue();
5375 Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
5388 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5394 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5401 "expected modified immediate operand: #[0, 255], #even[0-30]");
5404 return Error(Sx1,
"immediate operand must a number in the range [0, 255]");
5419 if (getParser().parseExpression(Imm2Exp, Ex2))
5420 return Error(Sx2,
"malformed expression");
5422 CE = dyn_cast<MCConstantExpr>(Imm2Exp);
5425 Imm2 =
CE->getValue();
5426 if (!(Imm2 & ~0x1E)) {
5428 Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
5432 "immediate operand must an even number in the range [0, 30]");
5434 return Error(Sx2,
"constant expression expected");
5449 if (getParser().parseExpression(LSBExpr))
5450 return Error(
E,
"malformed immediate expression");
5453 return Error(
E,
"'lsb' operand must be an immediate");
5455 int64_t LSB =
CE->getValue();
5457 if (LSB < 0 || LSB > 31)
5458 return Error(
E,
"'lsb' operand must be in the range [0,31]");
5472 if (getParser().parseExpression(WidthExpr, EndLoc))
5473 return Error(
E,
"malformed immediate expression");
5474 CE = dyn_cast<MCConstantExpr>(WidthExpr);
5476 return Error(
E,
"'width' operand must be an immediate");
5478 int64_t Width =
CE->getValue();
5480 if (Width < 1 || Width > 32 - LSB)
5481 return Error(
E,
"'width' operand must be in the range [1,32-lsb]");
5483 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
5500 bool haveEaten =
false;
5512 int Reg = tryParseRegister();
5520 unsigned ShiftImm = 0;
5523 if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
5530 Operands.push_back(ARMOperand::CreatePostIdxReg(
Reg, isAdd, ShiftTy,
5561 if (getParser().parseExpression(
Offset,
E))
5565 return Error(S,
"constant expression expected");
5568 int32_t Val =
CE->getValue();
5569 if (isNegative && Val == 0)
5570 Val = std::numeric_limits<int32_t>::min();
5578 bool haveEaten =
false;
5590 int Reg = tryParseRegister();
5606void ARMAsmParser::cvtThumbMultiply(
MCInst &Inst,
5608 ((ARMOperand &)*
Operands[3]).addRegOperands(Inst, 1);
5609 ((ARMOperand &)*
Operands[1]).addCCOutOperands(Inst, 1);
5614 ((ARMOperand &)*
Operands[4]).getReg() ==
5615 ((ARMOperand &)*
Operands[3]).getReg())
5617 ((ARMOperand &)*
Operands[RegOp]).addRegOperands(Inst, 1);
5619 ((ARMOperand &)*
Operands[2]).addCondCodeOperands(Inst, 2);
5622void ARMAsmParser::cvtThumbBranches(
MCInst &Inst,
5624 int CondOp = -1, ImmOp = -1;
5627 case ARM::tBcc: CondOp = 1; ImmOp = 2;
break;
5630 case ARM::t2Bcc: CondOp = 1; ImmOp = 3;
break;
5640 case ARM::tBcc: Inst.
setOpcode(ARM::tB);
break;
5641 case ARM::t2Bcc: Inst.
setOpcode(ARM::t2B);
break;
5663 ARMOperand &
op =
static_cast<ARMOperand &
>(*
Operands[ImmOp]);
5664 if (!
op.isSignedOffset<11, 1>() &&
isThumb() && hasV8MBaseline())
5670 ARMOperand &
op =
static_cast<ARMOperand &
>(*
Operands[ImmOp]);
5671 if (!
op.isSignedOffset<8, 1>() &&
isThumb() && hasV8MBaseline())
5676 ((ARMOperand &)*
Operands[ImmOp]).addImmOperands(Inst, 1);
5677 ((ARMOperand &)*
Operands[CondOp]).addCondCodeOperands(Inst, 2);
5680void ARMAsmParser::cvtMVEVMOVQtoDReg(
5686 ((ARMOperand &)*
Operands[2]).addRegOperands(Inst, 1);
5687 ((ARMOperand &)*
Operands[3]).addRegOperands(Inst, 1);
5688 ((ARMOperand &)*
Operands[4]).addRegOperands(Inst, 1);
5689 ((ARMOperand &)*
Operands[5]).addMVEPairVectorIndexOperands(Inst, 1);
5691 ((ARMOperand &)*
Operands[7]).addMVEPairVectorIndexOperands(Inst, 1);
5692 ((ARMOperand &)*
Operands[1]).addCondCodeOperands(Inst, 2);
5701 return TokError(
"Token is not a Left Bracket");
5706 int BaseRegNum = tryParseRegister();
5707 if (BaseRegNum == -1)
5708 return Error(BaseRegTok.
getLoc(),
"register expected");
5714 return Error(Tok.
getLoc(),
"malformed memory operand");
5720 Operands.push_back(ARMOperand::CreateMem(BaseRegNum,
nullptr, 0,
5735 "Lost colon or comma in memory operand?!");
5747 if (getParser().parseExpression(Expr))
5755 return Error (
E,
"constant expression expected");
5758 switch (
CE->getValue()) {
5761 "alignment specifier must be 16, 32, 64, 128, or 256 bits");
5762 case 16:
Align = 2;
break;
5763 case 32:
Align = 4;
break;
5764 case 64:
Align = 8;
break;
5765 case 128:
Align = 16;
break;
5766 case 256:
Align = 32;
break;
5777 Operands.push_back(ARMOperand::CreateMem(BaseRegNum,
nullptr, 0,
5779 false, S,
E, AlignmentLoc));
5805 if (getParser().parseExpression(
Offset))
5808 if (
const auto *CE = dyn_cast<MCConstantExpr>(
Offset)) {
5811 int32_t Val =
CE->getValue();
5812 if (isNegative && Val == 0)
5817 AdjustedOffset =
CE;
5820 Operands.push_back(ARMOperand::CreateMem(
5840 bool isNegative =
false;
5850 int OffsetRegNum = tryParseRegister();
5851 if (OffsetRegNum == -1)
5852 return Error(
E,
"register expected");
5856 unsigned ShiftImm = 0;
5859 if (parseMemRegOffsetShift(ShiftType, ShiftImm))
5869 Operands.push_back(ARMOperand::CreateMem(BaseRegNum,
nullptr, OffsetRegNum,
5870 ShiftType, ShiftImm, 0, isNegative,
5893 return Error(Loc,
"illegal shift operator");
5895 if (ShiftName ==
"lsl" || ShiftName ==
"LSL" ||
5896 ShiftName ==
"asl" || ShiftName ==
"ASL")
5898 else if (ShiftName ==
"lsr" || ShiftName ==
"LSR")
5900 else if (ShiftName ==
"asr" || ShiftName ==
"ASR")
5902 else if (ShiftName ==
"ror" || ShiftName ==
"ROR")
5904 else if (ShiftName ==
"rrx" || ShiftName ==
"RRX")
5906 else if (ShiftName ==
"uxtw" || ShiftName ==
"UXTW")
5909 return Error(Loc,
"illegal shift operator");
5924 if (getParser().parseExpression(Expr))
5931 return Error(Loc,
"shift amount must be an immediate");
5932 int64_t
Imm =
CE->getValue();
5936 return Error(Loc,
"immediate shift value out of range");
5977 ARMOperand &TyOp =
static_cast<ARMOperand &
>(*
Operands[2]);
5978 bool isVmovf = TyOp.isToken() &&
5979 (TyOp.getToken() ==
".f32" || TyOp.getToken() ==
".f64" ||
5980 TyOp.getToken() ==
".f16");
5981 ARMOperand &Mnemonic =
static_cast<ARMOperand &
>(*
Operands[0]);
5982 bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() ==
"fconstd" ||
5983 Mnemonic.getToken() ==
"fconsts");
5984 if (!(isVmovf || isFconst))
5990 bool isNegative =
false;
6003 Operands.push_back(ARMOperand::CreateImm(
6013 if (Val > 255 || Val < 0)
6014 return Error(Loc,
"encoded floating point value out of range");
6018 Operands.push_back(ARMOperand::CreateImm(
6024 return Error(Loc,
"invalid floating point immediate");
6044 switch (getLexer().getKind()) {
6052 bool ExpectLabel = Mnemonic ==
"b" || Mnemonic ==
"bl";
6054 if (!tryParseRegisterWithWriteBack(
Operands))
6056 int Res = tryParseShiftRegister(
Operands);
6062 if (Mnemonic ==
"vmrs" &&
6066 Operands.push_back(ARMOperand::CreateToken(
"APSR_nzcv", S));
6083 if (getParser().parseExpression(IdVal))
6086 Operands.push_back(ARMOperand::CreateImm(IdVal, S,
E));
6092 bool AllowOutOfBoundReg = Mnemonic ==
"vlldm" || Mnemonic ==
"vlstm";
6094 AllowOutOfBoundReg);
6107 auto AdjacentToken = getLexer().peekTok(
false);
6111 if (!ExpectIdentifier) {
6120 if (getParser().parseExpression(ImmVal))
6124 int32_t Val =
CE->getValue();
6125 if (IsNegative && Val == 0)
6130 Operands.push_back(ARMOperand::CreateImm(ImmVal, S,
E));
6152 if (parsePrefix(RefKind))
6155 const MCExpr *SubExprVal;
6156 if (getParser().parseExpression(SubExprVal))
6162 Operands.push_back(ARMOperand::CreateImm(ExprVal, S,
E));
6167 if (Mnemonic !=
"ldr")
6168 return Error(S,
"unexpected token in operand");
6170 const MCExpr *SubExprVal;
6171 if (getParser().parseExpression(SubExprVal))
6177 Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S,
E));
6183bool ARMAsmParser::parseImmExpr(int64_t &Out) {
6184 const MCExpr *Expr =
nullptr;
6185 SMLoc L = getParser().getTok().getLoc();
6186 if (
check(getParser().parseExpression(Expr), L,
"expected expression"))
6189 if (
check(!
Value, L,
"expected constant expression"))
6191 Out =
Value->getValue();
6220 static const struct PrefixEntry {
6221 const char *Spelling;
6223 uint8_t SupportedFormats;
6224 } PrefixEntries[] = {
6236 llvm::find_if(PrefixEntries, [&IDVal](
const PrefixEntry &PE) {
6237 return PE.Spelling == IDVal;
6239 if (Prefix == std::end(PrefixEntries)) {
6244 uint8_t CurrentFormat;
6245 switch (getContext().getObjectFileType()) {
6247 CurrentFormat = MACHO;
6250 CurrentFormat =
ELF;
6253 CurrentFormat =
COFF;
6256 CurrentFormat = WASM;
6266 if (~
Prefix->SupportedFormats & CurrentFormat) {
6268 "cannot represent relocation in the current file format");
6272 RefKind =
Prefix->VariantKind;
6296 unsigned &ProcessorIMod,
6300 CarrySetting =
false;
6306 if ((Mnemonic ==
"movs" &&
isThumb()) || Mnemonic ==
"teq" ||
6307 Mnemonic ==
"vceq" || Mnemonic ==
"svc" || Mnemonic ==
"mls" ||
6308 Mnemonic ==
"smmls" || Mnemonic ==
"vcls" || Mnemonic ==
"vmls" ||
6309 Mnemonic ==
"vnmls" || Mnemonic ==
"vacge" || Mnemonic ==
"vcge" ||
6310 Mnemonic ==
"vclt" || Mnemonic ==
"vacgt" || Mnemonic ==
"vaclt" ||
6311 Mnemonic ==
"vacle" || Mnemonic ==
"hlt" || Mnemonic ==
"vcgt" ||
6312 Mnemonic ==
"vcle" || Mnemonic ==
"smlal" || Mnemonic ==
"umaal" ||
6313 Mnemonic ==
"umlal" || Mnemonic ==
"vabal" || Mnemonic ==
"vmlal" ||
6314 Mnemonic ==
"vpadal" || Mnemonic ==
"vqdmlal" || Mnemonic ==
"fmuls" ||
6315 Mnemonic ==
"vmaxnm" || Mnemonic ==
"vminnm" || Mnemonic ==
"vcvta" ||
6316 Mnemonic ==
"vcvtn" || Mnemonic ==
"vcvtp" || Mnemonic ==
"vcvtm" ||
6317 Mnemonic ==
"vrinta" || Mnemonic ==
"vrintn" || Mnemonic ==
"vrintp" ||
6318 Mnemonic ==
"vrintm" || Mnemonic ==
"hvc" ||
6319 Mnemonic.
starts_with(
"vsel") || Mnemonic ==
"vins" ||
6320 Mnemonic ==
"vmovx" || Mnemonic ==
"bxns" || Mnemonic ==
"blxns" ||
6321 Mnemonic ==
"vdot" || Mnemonic ==
"vmmla" || Mnemonic ==
"vudot" ||
6322 Mnemonic ==
"vsdot" || Mnemonic ==
"vcmla" || Mnemonic ==
"vcadd" ||
6323 Mnemonic ==
"vfmal" || Mnemonic ==
"vfmsl" || Mnemonic ==
"wls" ||
6324 Mnemonic ==
"le" || Mnemonic ==
"dls" || Mnemonic ==
"csel" ||
6325 Mnemonic ==
"csinc" || Mnemonic ==
"csinv" || Mnemonic ==
"csneg" ||
6326 Mnemonic ==
"cinc" || Mnemonic ==
"cinv" || Mnemonic ==
"cneg" ||
6327 Mnemonic ==
"cset" || Mnemonic ==
"csetm" || Mnemonic ==
"aut" ||
6328 Mnemonic ==
"pac" || Mnemonic ==
"pacbti" || Mnemonic ==
"bti")
6333 if (Mnemonic !=
"adcs" && Mnemonic !=
"bics" && Mnemonic !=
"movs" &&
6334 Mnemonic !=
"muls" && Mnemonic !=
"smlals" && Mnemonic !=
"smulls" &&
6335 Mnemonic !=
"umlals" && Mnemonic !=
"umulls" && Mnemonic !=
"lsls" &&
6336 Mnemonic !=
"sbcs" && Mnemonic !=
"rscs" &&
6338 (Mnemonic ==
"vmine" || Mnemonic ==
"vshle" || Mnemonic ==
"vshlt" ||
6339 Mnemonic ==
"vshllt" || Mnemonic ==
"vrshle" || Mnemonic ==
"vrshlt" ||
6340 Mnemonic ==
"vmvne" || Mnemonic ==
"vorne" || Mnemonic ==
"vnege" ||
6341 Mnemonic ==
"vnegt" || Mnemonic ==
"vmule" || Mnemonic ==
"vmult" ||
6342 Mnemonic ==
"vrintne" || Mnemonic ==
"vcmult" ||
6343 Mnemonic ==
"vcmule" || Mnemonic ==
"vpsele" || Mnemonic ==
"vpselt" ||
6347 Mnemonic = Mnemonic.
slice(0, Mnemonic.
size() - 2);
6355 !(Mnemonic ==
"cps" || Mnemonic ==
"mls" || Mnemonic ==
"mrs" ||
6356 Mnemonic ==
"smmls" || Mnemonic ==
"vabs" || Mnemonic ==
"vcls" ||
6357 Mnemonic ==
"vmls" || Mnemonic ==
"vmrs" || Mnemonic ==
"vnmls" ||
6358 Mnemonic ==
"vqabs" || Mnemonic ==
"vrecps" || Mnemonic ==
"vrsqrts" ||
6359 Mnemonic ==
"srs" || Mnemonic ==
"flds" || Mnemonic ==
"fmrs" ||
6360 Mnemonic ==
"fsqrts" || Mnemonic ==
"fsubs" || Mnemonic ==
"fsts" ||
6361 Mnemonic ==
"fcpys" || Mnemonic ==
"fdivs" || Mnemonic ==
"fmuls" ||
6362 Mnemonic ==
"fcmps" || Mnemonic ==
"fcmpzs" || Mnemonic ==
"vfms" ||
6363 Mnemonic ==
"vfnms" || Mnemonic ==
"fconsts" || Mnemonic ==
"bxns" ||
6364 Mnemonic ==
"blxns" || Mnemonic ==
"vfmas" || Mnemonic ==
"vmlas" ||
6365 (Mnemonic ==
"movs" &&
isThumb()))) {
6366 Mnemonic = Mnemonic.
slice(0, Mnemonic.
size() - 1);
6367 CarrySetting =
true;
6380 Mnemonic = Mnemonic.
slice(0, Mnemonic.
size()-2);
6381 ProcessorIMod =
IMod;
6385 if (isMnemonicVPTPredicable(Mnemonic, ExtraToken) && Mnemonic !=
"vmovlt" &&
6386 Mnemonic !=
"vshllt" && Mnemonic !=
"vrshrnt" && Mnemonic !=
"vshrnt" &&
6387 Mnemonic !=
"vqrshrunt" && Mnemonic !=
"vqshrunt" &&
6388 Mnemonic !=
"vqrshrnt" && Mnemonic !=
"vqshrnt" && Mnemonic !=
"vmullt" &&
6389 Mnemonic !=
"vqmovnt" && Mnemonic !=
"vqmovunt" &&
6390 Mnemonic !=
"vqmovnt" && Mnemonic !=
"vmovnt" && Mnemonic !=
"vqdmullt" &&
6391 Mnemonic !=
"vpnot" && Mnemonic !=
"vcvtt" && Mnemonic !=
"vcvt") {
6395 Mnemonic = Mnemonic.
slice(0, Mnemonic.
size()-1);
6403 ITMask = Mnemonic.
slice(2, Mnemonic.
size());
6404 Mnemonic = Mnemonic.
slice(0, 2);
6408 ITMask = Mnemonic.
slice(4, Mnemonic.
size());
6409 Mnemonic = Mnemonic.
slice(0, 4);
6411 ITMask = Mnemonic.
slice(3, Mnemonic.
size());
6412 Mnemonic = Mnemonic.
slice(0, 3);
6422void ARMAsmParser::getMnemonicAcceptInfo(
StringRef Mnemonic,
6425 bool &CanAcceptCarrySet,
6426 bool &CanAcceptPredicationCode,
6427 bool &CanAcceptVPTPredicationCode) {
6428 CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken);
6431 Mnemonic ==
"and" || Mnemonic ==
"lsl" || Mnemonic ==
"lsr" ||
6432 Mnemonic ==
"rrx" || Mnemonic ==
"ror" || Mnemonic ==
"sub" ||
6433 Mnemonic ==
"add" || Mnemonic ==
"adc" || Mnemonic ==
"mul" ||
6434 Mnemonic ==
"bic" || Mnemonic ==
"asr" || Mnemonic ==
"orr" ||
6435 Mnemonic ==
"mvn" || Mnemonic ==
"rsb" || Mnemonic ==
"rsc" ||
6436 Mnemonic ==
"orn" || Mnemonic ==
"sbc" || Mnemonic ==
"eor" ||
6437 Mnemonic ==
"neg" || Mnemonic ==
"vfm" || Mnemonic ==
"vfnm" ||
6439 (Mnemonic ==
"smull" || Mnemonic ==
"mov" || Mnemonic ==
"mla" ||
6440 Mnemonic ==
"smlal" || Mnemonic ==
"umlal" || Mnemonic ==
"umull"));
6442 if (Mnemonic ==
"bkpt" || Mnemonic ==
"cbnz" || Mnemonic ==
"setend" ||
6443 Mnemonic ==
"cps" || Mnemonic ==
"it" || Mnemonic ==
"cbz" ||
6444 Mnemonic ==
"trap" || Mnemonic ==
"hlt" || Mnemonic ==
"udf" ||
6446 Mnemonic.
starts_with(
"vsel") || Mnemonic ==
"vmaxnm" ||
6447 Mnemonic ==
"vminnm" || Mnemonic ==
"vcvta" || Mnemonic ==
"vcvtn" ||
6448 Mnemonic ==
"vcvtp" || Mnemonic ==
"vcvtm" || Mnemonic ==
"vrinta" ||
6449 Mnemonic ==
"vrintn" || Mnemonic ==
"vrintp" || Mnemonic ==
"vrintm" ||
6450 Mnemonic.
starts_with(
"aes") || Mnemonic ==
"hvc" ||
6451 Mnemonic ==
"setpan" || Mnemonic.
starts_with(
"sha1") ||
6454 Mnemonic ==
"vmovx" || Mnemonic ==
"vins" || Mnemonic ==
"vudot" ||
6455 Mnemonic ==
"vsdot" || Mnemonic ==
"vcmla" || Mnemonic ==
"vcadd" ||
6456 Mnemonic ==
"vfmal" || Mnemonic ==
"vfmsl" || Mnemonic ==
"vfmat" ||
6457 Mnemonic ==
"vfmab" || Mnemonic ==
"vdot" || Mnemonic ==
"vmmla" ||
6458 Mnemonic ==
"sb" || Mnemonic ==
"ssbb" || Mnemonic ==
"pssbb" ||
6459 Mnemonic ==
"vsmmla" || Mnemonic ==
"vummla" || Mnemonic ==
"vusmmla" ||
6460 Mnemonic ==
"vusdot" || Mnemonic ==
"vsudot" || Mnemonic ==
"bfcsel" ||
6461 Mnemonic ==
"wls" || Mnemonic ==
"dls" || Mnemonic ==
"le" ||
6462 Mnemonic ==
"csel" || Mnemonic ==
"csinc" || Mnemonic ==
"csinv" ||
6463 Mnemonic ==
"csneg" || Mnemonic ==
"cinc" || Mnemonic ==
"cinv" ||
6464 Mnemonic ==
"cneg" || Mnemonic ==
"cset" || Mnemonic ==
"csetm" ||
6465 (hasCDE() && MS.isCDEInstr(Mnemonic) &&
6466 !MS.isITPredicableCDEInstr(Mnemonic)) ||
6468 Mnemonic ==
"pac" || Mnemonic ==
"pacbti" || Mnemonic ==
"aut" ||
6469 Mnemonic ==
"bti" ||
6476 CanAcceptPredicationCode =
false;
6479 CanAcceptPredicationCode =
6480 Mnemonic !=
"cdp2" && Mnemonic !=
"clrex" && Mnemonic !=
"mcr2" &&
6481 Mnemonic !=
"mcrr2" && Mnemonic !=
"mrc2" && Mnemonic !=
"mrrc2" &&
6482 Mnemonic !=
"dmb" && Mnemonic !=
"dfb" && Mnemonic !=
"dsb" &&
6483 Mnemonic !=
"isb" && Mnemonic !=
"pld" && Mnemonic !=
"pli" &&
6484 Mnemonic !=
"pldw" && Mnemonic !=
"ldc2" && Mnemonic !=
"ldc2l" &&
6485 Mnemonic !=
"stc2" && Mnemonic !=
"stc2l" && Mnemonic !=
"tsb" &&
6487 }
else if (isThumbOne()) {
6489 CanAcceptPredicationCode = Mnemonic !=
"movs";
6491 CanAcceptPredicationCode = Mnemonic !=
"nop" && Mnemonic !=
"movs";
6493 CanAcceptPredicationCode =
true;
6500void ARMAsmParser::tryConvertingToTwoOperandForm(
StringRef Mnemonic,
6506 const auto &Op3 =
static_cast<ARMOperand &
>(*
Operands[3]);
6507 auto &Op4 =
static_cast<ARMOperand &
>(*
Operands[4]);
6508 if (!Op3.isReg() || !Op4.isReg())
6511 auto Op3Reg = Op3.getReg();
6512 auto Op4Reg = Op4.getReg();
6518 auto &Op5 =
static_cast<ARMOperand &
>(*
Operands[5]);
6520 if (Mnemonic !=
"add")
6522 bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
6523 (Op5.isReg() && Op5.getReg() == ARM::PC);
6524 if (!TryTransform) {
6525 TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
6526 (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
6527 !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
6528 Op5.isImm() && !Op5.isImm0_508s4());
6532 }
else if (!isThumbOne())
6535 if (!(Mnemonic ==
"add" || Mnemonic ==
"sub" || Mnemonic ==
"and" ||
6536 Mnemonic ==
"eor" || Mnemonic ==
"lsl" || Mnemonic ==
"lsr" ||
6537 Mnemonic ==
"asr" || Mnemonic ==
"adc" || Mnemonic ==
"sbc" ||
6538 Mnemonic ==
"ror" || Mnemonic ==
"orr" || Mnemonic ==
"bic"))
6544 bool Transform = Op3Reg == Op4Reg;
6549 const ARMOperand *LastOp = &Op5;
6551 if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
6552 ((Mnemonic ==
"add" && Op4Reg != ARM::SP) ||
6553 Mnemonic ==
"and" || Mnemonic ==
"eor" ||
6554 Mnemonic ==
"adc" || Mnemonic ==
"orr")) {
6565 if (((Mnemonic ==
"add" && CarrySetting) || Mnemonic ==
"sub") &&
6571 if ((Mnemonic ==
"add" || Mnemonic ==
"sub") && LastOp->isImm0_7())
6585 ARMOperand &
Op =
static_cast<ARMOperand &
>(MCOp);
6591 const MCExpr *
E = dyn_cast<MCExpr>(
Op.getImm());
6594 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(
E);
6603bool ARMAsmParser::shouldOmitCCOutOperand(
StringRef Mnemonic,
6617 !
static_cast<ARMOperand &
>(*
Operands[4]).isModImm() &&
6618 static_cast<ARMOperand &
>(*
Operands[4]).isImm0_65535Expr() &&
6629 static_cast<ARMOperand &
>(*
Operands[3]).isReg() &&
6630 static_cast<ARMOperand &
>(*
Operands[4]).isReg() &&
6631 static_cast<ARMOperand &
>(*
Operands[1]).getReg() == 0)
6637 if (((
isThumb() && Mnemonic ==
"add") ||
6638 (isThumbTwo() && Mnemonic ==
"sub")) &&
6640 static_cast<ARMOperand &
>(*
Operands[4]).isReg() &&
6641 static_cast<ARMOperand &
>(*
Operands[4]).getReg() == ARM::SP &&
6642 static_cast<ARMOperand &
>(*
Operands[1]).getReg() == 0 &&
6643 ((Mnemonic ==
"add" &&
static_cast<ARMOperand &
>(*
Operands[5]).isReg()) ||
6644 static_cast<ARMOperand &
>(*
Operands[5]).isImm0_1020s4()))
6651 if (isThumbTwo() && (Mnemonic ==
"add" || Mnemonic ==
"sub") &&
6653 static_cast<ARMOperand &
>(*
Operands[4]).isReg() &&
6654 static_cast<ARMOperand &
>(*
Operands[5]).isImm()) {
6662 static_cast<ARMOperand &
>(*
Operands[5]).isImm0_7())
6666 if (
static_cast<ARMOperand &
>(*
Operands[4]).
getReg() != ARM::PC &&
6667 (
static_cast<ARMOperand &
>(*
Operands[5]).isT2SOImm() ||
6668 static_cast<ARMOperand &
>(*
Operands[5]).isT2SOImmNeg()))
6679 if (isThumbTwo() && Mnemonic ==
"mul" &&
Operands.size() == 6 &&
6680 static_cast<ARMOperand &
>(*
Operands[1]).getReg() == 0 &&
6681 static_cast<ARMOperand &
>(*
Operands[3]).isReg() &&
6682 static_cast<ARMOperand &
>(*
Operands[4]).isReg() &&
6683 static_cast<ARMOperand &
>(*
Operands[5]).isReg() &&
6691 !inITBlock() || (
static_cast<ARMOperand &
>(*
Operands[3]).
getReg() !=
6699 if (isThumbTwo() && Mnemonic ==
"mul" &&
Operands.size() == 5 &&
6700 static_cast<ARMOperand &
>(*
Operands[1]).getReg() == 0 &&
6701 static_cast<ARMOperand &
>(*
Operands[3]).isReg() &&
6702 static_cast<ARMOperand &
>(*
Operands[4]).isReg() &&
6716 if (
isThumb() && (Mnemonic ==
"add" || Mnemonic ==
"sub") &&
6718 static_cast<ARMOperand &
>(*
Operands[3]).isReg() &&
6719 static_cast<ARMOperand &
>(*
Operands[3]).getReg() == ARM::SP &&
6720 static_cast<ARMOperand &
>(*
Operands[1]).getReg() == 0 &&
6721 (
static_cast<ARMOperand &
>(*
Operands[4]).isImm() ||
6723 static_cast<ARMOperand &
>(*
Operands[5]).isImm()))) {
6725 return (!(isThumbTwo() &&
6726 (
static_cast<ARMOperand &
>(*
Operands[4]).isT2SOImm() ||
6727 static_cast<ARMOperand &
>(*
Operands[4]).isT2SOImmNeg())));
6732 if (isThumbTwo() && (Mnemonic ==
"add" || Mnemonic ==
"sub") &&
6734 static_cast<ARMOperand &
>(*
Operands[3]).isReg() &&
6735 static_cast<ARMOperand &
>(*
Operands[3]).getReg() != ARM::SP &&
6736 static_cast<ARMOperand &
>(*
Operands[3]).getReg() != ARM::PC &&
6737 static_cast<ARMOperand &
>(*
Operands[1]).getReg() == 0 &&
6738 static_cast<ARMOperand &
>(*
Operands[4]).isImm()) {
6739 const ARMOperand &IMM =
static_cast<ARMOperand &
>(*
Operands[4]);
6740 if (IMM.isT2SOImm() || IMM.isT2SOImmNeg())
6742 if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IMM.getImm())) {
6743 const int64_t
Value =
CE->getValue();
6745 if ((
Value < ((1 << 7) - 1) << 2) && inITBlock() && (!(
Value & 3)) &&
6754bool ARMAsmParser::shouldOmitPredicateOperand(
StringRef Mnemonic,
6757 unsigned RegIdx = 3;
6758 if ((((Mnemonic ==
"vrintz" || Mnemonic ==
"vrintx") && !hasMVE()) ||
6759 Mnemonic ==
"vrintr") &&
6760 (
static_cast<ARMOperand &
>(*
Operands[2]).getToken() ==
".f32" ||
6761 static_cast<ARMOperand &
>(*
Operands[2]).getToken() ==
".f16")) {
6762 if (
static_cast<ARMOperand &
>(*
Operands[3]).isToken() &&
6763 (
static_cast<ARMOperand &
>(*
Operands[3]).getToken() ==
".f32" ||
6764 static_cast<ARMOperand &
>(*
Operands[3]).getToken() ==
".f16"))
6768 (ARMMCRegisterClasses[ARM::DPRRegClassID].
contains(
6770 ARMMCRegisterClasses[ARM::QPRRegClassID].
contains(
6777bool ARMAsmParser::shouldOmitVectorPredicateOperand(
StringRef Mnemonic,
6779 if (!hasMVE() ||
Operands.size() < 3)
6793 if (
static_cast<ARMOperand &
>(*Operand).isVectorIndex() ||
6794 ((*Operand).isReg() &&
6795 (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
6796 (*Operand).getReg()) ||
6797 ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
6798 (*Operand).getReg())))) {
6808 if (
static_cast<ARMOperand &
>(*Operand).isVectorIndex() ||
6809 (Operand->isReg() &&
6810 (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
6811 Operand->getReg()))))
6819 return Tok ==
".8" || Tok ==
".16" || Tok ==
".32" || Tok ==
".64" ||
6820 Tok ==
".i8" || Tok ==
".i16" || Tok ==
".i32" || Tok ==
".i64" ||
6821 Tok ==
".u8" || Tok ==
".u16" || Tok ==
".u32" || Tok ==
".u64" ||
6822 Tok ==
".s8" || Tok ==
".s16" || Tok ==
".s32" || Tok ==
".s64" ||
6823 Tok ==
".p8" || Tok ==
".p16" || Tok ==
".f32" || Tok ==
".f64" ||
6824 Tok ==
".f" || Tok ==
".d";
6836 unsigned VariantID);
// fixupGNULDRDAlias: GNU-as compatibility fixup for "ldrd"/"strd" written
// with a single transfer register — the implicit second register of the
// pair (next encoding value) is derived and inserted into Operands.
// NOTE(review): this listing is a lossy extraction; several original lines
// (early returns, operand-count checks, the insertion itself) are missing
// between the fragments below — confirm against the full source.
6846void ARMAsmParser::fixupGNULDRDAlias(
StringRef Mnemonic,
// Only the ldrd/strd mnemonics get this treatment.
6848 if (Mnemonic !=
"ldrd" && Mnemonic !=
"strd")
6853 ARMOperand &Op2 =
static_cast<ARMOperand &
>(*
Operands[2]);
6854 ARMOperand &Op3 =
static_cast<ARMOperand &
>(*
Operands[3]);
// The single-register alias form requires a GPR memory operand here.
6858 if (!Op3.isGPRMem())
6865 unsigned RtEncoding =
MRI->getEncodingValue(Op2.getReg());
// In ARM (non-Thumb) mode the first register of the pair must be
// even-numbered, so an odd encoding value bails out.
6866 if (!
isThumb() && (RtEncoding & 1)) {
6871 if (Op2.getReg() == ARM::PC)
// The paired register is the next one by encoding value (Rt + 1).
6873 unsigned PairedReg = GPR.
getRegister(RtEncoding + 1);
// Reject pairs that fall off the register file, name PC, or name SP on
// pre-v8 targets.
6874 if (!PairedReg || PairedReg == ARM::PC ||
6875 (PairedReg == ARM::SP && !hasV8Ops()))
6880 ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
6888bool ARMAsmParser::CDEConvertDualRegOperand(
StringRef Mnemonic,
6890 assert(MS.isCDEDualRegInstr(Mnemonic));
6892 Mnemonic ==
"cx1da" || Mnemonic ==
"cx2da" || Mnemonic ==
"cx3da";
6893 size_t NumPredOps = isPredicable ? 1 : 0;
6895 if (
Operands.size() <= 3 + NumPredOps)
6899 "operand must be an even-numbered register in the range [r0, r10]");
6932 RPair = ARM::R10_R11;
6956 const FeatureBitset &AvailableFeatures = getAvailableFeatures();
6957 unsigned AssemblerDialect = getParser().getAssemblerDialect();
6963 parseDirectiveReq(
Name, NameLoc);
6970 size_t Start = 0, Next =
Name.find(
'.');
6977 unsigned ProcessorIMod;
6980 Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode, VPTPredicationCode,
6981 CarrySetting, ProcessorIMod, ITMask);
6984 if (isThumbOne() && PredicationCode !=
ARMCC::AL && Mnemonic !=
"b") {
6985 return Error(NameLoc,
"conditional execution not supported in Thumb1");
6988 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
7001 if (Mnemonic ==
"it" || Mnemonic.
starts_with(
"vpt") ||
7004 Mnemonic ==
"vpt" ?
SMLoc::getFromPointer(NameLoc.getPointer() + 3) :
7005 SMLoc::getFromPointer(NameLoc.getPointer() + 4);
7006 if (ITMask.
size() > 3) {
7007 if (Mnemonic ==
"it")
7008 return Error(Loc,
"too many conditions on IT instruction");
7009 return Error(Loc,
"too many conditions on VPT instruction");
7013 if (Pos !=
't' && Pos !=
'e') {
7014 return Error(Loc,
"illegal IT block condition mask '" + ITMask +
"'");
7020 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
7033 bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode;
7034 getMnemonicAcceptInfo(Mnemonic, ExtraToken,
Name, CanAcceptCarrySet,
7035 CanAcceptPredicationCode, CanAcceptVPTPredicationCode);
7039 if (!CanAcceptCarrySet && CarrySetting) {
7040 return Error(NameLoc,
"instruction '" + Mnemonic +
7041 "' can not set flags, but 's' suffix specified");
7045 if (!CanAcceptPredicationCode && PredicationCode !=
ARMCC::AL) {
7046 return Error(NameLoc,
"instruction '" + Mnemonic +
7047 "' is not predicable, but condition code specified");
7052 if (!CanAcceptVPTPredicationCode && VPTPredicationCode !=
ARMVCC::None) {
7053 return Error(NameLoc,
"instruction '" + Mnemonic +
7054 "' is not VPT predicable, but VPT code T/E is specified");
7058 if (CanAcceptCarrySet) {
7060 Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
7065 if (CanAcceptPredicationCode) {
7068 Operands.push_back(ARMOperand::CreateCondCode(
7079 if (CanAcceptVPTPredicationCode && Mnemonic !=
"vmov" &&
7081 !(Mnemonic.
starts_with(
"vcvt") && Mnemonic !=
"vcvta" &&
7082 Mnemonic !=
"vcvtn" && Mnemonic !=
"vcvtp" && Mnemonic !=
"vcvtm")) {
7085 Operands.push_back(ARMOperand::CreateVPTPred(
7090 if (ProcessorIMod) {
7091 Operands.push_back(ARMOperand::CreateImm(
7094 }
else if (Mnemonic ==
"cps" && isMClass()) {
7095 return Error(NameLoc,
"instruction 'cps' requires effect for M-class");
7101 Next =
Name.find(
'.', Start + 1);
7102 ExtraToken =
Name.slice(Start, Next);
7111 if (ExtraToken ==
".n" && !
isThumb()) {
7113 return Error(Loc,
"instruction with .n (narrow) qualifier not allowed in "
7120 if (ExtraToken !=
".n" && (
isThumb() || ExtraToken !=
".w")) {
7122 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
7129 if (parseOperand(
Operands, Mnemonic)) {
7135 if (parseOperand(
Operands, Mnemonic)) {
7144 tryConvertingToTwoOperandForm(Mnemonic, CarrySetting,
Operands);
7146 if (hasCDE() && MS.isCDEInstr(Mnemonic)) {
7154 if (MS.isCDEDualRegInstr(Mnemonic)) {
7155 bool GotError = CDEConvertDualRegOperand(Mnemonic,
Operands);
7168 if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic,
Operands))
7176 shouldOmitPredicateOperand(Mnemonic,
Operands))
7181 if (!shouldOmitVectorPredicateOperand(Mnemonic,
Operands) &&
7182 Mnemonic ==
"vmov" && PredicationCode ==
ARMCC::LT) {
7190 Mnemonic.
size() - 1 + CarrySetting);
7194 ARMOperand::CreateToken(
StringRef(
"vmovlt"), MLoc));
7195 }
else if (Mnemonic ==
"vcvt" && PredicationCode ==
ARMCC::NE &&
7196 !shouldOmitVectorPredicateOperand(Mnemonic,
Operands)) {
7205 Mnemonic.
size() - 1 + CarrySetting);
7209 ARMOperand::CreateToken(
StringRef(
"vcvtn"), MLoc));
7210 }
else if (Mnemonic ==
"vmul" && PredicationCode ==
ARMCC::LT &&
7211 !shouldOmitVectorPredicateOperand(Mnemonic,
Operands)) {
7219 ARMOperand::CreateToken(
StringRef(
"vmullt"), MLoc));
7226 else if (Mnemonic ==
"vmov" || Mnemonic.
starts_with(
"vcmp") ||
7231 if (!shouldOmitVectorPredicateOperand(Mnemonic,
Operands)) {
7239 auto Sz1 =
static_cast<ARMOperand &
>(*
Operands[2]);
7240 auto Sz2 =
static_cast<ARMOperand &
>(*
Operands[3]);
7241 if (!(Sz1.isToken() && Sz1.getToken().starts_with(
".f") &&
7242 Sz2.isToken() && Sz2.getToken().starts_with(
".f"))) {
7247 Mnemonic = Mnemonic.
substr(0, 4);
7249 ARMOperand::CreateToken(Mnemonic, MLoc));
7254 Mnemonic.
size() + CarrySetting);
7256 ARMOperand::CreateVPTPred(
7259 }
else if (CanAcceptVPTPredicationCode) {
7263 if (shouldOmitVectorPredicateOperand(Mnemonic,
Operands)) {
7264 if (CanAcceptPredicationCode)
7268 }
else if (CanAcceptPredicationCode && PredicationCode ==
ARMCC::AL) {
7275 bool usedVPTPredicationCode =
false;
7277 if (
static_cast<ARMOperand &
>(*
Operands[
I]).isVPTPred())
7278 usedVPTPredicationCode =
true;
7279 if (!usedVPTPredicationCode) {
7287 Mnemonic =
Name.slice(0, Mnemonic.
size() + 1);
7290 ARMOperand::CreateToken(Mnemonic, NameLoc));
7300 static_cast<ARMOperand &
>(*
Operands[2]).isImm())
7311 (Mnemonic ==
"ldrexd" || Mnemonic ==
"strexd" || Mnemonic ==
"ldaexd" ||
7312 Mnemonic ==
"stlexd")) {
7313 bool isLoad = (Mnemonic ==
"ldrexd" || Mnemonic ==
"ldaexd");
7315 ARMOperand &Op1 =
static_cast<ARMOperand &
>(*
Operands[
Idx]);
7316 ARMOperand &Op2 =
static_cast<ARMOperand &
>(*
Operands[
Idx + 1]);
7320 if (Op1.isReg() && Op2.isReg() && MRC.
contains(Op1.getReg()) &&
7322 unsigned Reg1 = Op1.getReg();
7323 unsigned Reg2 = Op2.getReg();
7324 unsigned Rt =
MRI->getEncodingValue(Reg1);
7325 unsigned Rt2 =
MRI->getEncodingValue(Reg2);
7328 if (Rt + 1 != Rt2 || (Rt & 1)) {
7329 return Error(Op2.getStartLoc(),
7330 isLoad ?
"destination operands must be sequential"
7331 :
"source operands must be sequential");
7333 unsigned NewReg =
MRI->getMatchingSuperReg(
7334 Reg1, ARM::gsub_0, &(
MRI->getRegClass(ARM::GPRPairRegClassID)));
7336 ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
7342 fixupGNULDRDAlias(Mnemonic,
Operands);
7349 if (isThumbTwo() && Mnemonic ==
"sub" &&
Operands.size() == 6 &&
7350 static_cast<ARMOperand &
>(*
Operands[3]).isReg() &&
7351 static_cast<ARMOperand &
>(*
Operands[3]).getReg() == ARM::PC &&
7352 static_cast<ARMOperand &
>(*
Operands[4]).isReg() &&
7353 static_cast<ARMOperand &
>(*
Operands[4]).getReg() == ARM::LR &&
7354 static_cast<ARMOperand &
>(*
Operands[5]).isImm()) {
7355 Operands.front() = ARMOperand::CreateToken(
Name, NameLoc);
7367 unsigned Reg,
unsigned HiReg,
7368 bool &containsReg) {
7369 containsReg =
false;
7395 return Inst.
getOpcode() == ARM::tBKPT ||
7401bool ARMAsmParser::validatetLDMRegList(
const MCInst &Inst,
7403 unsigned ListNo,
bool IsARPop) {
7404 const ARMOperand &
Op =
static_cast<const ARMOperand &
>(*
Operands[ListNo]);
7405 bool HasWritebackToken =
Op.isToken() &&
Op.getToken() ==
"!";
7411 if (!IsARPop && ListContainsSP)
7412 return Error(
Operands[ListNo + HasWritebackToken]->getStartLoc(),
7413 "SP may not be in the register list");
7414 else if (ListContainsPC && ListContainsLR)
7415 return Error(
Operands[ListNo + HasWritebackToken]->getStartLoc(),
7416 "PC and LR may not be in the register list simultaneously");
7420bool ARMAsmParser::validatetSTMRegList(
const MCInst &Inst,
7423 const ARMOperand &
Op =
static_cast<const ARMOperand &
>(*
Operands[ListNo]);
7424 bool HasWritebackToken =
Op.isToken() &&
Op.getToken() ==
"!";
7429 if (ListContainsSP && ListContainsPC)
7430 return Error(
Operands[ListNo + HasWritebackToken]->getStartLoc(),
7431 "SP and PC may not be in the register list");
7432 else if (ListContainsSP)
7433 return Error(
Operands[ListNo + HasWritebackToken]->getStartLoc(),
7434 "SP may not be in the register list");
7435 else if (ListContainsPC)
7436 return Error(
Operands[ListNo + HasWritebackToken]->getStartLoc(),
7437 "PC may not be in the register list");
// validateLDRDSTRD: shared diagnostics for the doubleword load/store
// (LDRD/STRD) instruction forms.
// NOTE(review): lossy extraction — the lines computing Rt/Rt2/Rn and most
// of the Error(...) call sites are missing between the fragments below;
// only the key conditions and diagnostic strings survive. Confirm details
// against the full source before relying on them.
7441bool ARMAsmParser::validateLDRDSTRD(
MCInst &Inst,
7443 bool Load,
bool ARMMode,
bool Writeback) {
// Writeback stores carry the base register first, shifting Rt's operand
// index by one.
7444 unsigned RtIndex =
Load || !Writeback ? 0 : 1;
7457 "Rt must be even-numbered");
// The two transfer registers must be consecutive: Rt2 == Rt + 1.
7460 if (Rt2 != Rt + 1) {
7463 "destination operands must be sequential");
7466 "source operands must be sequential");
// Thumb-mode loads additionally forbid identical destination registers.
7473 if (!ARMMode && Load) {
7476 "destination operands can't be identical");
// Writeback forms: the base register must not overlap the transfer pair.
7482 if (Rn == Rt || Rn == Rt2) {
7485 "base register needs to be different from destination "
7489 "source register and base register can't be identical");
7512 ARMOperand &
Op =
static_cast<ARMOperand &
>(MCOp);
7518 const MCExpr *
E = dyn_cast<MCExpr>(
Op.getImm());
7525bool ARMAsmParser::validateInstruction(
MCInst &Inst,
7536 return Error(Loc,
"instructions in IT block must be predicable");
7539 if (
Cond != currentITCond()) {
7543 if (
static_cast<ARMOperand &
>(*
Operands[
I]).isCondCode())
7545 return Error(CondLoc,
"incorrect condition in IT block; got '" +
7547 "', but expected '" +
7556 return Error(Loc,
"predicated instructions must be in IT block");
7560 return Warning(Loc,
"predicated instructions should be in IT block");
7567 if (MCID.
operands()[i].isPredicate()) {
7569 return Error(Loc,
"instruction is not predicable");
7577 if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
7578 return Error(Loc,
"instruction must be outside of IT block or the last instruction in an IT block");
7582 unsigned Bit = extractITMaskBit(VPTState.Mask, VPTState.CurPosition);
7584 return Error(Loc,
"instruction in VPT block must be predicable");
7587 if (Pred != VPTPred) {
7590 if (
static_cast<ARMOperand &
>(*
Operands[
I]).isVPTPred())
7592 return Error(PredLoc,
"incorrect predication in VPT block; got '" +
7594 "', but expected '" +
7601 return Error(Loc,
"VPT predicated instructions must be in VPT block");
7603 const unsigned Opcode = Inst.
getOpcode();
7608 case ARM::VLSTM_T2: {
7612 ARMOperand &
Op =
static_cast<ARMOperand &
>(
7615 auto &RegList =
Op.getRegList();
7617 if (RegList.size() == 32 && !hasV8_1MMainline()) {
7618 return Error(
Op.getEndLoc(),
"T2 version requires v8.1-M.Main");
7621 if (hasD32() && RegList.size() != 32) {
7622 return Error(
Op.getEndLoc(),
"operand must be exactly {d0-d31}");
7625 if (!hasD32() && (RegList.size() != 16 && RegList.size() != 32)) {
7627 "operand must be exactly {d0-d15} (T1) or {d0-d31} (T2)");
7643 return Error(Loc,
"unpredictable IT predicate sequence");
7647 if (validateLDRDSTRD(Inst,
Operands,
true,
true,
7652 case ARM::LDRD_POST:
7653 if (validateLDRDSTRD(Inst,
Operands,
true,
true,
7658 if (validateLDRDSTRD(Inst,
Operands,
true,
false,
7662 case ARM::t2LDRD_PRE:
7663 case ARM::t2LDRD_POST:
7664 if (validateLDRDSTRD(Inst,
Operands,
true,
false,
7671 if (RmReg == ARM::SP && !hasV8Ops())
7673 "r13 (SP) is an unpredictable operand to BXJ");
7677 if (validateLDRDSTRD(Inst,
Operands,
false,
true,
7682 case ARM::STRD_POST:
7683 if (validateLDRDSTRD(Inst,
Operands,
false,
true,
7687 case ARM::t2STRD_PRE:
7688 case ARM::t2STRD_POST:
7689 if (validateLDRDSTRD(Inst,
Operands,
false,
false,
7693 case ARM::STR_PRE_IMM:
7694 case ARM::STR_PRE_REG:
7695 case ARM::t2STR_PRE:
7696 case ARM::STR_POST_IMM:
7697 case ARM::STR_POST_REG:
7698 case ARM::t2STR_POST:
7700 case ARM::t2STRH_PRE:
7701 case ARM::STRH_POST:
7702 case ARM::t2STRH_POST:
7703 case ARM::STRB_PRE_IMM:
7704 case ARM::STRB_PRE_REG:
7705 case ARM::t2STRB_PRE:
7706 case ARM::STRB_POST_IMM:
7707 case ARM::STRB_POST_REG:
7708 case ARM::t2STRB_POST: {
7715 "source register and base register can't be identical");
7718 case ARM::t2LDR_PRE_imm:
7719 case ARM::t2LDR_POST_imm:
7720 case ARM::t2STR_PRE_imm:
7721 case ARM::t2STR_POST_imm: {
7728 "destination register and base register can't be identical");
7729 if (Inst.
getOpcode() == ARM::t2LDR_POST_imm ||
7730 Inst.
getOpcode() == ARM::t2STR_POST_imm) {
7732 if (Imm > 255 || Imm < -255)
7734 "operand must be in range [-255, 255]");
7736 if (Inst.
getOpcode() == ARM::t2STR_PRE_imm ||
7737 Inst.
getOpcode() == ARM::t2STR_POST_imm) {
7740 "operand must be a register in range [r0, r14]");
7746 case ARM::t2LDRB_OFFSET_imm:
7747 case ARM::t2LDRB_PRE_imm:
7748 case ARM::t2LDRB_POST_imm:
7749 case ARM::t2STRB_OFFSET_imm:
7750 case ARM::t2STRB_PRE_imm:
7751 case ARM::t2STRB_POST_imm: {
7752 if (Inst.
getOpcode() == ARM::t2LDRB_POST_imm ||
7753 Inst.
getOpcode() == ARM::t2STRB_POST_imm ||
7754 Inst.
getOpcode() == ARM::t2LDRB_PRE_imm ||
7755 Inst.
getOpcode() == ARM::t2STRB_PRE_imm) {
7757 if (Imm > 255 || Imm < -255)
7759 "operand must be in range [-255, 255]");
7760 }
else if (Inst.
getOpcode() == ARM::t2LDRB_OFFSET_imm ||
7761 Inst.
getOpcode() == ARM::t2STRB_OFFSET_imm) {
7763 if (Imm > 0 || Imm < -255)
7765 "operand must be in range [0, 255] with a negative sign");
7769 "if operand is PC, should call the LDRB (literal)");
7774 case ARM::t2LDRH_OFFSET_imm:
7775 case ARM::t2LDRH_PRE_imm:
7776 case ARM::t2LDRH_POST_imm:
7777 case ARM::t2STRH_OFFSET_imm:
7778 case ARM::t2STRH_PRE_imm:
7779 case ARM::t2STRH_POST_imm: {
7780 if (Inst.
getOpcode() == ARM::t2LDRH_POST_imm ||
7781 Inst.
getOpcode() == ARM::t2STRH_POST_imm ||
7782 Inst.
getOpcode() == ARM::t2LDRH_PRE_imm ||
7783 Inst.
getOpcode() == ARM::t2STRH_PRE_imm) {
7785 if (Imm > 255 || Imm < -255)
7787 "operand must be in range [-255, 255]");
7788 }
else if (Inst.
getOpcode() == ARM::t2LDRH_OFFSET_imm ||
7789 Inst.
getOpcode() == ARM::t2STRH_OFFSET_imm) {
7791 if (Imm > 0 || Imm < -255)
7793 "operand must be in range [0, 255] with a negative sign");
7797 "if operand is PC, should call the LDRH (literal)");
7802 case ARM::t2LDRSB_OFFSET_imm:
7803 case ARM::t2LDRSB_PRE_imm:
7804 case ARM::t2LDRSB_POST_imm: {
7805 if (Inst.
getOpcode() == ARM::t2LDRSB_POST_imm ||
7806 Inst.
getOpcode() == ARM::t2LDRSB_PRE_imm) {
7808 if (Imm > 255 || Imm < -255)
7810 "operand must be in range [-255, 255]");
7811 }
else if (Inst.
getOpcode() == ARM::t2LDRSB_OFFSET_imm) {
7813 if (Imm > 0 || Imm < -255)
7815 "operand must be in range [0, 255] with a negative sign");
7819 "if operand is PC, should call the LDRH (literal)");
7824 case ARM::t2LDRSH_OFFSET_imm:
7825 case ARM::t2LDRSH_PRE_imm:
7826 case ARM::t2LDRSH_POST_imm: {
7827 if (Inst.
getOpcode() == ARM::t2LDRSH_POST_imm ||
7828 Inst.
getOpcode() == ARM::t2LDRSH_PRE_imm) {
7830 if (Imm > 255 || Imm < -255)
7832 "operand must be in range [-255, 255]");
7833 }
else if (Inst.
getOpcode() == ARM::t2LDRSH_OFFSET_imm) {
7835 if (Imm > 0 || Imm < -255)
7837 "operand must be in range [0, 255] with a negative sign");
7841 "if operand is PC, should call the LDRH (literal)");
7846 case ARM::LDR_PRE_IMM:
7847 case ARM::LDR_PRE_REG:
7848 case ARM::t2LDR_PRE:
7849 case ARM::LDR_POST_IMM:
7850 case ARM::LDR_POST_REG:
7851 case ARM::t2LDR_POST:
7853 case ARM::t2LDRH_PRE:
7854 case ARM::LDRH_POST:
7855 case ARM::t2LDRH_POST:
7856 case ARM::LDRSH_PRE:
7857 case ARM::t2LDRSH_PRE:
7858 case ARM::LDRSH_POST:
7859 case ARM::t2LDRSH_POST:
7860 case ARM::LDRB_PRE_IMM:
7861 case ARM::LDRB_PRE_REG:
7862 case ARM::t2LDRB_PRE:
7863 case ARM::LDRB_POST_IMM:
7864 case ARM::LDRB_POST_REG:
7865 case ARM::t2LDRB_POST:
7866 case ARM::LDRSB_PRE:
7867 case ARM::t2LDRSB_PRE:
7868 case ARM::LDRSB_POST:
7869 case ARM::t2LDRSB_POST: {
7876 "destination register and base register can't be identical");
7880 case ARM::MVE_VLDRBU8_rq:
7881 case ARM::MVE_VLDRBU16_rq:
7882 case ARM::MVE_VLDRBS16_rq:
7883 case ARM::MVE_VLDRBU32_rq:
7884 case ARM::MVE_VLDRBS32_rq:
7885 case ARM::MVE_VLDRHU16_rq:
7886 case ARM::MVE_VLDRHU16_rq_u:
7887 case ARM::MVE_VLDRHU32_rq:
7888 case ARM::MVE_VLDRHU32_rq_u:
7889 case ARM::MVE_VLDRHS32_rq:
7890 case ARM::MVE_VLDRHS32_rq_u:
7891 case ARM::MVE_VLDRWU32_rq:
7892 case ARM::MVE_VLDRWU32_rq_u:
7893 case ARM::MVE_VLDRDU64_rq:
7894 case ARM::MVE_VLDRDU64_rq_u:
7895 case ARM::MVE_VLDRWU32_qi:
7896 case ARM::MVE_VLDRWU32_qi_pre:
7897 case ARM::MVE_VLDRDU64_qi:
7898 case ARM::MVE_VLDRDU64_qi_pre: {
7900 unsigned QdIdx = 0, QmIdx = 2;
7901 bool QmIsPointer =
false;
7903 case ARM::MVE_VLDRWU32_qi:
7904 case ARM::MVE_VLDRDU64_qi:
7908 case ARM::MVE_VLDRWU32_qi_pre:
7909 case ARM::MVE_VLDRDU64_qi_pre:
7920 Twine(
"destination vector register and vector ") +
7921 (QmIsPointer ?
"pointer" :
"offset") +
7922 " register can't be identical");
7934 if (Widthm1 >= 32 - LSB)
7936 "bitfield width must be in range [1,32-lsb]");
7948 bool HasWritebackToken =
7949 (
static_cast<ARMOperand &
>(*
Operands[3]).isToken() &&
7950 static_cast<ARMOperand &
>(*
Operands[3]).getToken() ==
"!");
7951 bool ListContainsBase;
7953 return Error(
Operands[3 + HasWritebackToken]->getStartLoc(),
7954 "registers must be in range r0-r7");
7956 if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
7958 "writeback operator '!' expected");
7961 if (ListContainsBase && HasWritebackToken)
7963 "writeback operator '!' not allowed when base register "
7964 "in register list");
7966 if (validatetLDMRegList(Inst,
Operands, 3))
7970 case ARM::LDMIA_UPD:
7971 case ARM::LDMDB_UPD:
7972 case ARM::LDMIB_UPD:
7973 case ARM::LDMDA_UPD:
7980 "writeback register not allowed in register list");
7984 if (validatetLDMRegList(Inst,
Operands, 3))
7989 if (validatetSTMRegList(Inst,
Operands, 3))
7992 case ARM::t2LDMIA_UPD:
7993 case ARM::t2LDMDB_UPD:
7994 case ARM::t2STMIA_UPD:
7995 case ARM::t2STMDB_UPD:
7998 "writeback register not allowed in register list");
8000 if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
8001 if (validatetLDMRegList(Inst,
Operands, 3))
8004 if (validatetSTMRegList(Inst,
Operands, 3))
8009 case ARM::sysLDMIA_UPD:
8010 case ARM::sysLDMDA_UPD:
8011 case ARM::sysLDMDB_UPD:
8012 case ARM::sysLDMIB_UPD:
8015 "writeback register only allowed on system LDM "
8016 "if PC in register-list");
8018 case ARM::sysSTMIA_UPD:
8019 case ARM::sysSTMDA_UPD:
8020 case ARM::sysSTMDB_UPD:
8021 case ARM::sysSTMIB_UPD:
8023 "system STM cannot have writeback register");
8034 ((ARMOperand &)*
Operands[5]).getReg()) &&
8035 (((ARMOperand &)*
Operands[3]).getReg() !=
8036 ((ARMOperand &)*
Operands[4]).getReg())) {
8038 "destination register must match source register");
8046 bool ListContainsBase;
8050 "registers must be in range r0-r7 or pc");
8051 if (validatetLDMRegList(Inst,
Operands, 2, !isMClass()))
8056 bool ListContainsBase;
8060 "registers must be in range r0-r7 or lr");
8061 if (validatetSTMRegList(Inst,
Operands, 2))
8065 case ARM::tSTMIA_UPD: {
8066 bool ListContainsBase, InvalidLowList;
8068 0, ListContainsBase);
8069 if (InvalidLowList && !isThumbTwo())
8071 "registers must be in range r0-r7");
8075 if (InvalidLowList && ListContainsBase)
8077 "writeback operator '!' not allowed when base register "
8078 "in register list");
8080 if (validatetSTMRegList(Inst,
Operands, 4))
8087 if (!isThumbTwo() &&
8090 "source register must be the same as destination");
8101 "source register must be sp if destination is sp");
8106 if (!(
static_cast<ARMOperand &
>(*
Operands[2])).isSignedOffset<11, 1>())
8107 return Error(
Operands[2]->getStartLoc(),
"branch target out of range");
8111 ARMOperand &Operand =
static_cast<ARMOperand &
>(*
Operands[
op]);
8113 if (!isa<MCBinaryExpr>(Operand.getImm()) &&
8114 !Operand.isSignedOffset<24, 1>())
8115 return Error(
Operands[
op]->getStartLoc(),
"branch target out of range");
8120 if (!
static_cast<ARMOperand &
>(*
Operands[2]).isSignedOffset<8, 1>())
8121 return Error(
Operands[2]->getStartLoc(),
"branch target out of range");
8125 if (!
static_cast<ARMOperand &
>(*
Operands[
Op]).isSignedOffset<20, 1>())
8126 return Error(
Operands[
Op]->getStartLoc(),
"branch target out of range");
8131 if (!
static_cast<ARMOperand &
>(*
Operands[2]).isUnsignedOffset<6, 1>())
8132 return Error(
Operands[2]->getStartLoc(),
"branch target out of range");
8138 case ARM::t2MOVTi16:
8146 int i = (
Operands[3]->isImm()) ? 3 : 4;
8147 ARMOperand &
Op =
static_cast<ARMOperand &
>(*
Operands[i]);
8150 const MCExpr *
E = dyn_cast<MCExpr>(
Op.getImm());
8152 const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(
E);
8157 "immediate expression for mov requires :lower16: or :upper16");
8163 return Error(
Op.getStartLoc(),
8164 "Immediate expression for Thumb adds requires :lower0_7:,"
8165 " :lower8_15:, :upper0_7: or :upper8_15:");
8171 return Error(
Op.getStartLoc(),
8172 "Immediate expression for Thumb movs requires :lower0_7:,"
8173 " :lower8_15:, :upper0_7: or :upper8_15:");
8182 if (Imm8 == 0x10 && Pred !=
ARMCC::AL && hasRAS())
8183 return Error(
Operands[1]->getStartLoc(),
"instruction 'esb' is not "
8184 "predicable, but condition "
8187 return Error(
Operands[1]->getStartLoc(),
"instruction 'csdb' is not "
8188 "predicable, but condition "
8196 if (!
static_cast<ARMOperand &
>(*
Operands[2]).isUnsignedOffset<4, 1>() ||
8199 "branch location out of range or not a multiple of 2");
8201 if (Opcode == ARM::t2BFi) {
8202 if (!
static_cast<ARMOperand &
>(*
Operands[3]).isSignedOffset<16, 1>())
8204 "branch target out of range or not a multiple of 2");
8205 }
else if (Opcode == ARM::t2BFLi) {
8206 if (!
static_cast<ARMOperand &
>(*
Operands[3]).isSignedOffset<18, 1>())
8208 "branch target out of range or not a multiple of 2");
8213 if (!
static_cast<ARMOperand &
>(*
Operands[1]).isUnsignedOffset<4, 1>() ||
8216 "branch location out of range or not a multiple of 2");
8218 if (!
static_cast<ARMOperand &
>(*
Operands[2]).isSignedOffset<16, 1>())
8220 "branch target out of range or not a multiple of 2");
8223 "branch location and else branch target should either both be "
8224 "immediates or both labels");
8228 if (Diff != 4 && Diff != 2)
8231 "else branch target must be 2 or 4 greater than the branch location");
8238 !ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
8241 "invalid register in register list. Valid registers are "
8242 "r0-r12, lr/r14 and APSR.");
8259 "instruction 'ssbb' is not predicable, but condition code "
8263 "instruction 'pssbb' is not predicable, but condition code "
8267 case ARM::VMOVRRS: {
8273 "source operands must be sequential");
8276 case ARM::VMOVSRR: {
8282 "destination operands must be sequential");
8286 case ARM::VSTMDIA: {
8287 ARMOperand &
Op =
static_cast<ARMOperand&
>(*
Operands[3]);
8288 auto &RegList =
Op.getRegList();
8289 if (RegList.size() < 1 || RegList.size() > 16)
8291 "list of registers must be at least 1 and at most 16");
8294 case ARM::MVE_VQDMULLs32bh:
8295 case ARM::MVE_VQDMULLs32th:
8296 case ARM::MVE_VCMULf32:
8297 case ARM::MVE_VMULLBs32:
8298 case ARM::MVE_VMULLTs32:
8299 case ARM::MVE_VMULLBu32:
8300 case ARM::MVE_VMULLTu32: {
8303 "Qd register and Qn register can't be identical");
8307 "Qd register and Qm register can't be identical");
8311 case ARM::MVE_VREV64_8:
8312 case ARM::MVE_VREV64_16:
8313 case ARM::MVE_VREV64_32:
8314 case ARM::MVE_VQDMULL_qr_s32bh:
8315 case ARM::MVE_VQDMULL_qr_s32th: {
8318 "Qd register and Qn register can't be identical");
8322 case ARM::MVE_VCADDi32:
8323 case ARM::MVE_VCADDf32:
8324 case ARM::MVE_VHCADDs32: {
8327 "Qd register and Qm register can't be identical");
8331 case ARM::MVE_VMOV_rr_q: {
8333 return Error (
Operands[4]->getStartLoc(),
"Q-registers must be the same");
8334 if (
static_cast<ARMOperand &
>(*
Operands[5]).getVectorIndex() !=
8335 static_cast<ARMOperand &
>(*
Operands[7]).getVectorIndex() + 2)
8336 return Error (
Operands[5]->getStartLoc(),
"Q-register indexes must be 2 and 0 or 3 and 1");
8339 case ARM::MVE_VMOV_q_rr: {
8341 return Error (
Operands[2]->getStartLoc(),
"Q-registers must be the same");
8342 if (
static_cast<ARMOperand &
>(*
Operands[3]).getVectorIndex() !=
8343 static_cast<ARMOperand &
>(*
Operands[5]).getVectorIndex() + 2)
8344 return Error (
Operands[3]->getStartLoc(),
"Q-register indexes must be 2 and 0 or 3 and 1");
8347 case ARM::MVE_SQRSHR:
8348 case ARM::MVE_UQRSHL: {
8351 "Rda register and Rm register can't be identical");
8372 case ARM::t2SMLALBB:
8373 case ARM::t2SMLALBT:
8375 case ARM::t2SMLALDX:
8376 case ARM::t2SMLALTB:
8377 case ARM::t2SMLALTT:
8379 case ARM::t2SMLSLDX:
8380 case ARM::t2SMULL: {
8385 "unpredictable instruction, RdHi and RdLo must be different");
8393 case ARM::CDE_CX1DA:
8397 case ARM::CDE_CX2DA:
8401 case ARM::CDE_CX3DA:
8402 case ARM::CDE_VCX1_vec:
8403 case ARM::CDE_VCX1_fpsp:
8404 case ARM::CDE_VCX1_fpdp:
8405 case ARM::CDE_VCX1A_vec:
8406 case ARM::CDE_VCX1A_fpsp:
8407 case ARM::CDE_VCX1A_fpdp:
8408 case ARM::CDE_VCX2_vec:
8409 case ARM::CDE_VCX2_fpsp:
8410 case ARM::CDE_VCX2_fpdp:
8411 case ARM::CDE_VCX2A_vec:
8412 case ARM::CDE_VCX2A_fpsp:
8413 case ARM::CDE_VCX2A_fpdp:
8414 case ARM::CDE_VCX3_vec:
8415 case ARM::CDE_VCX3_fpsp:
8416 case ARM::CDE_VCX3_fpdp:
8417 case ARM::CDE_VCX3A_vec:
8418 case ARM::CDE_VCX3A_fpsp:
8419 case ARM::CDE_VCX3A_fpdp: {
8421 "CDE operand 1 must be a coprocessor ID");
8425 "coprocessor must be configured as CDE");
8426 else if (Coproc >= 8)
8428 "coprocessor must be in the range [p0, p7]");
8434 case ARM::t2LDC2L_OFFSET:
8435 case ARM::t2LDC2L_OPTION:
8436 case ARM::t2LDC2L_POST:
8437 case ARM::t2LDC2L_PRE:
8438 case ARM::t2LDC2_OFFSET:
8439 case ARM::t2LDC2_OPTION:
8440 case ARM::t2LDC2_POST:
8441 case ARM::t2LDC2_PRE:
8442 case ARM::t2LDCL_OFFSET:
8443 case ARM::t2LDCL_OPTION:
8444 case ARM::t2LDCL_POST:
8445 case ARM::t2LDCL_PRE:
8446 case ARM::t2LDC_OFFSET:
8447 case ARM::t2LDC_OPTION:
8448 case ARM::t2LDC_POST:
8449 case ARM::t2LDC_PRE:
8458 case ARM::t2STC2L_OFFSET:
8459 case ARM::t2STC2L_OPTION:
8460 case ARM::t2STC2L_POST:
8461 case ARM::t2STC2L_PRE:
8462 case ARM::t2STC2_OFFSET:
8463 case ARM::t2STC2_OPTION:
8464 case ARM::t2STC2_POST:
8465 case ARM::t2STC2_PRE:
8466 case ARM::t2STCL_OFFSET:
8467 case ARM::t2STCL_OPTION:
8468 case ARM::t2STCL_POST:
8469 case ARM::t2STCL_PRE:
8470 case ARM::t2STC_OFFSET:
8471 case ARM::t2STC_OPTION:
8472 case ARM::t2STC_POST:
8473 case ARM::t2STC_PRE: {
8478 if (Opcode == ARM::t2MRRC || Opcode == ARM::t2MRRC2)
8480 else if (Opcode == ARM::t2MRC || Opcode == ARM::t2MRC2)
8483 "Operand must be a coprocessor ID");
8488 "coprocessor must be configured as GCP");
8500 case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VST1LNd8_UPD;
8501 case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VST1LNd16_UPD;
8502 case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VST1LNd32_UPD;
8503 case ARM::VST1LNdWB_register_Asm_8: Spacing = 1;
return ARM::VST1LNd8_UPD;
8504 case ARM::VST1LNdWB_register_Asm_16: Spacing = 1;
return ARM::VST1LNd16_UPD;
8505 case ARM::VST1LNdWB_register_Asm_32: Spacing = 1;
return ARM::VST1LNd32_UPD;
8506 case ARM::VST1LNdAsm_8: Spacing = 1;
return ARM::VST1LNd8;
8507 case ARM::VST1LNdAsm_16: Spacing = 1;
return ARM::VST1LNd16;
8508 case ARM::VST1LNdAsm_32: Spacing = 1;
return ARM::VST1LNd32;
8511 case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VST2LNd8_UPD;
8512 case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VST2LNd16_UPD;
8513 case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VST2LNd32_UPD;
8514 case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2;
return ARM::VST2LNq16_UPD;
8515 case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VST2LNq32_UPD;
8517 case ARM::VST2LNdWB_register_Asm_8: Spacing = 1;
return ARM::VST2LNd8_UPD;
8518 case ARM::VST2LNdWB_register_Asm_16: Spacing = 1;
return ARM::VST2LNd16_UPD;
8519 case ARM::VST2LNdWB_register_Asm_32: Spacing = 1;
return ARM::VST2LNd32_UPD;
8520 case ARM::VST2LNqWB_register_Asm_16: Spacing = 2;
return ARM::VST2LNq16_UPD;
8521 case ARM::VST2LNqWB_register_Asm_32: Spacing = 2;
return ARM::VST2LNq32_UPD;
8523 case ARM::VST2LNdAsm_8: Spacing = 1;
return ARM::VST2LNd8;
8524 case ARM::VST2LNdAsm_16: Spacing = 1;
return ARM::VST2LNd16;
8525 case ARM::VST2LNdAsm_32: Spacing = 1;
return ARM::VST2LNd32;
8526 case ARM::VST2LNqAsm_16: Spacing = 2;
return ARM::VST2LNq16;
8527 case ARM::VST2LNqAsm_32: Spacing = 2;
return ARM::VST2LNq32;
8530 case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VST3LNd8_UPD;
8531 case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VST3LNd16_UPD;
8532 case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VST3LNd32_UPD;
8533 case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1;
return ARM::VST3LNq16_UPD;
8534 case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VST3LNq32_UPD;
8535 case ARM::VST3LNdWB_register_Asm_8: Spacing = 1;
return ARM::VST3LNd8_UPD;
8536 case ARM::VST3LNdWB_register_Asm_16: Spacing = 1;
return ARM::VST3LNd16_UPD;
8537 case ARM::VST3LNdWB_register_Asm_32: Spacing = 1;
return ARM::VST3LNd32_UPD;
8538 case ARM::VST3LNqWB_register_Asm_16: Spacing = 2;
return ARM::VST3LNq16_UPD;
8539 case ARM::VST3LNqWB_register_Asm_32: Spacing = 2;
return ARM::VST3LNq32_UPD;
8540 case ARM::VST3LNdAsm_8: Spacing = 1;
return ARM::VST3LNd8;
8541 case ARM::VST3LNdAsm_16: Spacing = 1;
return ARM::VST3LNd16;
8542 case ARM::VST3LNdAsm_32: Spacing = 1;
return ARM::VST3LNd32;
8543 case ARM::VST3LNqAsm_16: Spacing = 2;
return ARM::VST3LNq16;
8544 case ARM::VST3LNqAsm_32: Spacing = 2;
return ARM::VST3LNq32;
8547 case ARM::VST3dWB_fixed_Asm_8: Spacing = 1;
return ARM::VST3d8_UPD;
8548 case ARM::VST3dWB_fixed_Asm_16: Spacing = 1;
return ARM::VST3d16_UPD;
8549 case ARM::VST3dWB_fixed_Asm_32: Spacing = 1;
return ARM::VST3d32_UPD;
8550 case ARM::VST3qWB_fixed_Asm_8: Spacing = 2;
return ARM::VST3q8_UPD;
8551 case ARM::VST3qWB_fixed_Asm_16: Spacing = 2;
return ARM::VST3q16_UPD;
8552 case ARM::VST3qWB_fixed_Asm_32: Spacing = 2;
return ARM::VST3q32_UPD;
8553 case ARM::VST3dWB_register_Asm_8: Spacing = 1;
return ARM::VST3d8_UPD;
8554 case ARM::VST3dWB_register_Asm_16: Spacing = 1;
return ARM::VST3d16_UPD;
8555 case ARM::VST3dWB_register_Asm_32: Spacing = 1;
return ARM::VST3d32_UPD;
8556 case ARM::VST3qWB_register_Asm_8: Spacing = 2;
return ARM::VST3q8_UPD;
8557 case ARM::VST3qWB_register_Asm_16: Spacing = 2;
return ARM::VST3q16_UPD;
8558 case ARM::VST3qWB_register_Asm_32: Spacing = 2;
return ARM::VST3q32_UPD;
8559 case ARM::VST3dAsm_8: Spacing = 1;
return ARM::VST3d8;
8560 case ARM::VST3dAsm_16: Spacing = 1;
return ARM::VST3d16;
8561 case ARM::VST3dAsm_32: Spacing = 1;
return ARM::VST3d32;
8562 case ARM::VST3qAsm_8: Spacing = 2;
return ARM::VST3q8;
8563 case ARM::VST3qAsm_16: Spacing = 2;
return ARM::VST3q16;
8564 case ARM::VST3qAsm_32: Spacing = 2;
return ARM::VST3q32;
8567 case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VST4LNd8_UPD;
8568 case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VST4LNd16_UPD;
8569 case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VST4LNd32_UPD;
8570 case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1;
return ARM::VST4LNq16_UPD;
8571 case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VST4LNq32_UPD;
8572 case ARM::VST4LNdWB_register_Asm_8: Spacing = 1;
return ARM::VST4LNd8_UPD;
8573 case ARM::VST4LNdWB_register_Asm_16: Spacing = 1;
return ARM::VST4LNd16_UPD;
8574 case ARM::VST4LNdWB_register_Asm_32: Spacing = 1;
return ARM::VST4LNd32_UPD;
8575 case ARM::VST4LNqWB_register_Asm_16: Spacing = 2;
return ARM::VST4LNq16_UPD;
8576 case ARM::VST4LNqWB_register_Asm_32: Spacing = 2;
return ARM::VST4LNq32_UPD;
8577 case ARM::VST4LNdAsm_8: Spacing = 1;
return ARM::VST4LNd8;
8578 case ARM::VST4LNdAsm_16: Spacing = 1;
return ARM::VST4LNd16;
8579 case ARM::VST4LNdAsm_32: Spacing = 1;
return ARM::VST4LNd32;
8580 case ARM::VST4LNqAsm_16: Spacing = 2;
return ARM::VST4LNq16;
8581 case ARM::VST4LNqAsm_32: Spacing = 2;
return ARM::VST4LNq32;
8584 case ARM::VST4dWB_fixed_Asm_8: Spacing = 1;
return ARM::VST4d8_UPD;
8585 case ARM::VST4dWB_fixed_Asm_16: Spacing = 1;
return ARM::VST4d16_UPD;
8586 case ARM::VST4dWB_fixed_Asm_32: Spacing = 1;
return ARM::VST4d32_UPD;
8587 case ARM::VST4qWB_fixed_Asm_8: Spacing = 2;
return ARM::VST4q8_UPD;
8588 case ARM::VST4qWB_fixed_Asm_16: Spacing = 2;
return ARM::VST4q16_UPD;
8589 case ARM::VST4qWB_fixed_Asm_32: Spacing = 2;
return ARM::VST4q32_UPD;
8590 case ARM::VST4dWB_register_Asm_8: Spacing = 1;
return ARM::VST4d8_UPD;
8591 case ARM::VST4dWB_register_Asm_16: Spacing = 1;
return ARM::VST4d16_UPD;
8592 case ARM::VST4dWB_register_Asm_32: Spacing = 1;
return ARM::VST4d32_UPD;
8593 case ARM::VST4qWB_register_Asm_8: Spacing = 2;
return ARM::VST4q8_UPD;
8594 case ARM::VST4qWB_register_Asm_16: Spacing = 2;
return ARM::VST4q16_UPD;
8595 case ARM::VST4qWB_register_Asm_32: Spacing = 2;
return ARM::VST4q32_UPD;
8596 case ARM::VST4dAsm_8: Spacing = 1;
return ARM::VST4d8;
8597 case ARM::VST4dAsm_16: Spacing = 1;
return ARM::VST4d16;
8598 case ARM::VST4dAsm_32: Spacing = 1;
return ARM::VST4d32;
8599 case ARM::VST4qAsm_8: Spacing = 2;
return ARM::VST4q8;
8600 case ARM::VST4qAsm_16: Spacing = 2;
return ARM::VST4q16;
8601 case ARM::VST4qAsm_32: Spacing = 2;
return ARM::VST4q32;
8609 case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD1LNd8_UPD;
8610 case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD1LNd16_UPD;
8611 case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD1LNd32_UPD;
8612 case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1;
return ARM::VLD1LNd8_UPD;
8613 case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1;
return ARM::VLD1LNd16_UPD;
8614 case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1;
return ARM::VLD1LNd32_UPD;
8615 case ARM::VLD1LNdAsm_8: Spacing = 1;
return ARM::VLD1LNd8;
8616 case ARM::VLD1LNdAsm_16: Spacing = 1;
return ARM::VLD1LNd16;
8617 case ARM::VLD1LNdAsm_32: Spacing = 1;
return ARM::VLD1LNd32;
8620 case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD2LNd8_UPD;
8621 case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD2LNd16_UPD;
8622 case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD2LNd32_UPD;
8623 case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD2LNq16_UPD;
8624 case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD2LNq32_UPD;
8625 case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1;
return ARM::VLD2LNd8_UPD;
8626 case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1;
return ARM::VLD2LNd16_UPD;
8627 case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1;
return ARM::VLD2LNd32_UPD;
8628 case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2;
return ARM::VLD2LNq16_UPD;
8629 case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2;
return ARM::VLD2LNq32_UPD;
8630 case ARM::VLD2LNdAsm_8: Spacing = 1;
return ARM::VLD2LNd8;
8631 case ARM::VLD2LNdAsm_16: Spacing = 1;
return ARM::VLD2LNd16;
8632 case ARM::VLD2LNdAsm_32: Spacing = 1;
return ARM::VLD2LNd32;
8633 case ARM::VLD2LNqAsm_16: Spacing = 2;
return ARM::VLD2LNq16;
8634 case ARM::VLD2LNqAsm_32: Spacing = 2;
return ARM::VLD2LNq32;
8637 case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD3DUPd8_UPD;
8638 case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD3DUPd16_UPD;
8639 case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD3DUPd32_UPD;
8640 case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD3DUPq8_UPD;
8641 case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2;
return ARM::VLD3DUPq16_UPD;
8642 case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD3DUPq32_UPD;
8643 case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1;
return ARM::VLD3DUPd8_UPD;
8644 case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1;
return ARM::VLD3DUPd16_UPD;
8645 case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1;
return ARM::VLD3DUPd32_UPD;
8646 case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2;
return ARM::VLD3DUPq8_UPD;
8647 case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2;
return ARM::VLD3DUPq16_UPD;
8648 case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2;
return ARM::VLD3DUPq32_UPD;
8649 case ARM::VLD3DUPdAsm_8: Spacing = 1;
return ARM::VLD3DUPd8;
8650 case ARM::VLD3DUPdAsm_16: Spacing = 1;
return ARM::VLD3DUPd16;
8651 case ARM::VLD3DUPdAsm_32: Spacing = 1;
return ARM::VLD3DUPd32;
8652 case ARM::VLD3DUPqAsm_8: Spacing = 2;
return ARM::VLD3DUPq8;
8653 case ARM::VLD3DUPqAsm_16: Spacing = 2;
return ARM::VLD3DUPq16;
8654 case ARM::VLD3DUPqAsm_32: Spacing = 2;
return ARM::VLD3DUPq32;
8657 case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD3LNd8_UPD;
8658 case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD3LNd16_UPD;
8659 case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD3LNd32_UPD;
8660 case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD3LNq16_UPD;
8661 case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD3LNq32_UPD;
8662 case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1;
return ARM::VLD3LNd8_UPD;
8663 case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1;
return ARM::VLD3LNd16_UPD;
8664 case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1;
return ARM::VLD3LNd32_UPD;
8665 case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2;
return ARM::VLD3LNq16_UPD;
8666 case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2;
return ARM::VLD3LNq32_UPD;
8667 case ARM::VLD3LNdAsm_8: Spacing = 1;
return ARM::VLD3LNd8;
8668 case ARM::VLD3LNdAsm_16: Spacing = 1;
return ARM::VLD3LNd16;
8669 case ARM::VLD3LNdAsm_32: Spacing = 1;
return ARM::VLD3LNd32;
8670 case ARM::VLD3LNqAsm_16: Spacing = 2;
return ARM::VLD3LNq16;
8671 case ARM::VLD3LNqAsm_32: Spacing = 2;
return ARM::VLD3LNq32;
8674 case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD3d8_UPD;
8675 case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD3d16_UPD;
8676 case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD3d32_UPD;
8677 case ARM::VLD3qWB_fixed_Asm_8: Spacing = 2;
return ARM::VLD3q8_UPD;
8678 case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2;
return ARM::VLD3q16_UPD;
8679 case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD3q32_UPD;
8680 case ARM::VLD3dWB_register_Asm_8: Spacing = 1;
return ARM::VLD3d8_UPD;
8681 case ARM::VLD3dWB_register_Asm_16: Spacing = 1;
return ARM::VLD3d16_UPD;
8682 case ARM::VLD3dWB_register_Asm_32: Spacing = 1;
return ARM::VLD3d32_UPD;
8683 case ARM::VLD3qWB_register_Asm_8: Spacing = 2;
return ARM::VLD3q8_UPD;
8684 case ARM::VLD3qWB_register_Asm_16: Spacing = 2;
return ARM::VLD3q16_UPD;
8685 case ARM::VLD3qWB_register_Asm_32: Spacing = 2;
return ARM::VLD3q32_UPD;
8686 case ARM::VLD3dAsm_8: Spacing = 1;
return ARM::VLD3d8;
8687 case ARM::VLD3dAsm_16: Spacing = 1;
return ARM::VLD3d16;
8688 case ARM::VLD3dAsm_32: Spacing = 1;
return ARM::VLD3d32;
8689 case ARM::VLD3qAsm_8: Spacing = 2;
return ARM::VLD3q8;
8690 case ARM::VLD3qAsm_16: Spacing = 2;
return ARM::VLD3q16;
8691 case ARM::VLD3qAsm_32: Spacing = 2;
return ARM::VLD3q32;
8694 case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD4LNd8_UPD;
8695 case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD4LNd16_UPD;
8696 case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD4LNd32_UPD;
8697 case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2;
return ARM::VLD4LNq16_UPD;
8698 case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD4LNq32_UPD;
8699 case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1;
return ARM::VLD4LNd8_UPD;
8700 case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1;
return ARM::VLD4LNd16_UPD;
8701 case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1;
return ARM::VLD4LNd32_UPD;
8702 case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2;
return ARM::VLD4LNq16_UPD;
8703 case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2;
return ARM::VLD4LNq32_UPD;
8704 case ARM::VLD4LNdAsm_8: Spacing = 1;
return ARM::VLD4LNd8;
8705 case ARM::VLD4LNdAsm_16: Spacing = 1;
return ARM::VLD4LNd16;
8706 case ARM::VLD4LNdAsm_32: Spacing = 1;
return ARM::VLD4LNd32;
8707 case ARM::VLD4LNqAsm_16: Spacing = 2;
return ARM::VLD4LNq16;
8708 case ARM::VLD4LNqAsm_32: Spacing = 2;
return ARM::VLD4LNq32;
8711 case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD4DUPd8_UPD;
8712 case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD4DUPd16_UPD;
8713 case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD4DUPd32_UPD;
8714 case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD4DUPq8_UPD;
8715 case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD4DUPq16_UPD;
8716 case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD4DUPq32_UPD;
8717 case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1;
return ARM::VLD4DUPd8_UPD;
8718 case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1;
return ARM::VLD4DUPd16_UPD;
8719 case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1;
return ARM::VLD4DUPd32_UPD;
8720 case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2;
return ARM::VLD4DUPq8_UPD;
8721 case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2;
return ARM::VLD4DUPq16_UPD;
8722 case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2;
return ARM::VLD4DUPq32_UPD;
8723 case ARM::VLD4DUPdAsm_8: Spacing = 1;
return ARM::VLD4DUPd8;
8724 case ARM::VLD4DUPdAsm_16: Spacing = 1;
return ARM::VLD4DUPd16;
8725 case ARM::VLD4DUPdAsm_32: Spacing = 1;
return ARM::VLD4DUPd32;
8726 case ARM::VLD4DUPqAsm_8: Spacing = 2;
return ARM::VLD4DUPq8;
8727 case ARM::VLD4DUPqAsm_16: Spacing = 2;
return ARM::VLD4DUPq16;
8728 case ARM::VLD4DUPqAsm_32: Spacing = 2;
return ARM::VLD4DUPq32;
8731 case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1;
return ARM::VLD4d8_UPD;
8732 case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1;
return ARM::VLD4d16_UPD;
8733 case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1;
return ARM::VLD4d32_UPD;
8734 case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2;
return ARM::VLD4q8_UPD;
8735 case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2;
return ARM::VLD4q16_UPD;
8736 case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2;
return ARM::VLD4q32_UPD;
8737 case ARM::VLD4dWB_register_Asm_8: Spacing = 1;
return ARM::VLD4d8_UPD;
8738 case ARM::VLD4dWB_register_Asm_16: Spacing = 1;
return ARM::VLD4d16_UPD;
8739 case ARM::VLD4dWB_register_Asm_32: Spacing = 1;
return ARM::VLD4d32_UPD;
8740 case ARM::VLD4qWB_register_Asm_8: Spacing = 2;
return ARM::VLD4q8_UPD;
8741 case ARM::VLD4qWB_register_Asm_16: Spacing = 2;
return ARM::VLD4q16_UPD;
8742 case ARM::VLD4qWB_register_Asm_32: Spacing = 2;
return ARM::VLD4q32_UPD;
8743 case ARM::VLD4dAsm_8: Spacing = 1;
return ARM::VLD4d8;
8744 case ARM::VLD4dAsm_16: Spacing = 1;
return ARM::VLD4d16;
8745 case ARM::VLD4dAsm_32: Spacing = 1;
return ARM::VLD4d32;
8746 case ARM::VLD4qAsm_8: Spacing = 2;
return ARM::VLD4q8;
8747 case ARM::VLD4qAsm_16: Spacing = 2;
return ARM::VLD4q16;
8748 case ARM::VLD4qAsm_32: Spacing = 2;
return ARM::VLD4q32;
8752bool ARMAsmParser::processInstruction(
MCInst &Inst,
8757 bool HasWideQualifier =
false;
8759 ARMOperand &ARMOp =
static_cast<ARMOperand&
>(*Op);
8760 if (ARMOp.isToken() && ARMOp.getToken() ==
".w") {
8761 HasWideQualifier =
true;
8772 ARMOperand &
Op =
static_cast<ARMOperand &
>(
8775 auto &RegList =
Op.getRegList();
8778 if (RegList.size() == 32) {
8779 const unsigned Opcode =
8780 (Inst.
getOpcode() == ARM::VLLDM) ? ARM::VLLDM_T2 : ARM::VLSTM_T2;
8794 case ARM::LDRT_POST:
8795 case ARM::LDRBT_POST: {
8796 const unsigned Opcode =
8797 (Inst.
getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
8798 : ARM::LDRBT_POST_IMM;
8814 case ARM::LDRSHTii: {
8819 else if (Inst.
getOpcode() == ARM::LDRHTii)
8821 else if (Inst.
getOpcode() == ARM::LDRSHTii)
8832 case ARM::STRT_POST:
8833 case ARM::STRBT_POST: {
8834 const unsigned Opcode =
8835 (Inst.
getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
8836 : ARM::STRBT_POST_IMM;
8863 llvm::rotr<uint32_t>(Enc & 0xFF, (Enc & 0xF00) >> 7)));
8868 MCSymbol *Dot = getContext().createTempSymbol();
8887 case ARM::t2LDR_PRE_imm:
8888 case ARM::t2LDR_POST_imm: {
8901 case ARM::t2STR_PRE_imm:
8902 case ARM::t2STR_POST_imm: {
8915 case ARM::t2LDRB_OFFSET_imm: {
8925 case ARM::t2LDRB_PRE_imm:
8926 case ARM::t2LDRB_POST_imm: {
8930 : ARM::t2LDRB_POST);
8940 case ARM::t2STRB_OFFSET_imm: {
8950 case ARM::t2STRB_PRE_imm:
8951 case ARM::t2STRB_POST_imm: {
8955 : ARM::t2STRB_POST);
8965 case ARM::t2LDRH_OFFSET_imm: {
8975 case ARM::t2LDRH_PRE_imm:
8976 case ARM::t2LDRH_POST_imm: {
8980 : ARM::t2LDRH_POST);
8990 case ARM::t2STRH_OFFSET_imm: {
9000 case ARM::t2STRH_PRE_imm:
9001 case ARM::t2STRH_POST_imm: {
9005 : ARM::t2STRH_POST);
9015 case ARM::t2LDRSB_OFFSET_imm: {
9025 case ARM::t2LDRSB_PRE_imm:
9026 case ARM::t2LDRSB_POST_imm: {
9030 : ARM::t2LDRSB_POST);
9040 case ARM::t2LDRSH_OFFSET_imm: {
9050 case ARM::t2LDRSH_PRE_imm:
9051 case ARM::t2LDRSH_POST_imm: {
9055 : ARM::t2LDRSH_POST);
9065 case ARM::t2LDRpcrel:
9074 case ARM::t2LDRBpcrel:
9077 case ARM::t2LDRHpcrel:
9080 case ARM::t2LDRSBpcrel:
9083 case ARM::t2LDRSHpcrel:
9086 case ARM::LDRConstPool:
9087 case ARM::tLDRConstPool:
9088 case ARM::t2LDRConstPool: {
9093 if (Inst.
getOpcode() == ARM::LDRConstPool)
9095 else if (Inst.
getOpcode() == ARM::tLDRConstPool)
9097 else if (Inst.
getOpcode() == ARM::t2LDRConstPool)
9099 const ARMOperand &PoolOperand =
9101 static_cast<ARMOperand &
>(*
Operands[4]) :
9102 static_cast<ARMOperand &
>(*
Operands[3]));
9103 const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
9105 if (isa<MCConstantExpr>(SubExprVal) &&
9109 (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
9111 bool MovHasS =
true;
9112 if (Inst.
getOpcode() == ARM::LDRConstPool) {
9122 else if (hasV6T2Ops() &&
9135 else if (hasThumb2() &&
9140 else if (hasV8MBaseline() &&
9161 getTargetStreamer().addConstantPoolEntry(SubExprVal,
9162 PoolOperand.getStartLoc());
9173 case ARM::VST1LNdWB_register_Asm_8:
9174 case ARM::VST1LNdWB_register_Asm_16:
9175 case ARM::VST1LNdWB_register_Asm_32: {
9193 case ARM::VST2LNdWB_register_Asm_8:
9194 case ARM::VST2LNdWB_register_Asm_16:
9195 case ARM::VST2LNdWB_register_Asm_32:
9196 case ARM::VST2LNqWB_register_Asm_16:
9197 case ARM::VST2LNqWB_register_Asm_32: {
9217 case ARM::VST3LNdWB_register_Asm_8:
9218 case ARM::VST3LNdWB_register_Asm_16:
9219 case ARM::VST3LNdWB_register_Asm_32:
9220 case ARM::VST3LNqWB_register_Asm_16:
9221 case ARM::VST3LNqWB_register_Asm_32: {
9243 case ARM::VST4LNdWB_register_Asm_8:
9244 case ARM::VST4LNdWB_register_Asm_16:
9245 case ARM::VST4LNdWB_register_Asm_32:
9246 case ARM::VST4LNqWB_register_Asm_16:
9247 case ARM::VST4LNqWB_register_Asm_32: {
9271 case ARM::VST1LNdWB_fixed_Asm_8:
9272 case ARM::VST1LNdWB_fixed_Asm_16:
9273 case ARM::VST1LNdWB_fixed_Asm_32: {
9291 case ARM::VST2LNdWB_fixed_Asm_8:
9292 case ARM::VST2LNdWB_fixed_Asm_16:
9293 case ARM::VST2LNdWB_fixed_Asm_32:
9294 case ARM::VST2LNqWB_fixed_Asm_16:
9295 case ARM::VST2LNqWB_fixed_Asm_32: {
9315 case ARM::VST3LNdWB_fixed_Asm_8:
9316 case ARM::VST3LNdWB_fixed_Asm_16:
9317 case ARM::VST3LNdWB_fixed_Asm_32:
9318 case ARM::VST3LNqWB_fixed_Asm_16:
9319 case ARM::VST3LNqWB_fixed_Asm_32: {
9341 case ARM::VST4LNdWB_fixed_Asm_8:
9342 case ARM::VST4LNdWB_fixed_Asm_16:
9343 case ARM::VST4LNdWB_fixed_Asm_32:
9344 case ARM::VST4LNqWB_fixed_Asm_16:
9345 case ARM::VST4LNqWB_fixed_Asm_32: {
9369 case ARM::VST1LNdAsm_8:
9370 case ARM::VST1LNdAsm_16:
9371 case ARM::VST1LNdAsm_32: {
9387 case ARM::VST2LNdAsm_8:
9388 case ARM::VST2LNdAsm_16:
9389 case ARM::VST2LNdAsm_32:
9390 case ARM::VST2LNqAsm_16:
9391 case ARM::VST2LNqAsm_32: {
9409 case ARM::VST3LNdAsm_8:
9410 case ARM::VST3LNdAsm_16:
9411 case ARM::VST3LNdAsm_32:
9412 case ARM::VST3LNqAsm_16:
9413 case ARM::VST3LNqAsm_32: {
9433 case ARM::VST4LNdAsm_8:
9434 case ARM::VST4LNdAsm_16:
9435 case ARM::VST4LNdAsm_32:
9436 case ARM::VST4LNqAsm_16:
9437 case ARM::VST4LNqAsm_32: {
9460 case ARM::VLD1LNdWB_register_Asm_8:
9461 case ARM::VLD1LNdWB_register_Asm_16:
9462 case ARM::VLD1LNdWB_register_Asm_32: {
9481 case ARM::VLD2LNdWB_register_Asm_8:
9482 case ARM::VLD2LNdWB_register_Asm_16:
9483 case ARM::VLD2LNdWB_register_Asm_32:
9484 case ARM::VLD2LNqWB_register_Asm_16:
9485 case ARM::VLD2LNqWB_register_Asm_32: {
9508 case ARM::VLD3LNdWB_register_Asm_8:
9509 case ARM::VLD3LNdWB_register_Asm_16:
9510 case ARM::VLD3LNdWB_register_Asm_32:
9511 case ARM::VLD3LNqWB_register_Asm_16:
9512 case ARM::VLD3LNqWB_register_Asm_32: {
9539 case ARM::VLD4LNdWB_register_Asm_8:
9540 case ARM::VLD4LNdWB_register_Asm_16:
9541 case ARM::VLD4LNdWB_register_Asm_32:
9542 case ARM::VLD4LNqWB_register_Asm_16:
9543 case ARM::VLD4LNqWB_register_Asm_32: {
9574 case ARM::VLD1LNdWB_fixed_Asm_8:
9575 case ARM::VLD1LNdWB_fixed_Asm_16:
9576 case ARM::VLD1LNdWB_fixed_Asm_32: {
9595 case ARM::VLD2LNdWB_fixed_Asm_8:
9596 case ARM::VLD2LNdWB_fixed_Asm_16:
9597 case ARM::VLD2LNdWB_fixed_Asm_32:
9598 case ARM::VLD2LNqWB_fixed_Asm_16:
9599 case ARM::VLD2LNqWB_fixed_Asm_32: {
9622 case ARM::VLD3LNdWB_fixed_Asm_8:
9623 case ARM::VLD3LNdWB_fixed_Asm_16:
9624 case ARM::VLD3LNdWB_fixed_Asm_32:
9625 case ARM::VLD3LNqWB_fixed_Asm_16:
9626 case ARM::VLD3LNqWB_fixed_Asm_32: {
9653 case ARM::VLD4LNdWB_fixed_Asm_8:
9654 case ARM::VLD4LNdWB_fixed_Asm_16:
9655 case ARM::VLD4LNdWB_fixed_Asm_32:
9656 case ARM::VLD4LNqWB_fixed_Asm_16:
9657 case ARM::VLD4LNqWB_fixed_Asm_32: {
9688 case ARM::VLD1LNdAsm_8:
9689 case ARM::VLD1LNdAsm_16:
9690 case ARM::VLD1LNdAsm_32: {
9707 case ARM::VLD2LNdAsm_8:
9708 case ARM::VLD2LNdAsm_16:
9709 case ARM::VLD2LNdAsm_32:
9710 case ARM::VLD2LNqAsm_16:
9711 case ARM::VLD2LNqAsm_32: {
9732 case ARM::VLD3LNdAsm_8:
9733 case ARM::VLD3LNdAsm_16:
9734 case ARM::VLD3LNdAsm_32:
9735 case ARM::VLD3LNqAsm_16:
9736 case ARM::VLD3LNqAsm_32: {
9761 case ARM::VLD4LNdAsm_8:
9762 case ARM::VLD4LNdAsm_16:
9763 case ARM::VLD4LNdAsm_32:
9764 case ARM::VLD4LNqAsm_16:
9765 case ARM::VLD4LNqAsm_32: {
9795 case ARM::VLD3DUPdAsm_8:
9796 case ARM::VLD3DUPdAsm_16:
9797 case ARM::VLD3DUPdAsm_32:
9798 case ARM::VLD3DUPqAsm_8:
9799 case ARM::VLD3DUPqAsm_16:
9800 case ARM::VLD3DUPqAsm_32: {
9817 case ARM::VLD3DUPdWB_fixed_Asm_8:
9818 case ARM::VLD3DUPdWB_fixed_Asm_16:
9819 case ARM::VLD3DUPdWB_fixed_Asm_32:
9820 case ARM::VLD3DUPqWB_fixed_Asm_8:
9821 case ARM::VLD3DUPqWB_fixed_Asm_16:
9822 case ARM::VLD3DUPqWB_fixed_Asm_32: {
9841 case ARM::VLD3DUPdWB_register_Asm_8:
9842 case ARM::VLD3DUPdWB_register_Asm_16:
9843 case ARM::VLD3DUPdWB_register_Asm_32:
9844 case ARM::VLD3DUPqWB_register_Asm_8:
9845 case ARM::VLD3DUPqWB_register_Asm_16:
9846 case ARM::VLD3DUPqWB_register_Asm_32: {
9866 case ARM::VLD3dAsm_8:
9867 case ARM::VLD3dAsm_16:
9868 case ARM::VLD3dAsm_32:
9869 case ARM::VLD3qAsm_8:
9870 case ARM::VLD3qAsm_16:
9871 case ARM::VLD3qAsm_32: {
9888 case ARM::VLD3dWB_fixed_Asm_8:
9889 case ARM::VLD3dWB_fixed_Asm_16:
9890 case ARM::VLD3dWB_fixed_Asm_32:
9891 case ARM::VLD3qWB_fixed_Asm_8:
9892 case ARM::VLD3qWB_fixed_Asm_16:
9893 case ARM::VLD3qWB_fixed_Asm_32: {
9912 case ARM::VLD3dWB_register_Asm_8:
9913 case ARM::VLD3dWB_register_Asm_16:
9914 case ARM::VLD3dWB_register_Asm_32:
9915 case ARM::VLD3qWB_register_Asm_8:
9916 case ARM::VLD3qWB_register_Asm_16:
9917 case ARM::VLD3qWB_register_Asm_32: {
9937 case ARM::VLD4DUPdAsm_8:
9938 case ARM::VLD4DUPdAsm_16:
9939 case ARM::VLD4DUPdAsm_32:
9940 case ARM::VLD4DUPqAsm_8:
9941 case ARM::VLD4DUPqAsm_16:
9942 case ARM::VLD4DUPqAsm_32: {
9961 case ARM::VLD4DUPdWB_fixed_Asm_8:
9962 case ARM::VLD4DUPdWB_fixed_Asm_16:
9963 case ARM::VLD4DUPdWB_fixed_Asm_32:
9964 case ARM::VLD4DUPqWB_fixed_Asm_8:
9965 case ARM::VLD4DUPqWB_fixed_Asm_16:
9966 case ARM::VLD4DUPqWB_fixed_Asm_32: {
9987 case ARM::VLD4DUPdWB_register_Asm_8:
9988 case ARM::VLD4DUPdWB_register_Asm_16:
9989 case ARM::VLD4DUPdWB_register_Asm_32:
9990 case ARM::VLD4DUPqWB_register_Asm_8:
9991 case ARM::VLD4DUPqWB_register_Asm_16:
9992 case ARM::VLD4DUPqWB_register_Asm_32: {
10014 case ARM::VLD4dAsm_8:
10015 case ARM::VLD4dAsm_16:
10016 case ARM::VLD4dAsm_32:
10017 case ARM::VLD4qAsm_8:
10018 case ARM::VLD4qAsm_16:
10019 case ARM::VLD4qAsm_32: {
10038 case ARM::VLD4dWB_fixed_Asm_8:
10039 case ARM::VLD4dWB_fixed_Asm_16:
10040 case ARM::VLD4dWB_fixed_Asm_32:
10041 case ARM::VLD4qWB_fixed_Asm_8:
10042 case ARM::VLD4qWB_fixed_Asm_16:
10043 case ARM::VLD4qWB_fixed_Asm_32: {
10064 case ARM::VLD4dWB_register_Asm_8:
10065 case ARM::VLD4dWB_register_Asm_16:
10066 case ARM::VLD4dWB_register_Asm_32:
10067 case ARM::VLD4qWB_register_Asm_8:
10068 case ARM::VLD4qWB_register_Asm_16:
10069 case ARM::VLD4qWB_register_Asm_32: {
10091 case ARM::VST3dAsm_8:
10092 case ARM::VST3dAsm_16:
10093 case ARM::VST3dAsm_32:
10094 case ARM::VST3qAsm_8:
10095 case ARM::VST3qAsm_16:
10096 case ARM::VST3qAsm_32: {
10113 case ARM::VST3dWB_fixed_Asm_8:
10114 case ARM::VST3dWB_fixed_Asm_16:
10115 case ARM::VST3dWB_fixed_Asm_32:
10116 case ARM::VST3qWB_fixed_Asm_8:
10117 case ARM::VST3qWB_fixed_Asm_16:
10118 case ARM::VST3qWB_fixed_Asm_32: {
10137 case ARM::VST3dWB_register_Asm_8:
10138 case ARM::VST3dWB_register_Asm_16:
10139 case ARM::VST3dWB_register_Asm_32:
10140 case ARM::VST3qWB_register_Asm_8:
10141 case ARM::VST3qWB_register_Asm_16:
10142 case ARM::VST3qWB_register_Asm_32: {
10162 case ARM::VST4dAsm_8:
10163 case ARM::VST4dAsm_16:
10164 case ARM::VST4dAsm_32:
10165 case ARM::VST4qAsm_8:
10166 case ARM::VST4qAsm_16:
10167 case ARM::VST4qAsm_32: {
10186 case ARM::VST4dWB_fixed_Asm_8:
10187 case ARM::VST4dWB_fixed_Asm_16:
10188 case ARM::VST4dWB_fixed_Asm_32:
10189 case ARM::VST4qWB_fixed_Asm_8:
10190 case ARM::VST4qWB_fixed_Asm_16:
10191 case ARM::VST4qWB_fixed_Asm_32: {
10212 case ARM::VST4dWB_register_Asm_8:
10213 case ARM::VST4dWB_register_Asm_16:
10214 case ARM::VST4dWB_register_Asm_32:
10215 case ARM::VST4qWB_register_Asm_8:
10216 case ARM::VST4qWB_register_Asm_16:
10217 case ARM::VST4qWB_register_Asm_32: {
10245 !HasWideQualifier) {
10249 case ARM::t2LSLri: NewOpc = ARM::tLSLri;
break;
10250 case ARM::t2LSRri: NewOpc = ARM::tLSRri;
break;
10251 case ARM::t2ASRri: NewOpc = ARM::tASRri;
break;
10269 case ARM::t2MOVSsr: {
10273 bool isNarrow =
false;
10278 inITBlock() == (Inst.
getOpcode() == ARM::t2MOVsr) &&
10285 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr;
break;
10286 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr;
break;
10287 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr;
break;
10288 case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr;
break;
10294 Inst.
getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
10301 Inst.
getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
10306 case ARM::t2MOVSsi: {
10310 bool isNarrow =
false;
10313 inITBlock() == (Inst.
getOpcode() == ARM::t2MOVsi) &&
10320 bool isMov =
false;
10331 newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr;
10335 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri;
break;
10336 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri;
break;
10337 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri;
break;
10338 case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow =
false;
break;
10339 case ARM_AM::rrx: isNarrow =
false; newOpc = ARM::t2RRX;
break;
10342 if (Amount == 32) Amount = 0;
10345 if (isNarrow && !isMov)
10347 Inst.
getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
10349 if (newOpc != ARM::t2RRX && !isMov)
10355 Inst.
getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
10399 unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
10408 if (Opc == ARM::MOVsi)
10429 case ARM::t2LDMIA_UPD: {
10445 case ARM::t2STMDB_UPD: {
10461 case ARM::LDMIA_UPD:
10464 if (
static_cast<ARMOperand &
>(*
Operands[0]).getToken() ==
"pop" &&
10479 case ARM::STMDB_UPD:
10482 if (
static_cast<ARMOperand &
>(*
Operands[0]).getToken() ==
"push" &&
10495 case ARM::t2ADDri12:
10496 case ARM::t2SUBri12:
10497 case ARM::t2ADDspImm12:
10498 case ARM::t2SUBspImm12: {
10502 if ((Token !=
"add" && Token !=
"sub") ||
10506 case ARM::t2ADDri12:
10509 case ARM::t2SUBri12:
10512 case ARM::t2ADDspImm12:
10515 case ARM::t2SUBspImm12:
10545 case ARM::t2SUBri: {
10559 ARM::tADDi8 : ARM::tSUBi8);
10569 case ARM::t2ADDspImm:
10570 case ARM::t2SUBspImm: {
10575 if (V & 3 || V > ((1 << 7) - 1) << 2)
10588 case ARM::t2ADDrr: {
10652 case ARM::tLDMIA: {
10658 bool hasWritebackToken =
10659 (
static_cast<ARMOperand &
>(*
Operands[3]).isToken() &&
10660 static_cast<ARMOperand &
>(*
Operands[3]).getToken() ==
"!");
10661 bool listContainsBase;
10663 (!listContainsBase && !hasWritebackToken) ||
10664 (listContainsBase && hasWritebackToken)) {
10667 Inst.
setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
10670 if (hasWritebackToken)
10677 case ARM::tSTMIA_UPD: {
10682 bool listContainsBase;
10692 bool listContainsBase;
10706 bool listContainsBase;
10723 !HasWideQualifier) {
10744 !HasWideQualifier) {
10751 if (
Op == ARM::tMOVr) {
10769 !HasWideQualifier) {
10773 case ARM::t2SXTH: NewOpc = ARM::tSXTH;
break;
10774 case ARM::t2SXTB: NewOpc = ARM::tSXTB;
break;
10775 case ARM::t2UXTH: NewOpc = ARM::tUXTH;
break;
10776 case ARM::t2UXTB: NewOpc = ARM::tUXTB;
break;
10814 case ARM::ADDrsi: {
10820 case ARM::ANDrsi: newOpc = ARM::ANDrr;
break;
10821 case ARM::ORRrsi: newOpc = ARM::ORRrr;
break;
10822 case ARM::EORrsi: newOpc = ARM::EORrr;
break;
10823 case ARM::BICrsi: newOpc = ARM::BICrr;
break;
10824 case ARM::SUBrsi: newOpc = ARM::SUBrr;
break;
10825 case ARM::ADDrsi: newOpc = ARM::ADDrr;
break;
10848 assert(!inITBlock() &&
"nested IT blocks?!");
10864 !HasWideQualifier) {
10868 case ARM::t2LSLrr: NewOpc = ARM::tLSLrr;
break;
10869 case ARM::t2LSRrr: NewOpc = ARM::tLSRrr;
break;
10870 case ARM::t2ASRrr: NewOpc = ARM::tASRrr;
break;
10871 case ARM::t2SBCrr: NewOpc = ARM::tSBC;
break;
10872 case ARM::t2RORrr: NewOpc = ARM::tROR;
break;
10873 case ARM::t2BICrr: NewOpc = ARM::tBIC;
break;
10900 !HasWideQualifier) {
10904 case ARM::t2ADCrr: NewOpc = ARM::tADC;
break;
10905 case ARM::t2ANDrr: NewOpc = ARM::tAND;
break;
10906 case ARM::t2EORrr: NewOpc = ARM::tEOR;
break;
10907 case ARM::t2ORRrr: NewOpc = ARM::tORR;
break;
10926 case ARM::MVE_VPST:
10927 case ARM::MVE_VPTv16i8:
10928 case ARM::MVE_VPTv8i16:
10929 case ARM::MVE_VPTv4i32:
10930 case ARM::MVE_VPTv16u8:
10931 case ARM::MVE_VPTv8u16:
10932 case ARM::MVE_VPTv4u32:
10933 case ARM::MVE_VPTv16s8:
10934 case ARM::MVE_VPTv8s16:
10935 case ARM::MVE_VPTv4s32:
10936 case ARM::MVE_VPTv4f32:
10937 case ARM::MVE_VPTv8f16:
10938 case ARM::MVE_VPTv16i8r:
10939 case ARM::MVE_VPTv8i16r:
10940 case ARM::MVE_VPTv4i32r:
10941 case ARM::MVE_VPTv16u8r:
10942 case ARM::MVE_VPTv8u16r:
10943 case ARM::MVE_VPTv4u32r:
10944 case ARM::MVE_VPTv16s8r:
10945 case ARM::MVE_VPTv8s16r:
10946 case ARM::MVE_VPTv4s32r:
10947 case ARM::MVE_VPTv4f32r:
10948 case ARM::MVE_VPTv8f16r: {
10949 assert(!inVPTBlock() &&
"Nested VPT blocks are not allowed");
10951 VPTState.Mask = MO.
getImm();
10952 VPTState.CurPosition = 0;
// Post-match target predicate check: rejects an otherwise-matched MCInst when
// the current mode (IT block, Thumb1/Thumb2, architecture version) forbids it,
// returning a Match_Requires* / Match_Invalid* code, or Match_Success.
// NOTE(review): this listing is elided (original line numbers jump); several
// conditions and the closing brace are not visible here.
10959unsigned ARMAsmParser::checkTargetMatchPredicate(
MCInst &Inst) {
10966 "optionally flag setting instruction missing optional def operand");
10968 "operand count mismatch!");
10977 return Match_RequiresFlagSetting;
10982 return Match_RequiresITBlock;
10985 return Match_RequiresNotITBlock;
// tLSLri with a zero shift is a MOV and is not permitted inside an IT block.
10987 if (Opc == ARM::tLSLri && Inst.
getOperand(3).
getImm() == 0 && inITBlock())
10988 return Match_RequiresNotITBlock;
10989 }
else if (isThumbOne()) {
10992 if (Opc == ARM::tADDhirr && !hasV6MOps() &&
10995 return Match_RequiresThumb2;
10997 else if (Opc == ARM::tMOVr && !hasV6Ops() &&
11000 return Match_RequiresV6;
11006 if (Opc == ARM::t2MOVr && !hasV8Ops())
11011 return Match_RequiresV8;
11016 return Match_RequiresV8;
// 8.1-M system registers / VPR accesses handled as a group below.
11022 case ARM::VMRS_FPCXTS:
11023 case ARM::VMRS_FPCXTNS:
11024 case ARM::VMSR_FPCXTS:
11025 case ARM::VMSR_FPCXTNS:
11026 case ARM::VMRS_FPSCR_NZCVQC:
11027 case ARM::VMSR_FPSCR_NZCVQC:
11029 case ARM::VMRS_VPR:
11031 case ARM::VMSR_VPR:
11037 return Match_InvalidOperand;
11043 return Match_RequiresV8;
// rGPR operands: SP only allowed from v8 on, PC never.
11050 if (MCID.
operands()[
I].RegClass == ARM::rGPRRegClassID) {
11065 unsigned Reg =
Op.getReg();
11066 if ((Reg == ARM::SP) && !hasV8Ops())
11067 return Match_RequiresV8;
11068 else if (Reg == ARM::PC)
11069 return Match_InvalidOperand;
11072 return Match_Success;
11085bool ARMAsmParser::isITBlockTerminator(
MCInst &Inst)
const {
11104 bool MatchingInlineAsm,
11105 bool &EmitInITBlock,
11108 if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
11109 return MatchInstructionImpl(
Operands, Inst, &NearMisses, MatchingInlineAsm);
11113 if (inImplicitITBlock()) {
11114 extendImplicitITBlock(ITState.Cond);
11115 if (MatchInstructionImpl(
Operands, Inst,
nullptr, MatchingInlineAsm) ==
11125 if (InstCond == ITCond) {
11126 EmitInITBlock =
true;
11127 return Match_Success;
11129 invertCurrentITCondition();
11130 EmitInITBlock =
true;
11131 return Match_Success;
11135 rewindImplicitITPosition();
11139 flushPendingInstructions(Out);
11140 unsigned PlainMatchResult =
11141 MatchInstructionImpl(
Operands, Inst, &NearMisses, MatchingInlineAsm);
11142 if (PlainMatchResult == Match_Success) {
11151 EmitInITBlock =
false;
11152 return Match_Success;
11155 EmitInITBlock =
false;
11156 return Match_Success;
11159 EmitInITBlock =
false;
11160 return Match_Success;
11167 startImplicitITBlock();
11168 if (MatchInstructionImpl(
Operands, Inst,
nullptr, MatchingInlineAsm) ==
11175 EmitInITBlock =
true;
11176 return Match_Success;
11179 discardImplicitITBlock();
11183 EmitInITBlock =
false;
11184 return PlainMatchResult;
11188 unsigned VariantID = 0);
// Top-level match-and-emit entry point: matches the parsed operands to an
// MCInst, validates it, runs processInstruction until it reaches a fixed
// point, then either emits it or (for implicit-IT conditional instructions)
// queues it in PendingConditionalInsts. Reports near-miss diagnostics or a
// mnemonic suggestion on failure.
// NOTE(review): listing is elided (original line numbers jump); emit calls
// and the trailing cases/brace are not visible here.
11191bool ARMAsmParser::MatchAndEmitInstruction(
SMLoc IDLoc,
unsigned &Opcode,
11194 bool MatchingInlineAsm) {
11196 unsigned MatchResult;
11197 bool PendConditionalInstruction =
false;
11200 MatchResult = MatchInstruction(
Operands, Inst, NearMisses, MatchingInlineAsm,
11201 PendConditionalInstruction, Out);
11203 switch (MatchResult) {
11204 case Match_Success:
11211 if (validateInstruction(Inst,
Operands)) {
// Still consume the IT/VPT slot so later diagnostics stay aligned.
11214 forwardITPosition();
11215 forwardVPTPosition();
11224 while (processInstruction(Inst,
Operands, Out))
11233 forwardITPosition();
11234 forwardVPTPosition();
11242 if (PendConditionalInstruction) {
11243 PendingConditionalInsts.push_back(Inst);
// Flush once the implicit IT block is full or this instruction ends it.
11244 if (isITBlockFull() || isITBlockTerminator(Inst))
11245 flushPendingInstructions(Out);
11250 case Match_NearMisses:
11251 ReportNearMisses(NearMisses, IDLoc,
Operands);
11253 case Match_MnemonicFail: {
11254 FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
11256 ((ARMOperand &)*
Operands[0]).getToken(), FBS);
11257 return Error(IDLoc,
"invalid instruction" + Suggestion,
11258 ((ARMOperand &)*
Operands[0]).getLocRange());
// Dispatcher for ARM-specific assembler directives: routes each directive
// mnemonic to its parseDirective* handler. Object-format-specific groups are
// guarded: non-MachO/non-COFF (ELF/EABI) directives and COFF SEH directives.
// NOTE(review): listing is elided (original line numbers jump); IDVal setup,
// the fallthrough return, and the closing brace are not visible here.
11266bool ARMAsmParser::ParseDirective(
AsmToken DirectiveID) {
11272 if (IDVal ==
".word")
11273 parseLiteralValues(4, DirectiveID.
getLoc());
11274 else if (IDVal ==
".short" || IDVal ==
".hword")
11275 parseLiteralValues(2, DirectiveID.
getLoc());
11276 else if (IDVal ==
".thumb")
11277 parseDirectiveThumb(DirectiveID.
getLoc());
11278 else if (IDVal ==
".arm")
11279 parseDirectiveARM(DirectiveID.
getLoc());
11280 else if (IDVal ==
".thumb_func")
11281 parseDirectiveThumbFunc(DirectiveID.
getLoc());
11282 else if (IDVal ==
".code")
11283 parseDirectiveCode(DirectiveID.
getLoc());
11284 else if (IDVal ==
".syntax")
11285 parseDirectiveSyntax(DirectiveID.
getLoc());
11286 else if (IDVal ==
".unreq")
11287 parseDirectiveUnreq(DirectiveID.
getLoc());
11288 else if (IDVal ==
".fnend")
11289 parseDirectiveFnEnd(DirectiveID.
getLoc());
11290 else if (IDVal ==
".cantunwind")
11291 parseDirectiveCantUnwind(DirectiveID.
getLoc());
11292 else if (IDVal ==
".personality")
11293 parseDirectivePersonality(DirectiveID.
getLoc());
11294 else if (IDVal ==
".handlerdata")
11295 parseDirectiveHandlerData(DirectiveID.
getLoc());
11296 else if (IDVal ==
".setfp")
11297 parseDirectiveSetFP(DirectiveID.
getLoc());
11298 else if (IDVal ==
".pad")
11299 parseDirectivePad(DirectiveID.
getLoc());
11300 else if (IDVal ==
".save")
11301 parseDirectiveRegSave(DirectiveID.
getLoc(),
false);
11302 else if (IDVal ==
".vsave")
11303 parseDirectiveRegSave(DirectiveID.
getLoc(),
true);
11304 else if (IDVal ==
".ltorg" || IDVal ==
".pool")
11305 parseDirectiveLtorg(DirectiveID.
getLoc());
11306 else if (IDVal ==
".even")
11307 parseDirectiveEven(DirectiveID.
getLoc());
11308 else if (IDVal ==
".personalityindex")
11309 parseDirectivePersonalityIndex(DirectiveID.
getLoc());
11310 else if (IDVal ==
".unwind_raw")
11311 parseDirectiveUnwindRaw(DirectiveID.
getLoc());
11312 else if (IDVal ==
".movsp")
11313 parseDirectiveMovSP(DirectiveID.
getLoc());
11314 else if (IDVal ==
".arch_extension")
11315 parseDirectiveArchExtension(DirectiveID.
getLoc());
11316 else if (IDVal ==
".align")
11317 return parseDirectiveAlign(DirectiveID.
getLoc());
11318 else if (IDVal ==
".thumb_set")
11319 parseDirectiveThumbSet(DirectiveID.
getLoc());
11320 else if (IDVal ==
".inst")
11321 parseDirectiveInst(DirectiveID.
getLoc());
11322 else if (IDVal ==
".inst.n")
11323 parseDirectiveInst(DirectiveID.
getLoc(),
'n');
11324 else if (IDVal ==
".inst.w")
11325 parseDirectiveInst(DirectiveID.
getLoc(),
'w');
// Directives only meaningful outside MachO/COFF (ELF / EABI build attrs).
11326 else if (!IsMachO && !IsCOFF) {
11327 if (IDVal ==
".arch")
11328 parseDirectiveArch(DirectiveID.
getLoc());
11329 else if (IDVal ==
".cpu")
11330 parseDirectiveCPU(DirectiveID.
getLoc());
11331 else if (IDVal ==
".eabi_attribute")
11332 parseDirectiveEabiAttr(DirectiveID.
getLoc());
11333 else if (IDVal ==
".fpu")
11334 parseDirectiveFPU(DirectiveID.
getLoc());
11335 else if (IDVal ==
".fnstart")
11336 parseDirectiveFnStart(DirectiveID.
getLoc());
11337 else if (IDVal ==
".object_arch")
11338 parseDirectiveObjectArch(DirectiveID.
getLoc());
11339 else if (IDVal ==
".tlsdescseq")
11340 parseDirectiveTLSDescSeq(DirectiveID.
getLoc());
// Windows-on-ARM structured exception handling (SEH) directives.
11343 }
else if (IsCOFF) {
11344 if (IDVal ==
".seh_stackalloc")
11345 parseDirectiveSEHAllocStack(DirectiveID.
getLoc(),
false);
11346 else if (IDVal ==
".seh_stackalloc_w")
11347 parseDirectiveSEHAllocStack(DirectiveID.
getLoc(),
true);
11348 else if (IDVal ==
".seh_save_regs")
11349 parseDirectiveSEHSaveRegs(DirectiveID.
getLoc(),
false);
11350 else if (IDVal ==
".seh_save_regs_w")
11351 parseDirectiveSEHSaveRegs(DirectiveID.
getLoc(),
true);
11352 else if (IDVal ==
".seh_save_sp")
11353 parseDirectiveSEHSaveSP(DirectiveID.
getLoc());
11354 else if (IDVal ==
".seh_save_fregs")
11355 parseDirectiveSEHSaveFRegs(DirectiveID.
getLoc());
11356 else if (IDVal ==
".seh_save_lr")
11357 parseDirectiveSEHSaveLR(DirectiveID.
getLoc());
11358 else if (IDVal ==
".seh_endprologue")
11359 parseDirectiveSEHPrologEnd(DirectiveID.
getLoc(),
false);
11360 else if (IDVal ==
".seh_endprologue_fragment")
11361 parseDirectiveSEHPrologEnd(DirectiveID.
getLoc(),
true);
11362 else if (IDVal ==
".seh_nop")
11363 parseDirectiveSEHNop(DirectiveID.
getLoc(),
false);
11364 else if (IDVal ==
".seh_nop_w")
11365 parseDirectiveSEHNop(DirectiveID.
getLoc(),
true);
11366 else if (IDVal ==
".seh_startepilogue")
11367 parseDirectiveSEHEpilogStart(DirectiveID.
getLoc(),
false);
11368 else if (IDVal ==
".seh_startepilogue_cond")
11369 parseDirectiveSEHEpilogStart(DirectiveID.
getLoc(),
true);
11370 else if (IDVal ==
".seh_endepilogue")
11371 parseDirectiveSEHEpilogEnd(DirectiveID.
getLoc());
11372 else if (IDVal ==
".seh_custom")
11373 parseDirectiveSEHCustom(DirectiveID.
getLoc());
// Handles .word / .short / .hword: parses a comma-separated list of
// expressions and emits each as a literal of the given byte Size at L.
// NOTE(review): listing is elided; error-return lines are not visible here.
11385bool ARMAsmParser::parseLiteralValues(
unsigned Size,
SMLoc L) {
11386 auto parseOne = [&]() ->
bool {
11388 if (getParser().parseExpression(
Value))
11390 getParser().getStreamer().emitValue(
Value,
Size, L);
// parseMany applies parseOne to each comma-separated operand.
11393 return (parseMany(parseOne));
// Handles .thumb: verifies the target supports Thumb, then emits the Code16
// flag and aligns the code to 2 bytes.
11398bool ARMAsmParser::parseDirectiveThumb(
SMLoc L) {
11399 if (parseEOL() ||
check(!hasThumb(), L,
"target does not support Thumb mode"))
11405 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code16);
11406 getParser().getStreamer().emitCodeAlignment(
Align(2), &getSTI(), 0);
// Handles .arm: verifies the target supports ARM mode, then emits the Code32
// flag and aligns the code to 4 bytes.
11412bool ARMAsmParser::parseDirectiveARM(
SMLoc L) {
11413 if (parseEOL() ||
check(!hasARM(), L,
"target does not support ARM mode"))
11418 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code32);
11419 getParser().getStreamer().emitCodeAlignment(
Align(4), &getSTI(), 0);
// Hook invoked before a label is emitted: flushes any pending implicit-IT
// conditional instructions so they land before the label.
11454void ARMAsmParser::doBeforeLabelEmit(
MCSymbol *Symbol,
SMLoc IDLoc) {
11457 flushPendingInstructions(getStreamer());
// Hook invoked after a label is parsed: if a preceding .thumb_func marked the
// next symbol, tag this label as a Thumb function and clear the flag.
11460void ARMAsmParser::onLabelParsed(
MCSymbol *Symbol) {
11461 if (NextSymbolIsThumb) {
11462 getParser().getStreamer().emitThumbFunc(Symbol);
11463 NextSymbolIsThumb =
false;
// Handles .thumb_func: with an explicit symbol argument, marks that symbol as
// a Thumb function immediately; otherwise arms NextSymbolIsThumb so the next
// parsed label is marked (see onLabelParsed).
// NOTE(review): listing is elided; the MachO-specific argument handling is
// only partially visible here.
11469bool ARMAsmParser::parseDirectiveThumbFunc(
SMLoc L) {
11471 const auto Format = getContext().getObjectFileType();
11480 MCSymbol *
Func = getParser().getContext().getOrCreateSymbol(
11482 getParser().getStreamer().emitThumbFunc(Func);
11497 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code16);
11499 NextSymbolIsThumb =
true;
// Handles .syntax: only "unified" (any case) is accepted; "divided" is
// explicitly rejected as unsupported, anything else is a syntax error.
11505bool ARMAsmParser::parseDirectiveSyntax(
SMLoc L) {
11509 Error(L,
"unexpected token in .syntax directive");
11515 if (
check(Mode ==
"divided" || Mode ==
"DIVIDED", L,
11516 "'.syntax divided' arm assembly not supported") ||
11517 check(Mode !=
"unified" && Mode !=
"UNIFIED", L,
11518 "unrecognized syntax mode in .syntax directive") ||
// Handles .code 16|32: switches between Thumb (Code16) and ARM (Code32),
// erroring if the requested mode is unsupported by the target.
11529bool ARMAsmParser::parseDirectiveCode(
SMLoc L) {
11533 return Error(L,
"unexpected token in .code directive");
11535 if (Val != 16 && Val != 32) {
11536 Error(L,
"invalid operand to .code directive");
// Val == 16: Thumb mode.
11546 return Error(L,
"target does not support Thumb mode");
11550 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code16);
// Val == 32: ARM mode.
11553 return Error(L,
"target does not support ARM mode");
11557 getParser().getStreamer().emitAssemblerFlag(
MCAF_Code32);
11569 SMLoc SRegLoc, ERegLoc;
11570 if (
check(parseRegister(Reg, SRegLoc, ERegLoc), SRegLoc,
11571 "register name expected") ||
11575 if (RegisterReqs.
insert(std::make_pair(
Name, Reg)).first->second != Reg)
11576 return Error(SRegLoc,
11577 "redefinition of '" +
Name +
"' does not match original.");
// Handles .unreq: removes a register alias previously created with .req.
// NOTE(review): listing is elided; the erase from RegisterReqs is not
// visible here.
11584bool ARMAsmParser::parseDirectiveUnreq(
SMLoc L) {
11587 return Error(L,
"unexpected input in .unreq directive.");
// After a .arch/.cpu change, keeps the current ARM/Thumb mode if the new
// target still supports it; otherwise switches to the supported mode and
// warns that the instruction mode was forced to change.
11596void ARMAsmParser::FixModeAfterArchChange(
bool WasThumb,
SMLoc Loc) {
11598 if (WasThumb && hasThumb()) {
11601 }
else if (!WasThumb && hasARM()) {
// Neither previous mode survives: emit the mode-switch warning text.
11612 (WasThumb ?
"thumb" :
"arm") +
" mode, switching to " +
11613 (!WasThumb ?
"thumb" :
"arm") +
" mode");
// Handles .arch: looks up the architecture name, switches the subtarget to
// it, recomputes available features, fixes the ARM/Thumb mode, and forwards
// the arch to the target streamer (for build attributes).
11620bool ARMAsmParser::parseDirectiveArch(
SMLoc L) {
11621 StringRef Arch = getParser().parseStringToEndOfStatement().
trim();
11624 if (
ID == ARM::ArchKind::INVALID)
11625 return Error(L,
"Unknown arch name");
11632 setAvailableFeatures(ComputeAvailableFeatures(STI.
getFeatureBits()));
11633 FixModeAfterArchChange(WasThumb, L);
11635 getTargetStreamer().emitArch(
ID);
// Handles .eabi_attribute: parses a tag (name or numeric constant) and its
// value. Whether the value is a string, an integer, or both is decided by
// the tag: odd tags >= 32 take strings, even/low tags take integers, with
// some tags taking both.  Emits via the target streamer.
// NOTE(review): listing is elided; the tag-name lookup and several error
// paths are not visible here.
11642bool ARMAsmParser::parseDirectiveEabiAttr(
SMLoc L) {
11652 Error(TagLoc,
"attribute name not recognised: " +
Name);
11665 if (
check(!CE, TagLoc,
"expected numeric constant"))
11668 Tag =
CE->getValue();
// Decide the value kind(s) expected for this tag.
11675 bool IsStringValue =
false;
11677 int64_t IntegerValue = 0;
11678 bool IsIntegerValue =
false;
11681 IsStringValue =
true;
11683 IsStringValue =
true;
11684 IsIntegerValue =
true;
11685 }
else if (
Tag < 32 ||
Tag % 2 == 0)
11686 IsIntegerValue =
true;
11687 else if (
Tag % 2 == 1)
11688 IsStringValue =
true;
11692 if (IsIntegerValue) {
11693 const MCExpr *ValueExpr;
11700 return Error(ValueExprLoc,
"expected numeric constant");
11701 IntegerValue =
CE->getValue();
11709 std::string EscapedValue;
11710 if (IsStringValue) {
11718 StringValue = EscapedValue;
// Emit through whichever streamer hook matches the value kind(s).
11728 if (IsIntegerValue && IsStringValue) {
11730 getTargetStreamer().emitIntTextAttribute(
Tag, IntegerValue, StringValue);
11731 }
else if (IsIntegerValue)
11732 getTargetStreamer().emitAttribute(
Tag, IntegerValue);
11733 else if (IsStringValue)
11734 getTargetStreamer().emitTextAttribute(
Tag, StringValue);
// Handles .cpu: validates the CPU name against the subtarget, switches to it,
// recomputes available features, and fixes the ARM/Thumb mode.
11740bool ARMAsmParser::parseDirectiveCPU(
SMLoc L) {
11741 StringRef CPU = getParser().parseStringToEndOfStatement().
trim();
11746 if (!getSTI().isCPUStringValid(CPU))
11747 return Error(L,
"Unknown CPU name");
11752 setAvailableFeatures(ComputeAvailableFeatures(STI.
getFeatureBits()));
11753 FixModeAfterArchChange(WasThumb, L);
// Handles .fpu: looks up the FPU name, applies its feature list to the
// subtarget, recomputes available features, and emits the FPU build
// attribute through the target streamer.
11760bool ARMAsmParser::parseDirectiveFPU(
SMLoc L) {
11761 SMLoc FPUNameLoc = getTok().getLoc();
11762 StringRef FPU = getParser().parseStringToEndOfStatement().
trim();
11765 std::vector<StringRef> Features;
11767 return Error(FPUNameLoc,
"Unknown FPU name");
11770 for (
auto Feature : Features)
11772 setAvailableFeatures(ComputeAvailableFeatures(STI.
getFeatureBits()));
11774 getTargetStreamer().emitFPU(
ID);
// Handles .fnstart (EHABI unwinding): rejects a nested .fnstart (pointing at
// the earlier one via notes), otherwise opens a new unwind region and records
// the location in the unwind context UC.
11780bool ARMAsmParser::parseDirectiveFnStart(
SMLoc L) {
11784 if (UC.hasFnStart()) {
11785 Error(L,
".fnstart starts before the end of previous one");
11786 UC.emitFnStartLocNotes();
11793 getTargetStreamer().emitFnStart();
11795 UC.recordFnStart(L);
// Handles .fnend: requires a matching .fnstart, then closes the current
// unwind region via the target streamer.
11801bool ARMAsmParser::parseDirectiveFnEnd(
SMLoc L) {
11805 if (!UC.hasFnStart())
11806 return Error(L,
".fnstart must precede .fnend directive");
11809 getTargetStreamer().emitFnEnd();
// Handles .cantunwind: must follow .fnstart and is mutually exclusive with
// .handlerdata and .personality — conflicts are diagnosed with notes
// pointing at the conflicting directive locations.
11817bool ARMAsmParser::parseDirectiveCantUnwind(
SMLoc L) {
11821 UC.recordCantUnwind(L);
11823 if (
check(!UC.hasFnStart(), L,
".fnstart must precede .cantunwind directive"))
11826 if (UC.hasHandlerData()) {
11827 Error(L,
".cantunwind can't be used with .handlerdata directive");
11828 UC.emitHandlerDataLocNotes();
11831 if (UC.hasPersonality()) {
11832 Error(L,
".cantunwind can't be used with .personality directive");
11833 UC.emitPersonalityLocNotes();
11837 getTargetStreamer().emitCantUnwind();
// Handles .personality <symbol>: records the personality routine for the
// current unwind region. Must follow .fnstart, precede .handlerdata, and
// conflicts with .cantunwind and any earlier personality directive — each
// conflict is diagnosed with notes at the prior location.
11843bool ARMAsmParser::parseDirectivePersonality(
SMLoc L) {
// Capture before recording so a duplicate in this directive is detected.
11845 bool HasExistingPersonality = UC.hasPersonality();
11849 return Error(L,
"unexpected input in .personality directive.");
11856 UC.recordPersonality(L);
11859 if (!UC.hasFnStart())
11860 return Error(L,
".fnstart must precede .personality directive");
11861 if (UC.cantUnwind()) {
11862 Error(L,
".personality can't be used with .cantunwind directive");
11863 UC.emitCantUnwindLocNotes();
11866 if (UC.hasHandlerData()) {
11867 Error(L,
".personality must precede .handlerdata directive");
11868 UC.emitHandlerDataLocNotes();
11871 if (HasExistingPersonality) {
11872 Error(L,
"multiple personality directives");
11873 UC.emitPersonalityLocNotes();
11877 MCSymbol *PR = getParser().getContext().getOrCreateSymbol(
Name);
11878 getTargetStreamer().emitPersonality(PR);
// Handles .handlerdata: must follow .fnstart and conflicts with .cantunwind;
// switches the streamer to emit the exception handler data table.
11884bool ARMAsmParser::parseDirectiveHandlerData(
SMLoc L) {
11888 UC.recordHandlerData(L);
11890 if (!UC.hasFnStart())
11891 return Error(L,
".fnstart must precede .personality directive");
11892 if (UC.cantUnwind()) {
11893 Error(L,
".handlerdata can't be used with .cantunwind directive");
11894 UC.emitCantUnwindLocNotes();
11898 getTargetStreamer().emitHandlerData();
// Handles .setfp <fpreg>, <spreg>[, #offset]: records the frame pointer
// setup for unwinding. The source register must be SP or the previously
// established FP, and the optional offset must be a constant.
11904bool ARMAsmParser::parseDirectiveSetFP(
SMLoc L) {
11907 if (
check(!UC.hasFnStart(), L,
".fnstart must precede .setfp directive") ||
11908 check(UC.hasHandlerData(), L,
11909 ".setfp must precede .handlerdata directive"))
11914 int FPReg = tryParseRegister();
11916 if (
check(FPReg == -1, FPRegLoc,
"frame pointer register expected") ||
11922 int SPReg = tryParseRegister();
11923 if (
check(SPReg == -1, SPRegLoc,
"stack pointer register expected") ||
11924 check(SPReg != ARM::SP && SPReg != UC.getFPReg(), SPRegLoc,
11925 "register should be either $sp or the latest fp register"))
// Remember the new FP so later .setfp / .movsp can validate against it.
11929 UC.saveFPReg(FPReg)
11939 const MCExpr *OffsetExpr;
11942 if (getParser().parseExpression(OffsetExpr, EndLoc))
11943 return Error(ExLoc,
"malformed setfp offset");
11945 if (
check(!CE, ExLoc,
"setfp offset must be an immediate"))
11953 getTargetStreamer().emitSetFP(
static_cast<unsigned>(FPReg),
11954 static_cast<unsigned>(SPReg),
Offset);
// Handles .pad #offset: records a constant stack adjustment for unwinding.
// Must follow .fnstart and precede .handlerdata.
11960bool ARMAsmParser::parseDirectivePad(
SMLoc L) {
11963 if (!UC.hasFnStart())
11964 return Error(L,
".fnstart must precede .pad directive");
11965 if (UC.hasHandlerData())
11966 return Error(L,
".pad must precede .handlerdata directive");
11974 const MCExpr *OffsetExpr;
11977 if (getParser().parseExpression(OffsetExpr, EndLoc))
11978 return Error(ExLoc,
"malformed pad offset");
11981 return Error(ExLoc,
"pad offset must be an immediate");
11986 getTargetStreamer().emitPad(
CE->getValue());
// Handles .save (GPR list) and .vsave (DPR list): records callee-saved
// registers for unwinding. IsVector selects the .vsave (DPR) form; the
// parsed register list kind must match the directive.
11993bool ARMAsmParser::parseDirectiveRegSave(
SMLoc L,
bool IsVector) {
11995 if (!UC.hasFnStart())
11996 return Error(L,
".fnstart must precede .save or .vsave directives");
11997 if (UC.hasHandlerData())
11998 return Error(L,
".save or .vsave must precede .handlerdata directive");
12004 if (parseRegisterList(
Operands,
true,
true) || parseEOL())
12006 ARMOperand &
Op = (ARMOperand &)*
Operands[0];
12007 if (!IsVector && !
Op.isRegList())
12008 return Error(L,
".save expects GPR registers");
12009 if (IsVector && !
Op.isDPRRegList())
12010 return Error(L,
".vsave expects DPR registers");
12012 getTargetStreamer().emitRegSave(
Op.getRegList(), IsVector);
// Handles .inst / .inst.n / .inst.w: emits raw instruction encodings. Suffix
// is 'n' (16-bit), 'w' (32-bit), or 0 (unspecified). Width suffixes are
// invalid in ARM mode; in Thumb mode with no suffix the width is inferred
// from the value's encoding range, erroring when ambiguous.
12020bool ARMAsmParser::parseDirectiveInst(
SMLoc Loc,
char Suffix) {
12036 return Error(Loc,
"width suffixes are invalid in ARM mode");
12039 auto parseOne = [&]() ->
bool {
12041 if (getParser().parseExpression(Expr))
12045 return Error(Loc,
"expected constant expression");
12048 char CurSuffix = Suffix;
// Range checks for the explicitly requested width.
12051 if (
Value->getValue() > 0xffff)
12052 return Error(Loc,
"inst.n operand is too big, use inst.w instead");
12055 if (
Value->getValue() > 0xffffffff)
12057 " operand is too big");
// No suffix: infer 16- vs 32-bit Thumb encoding from the value.
12061 if (
Value->getValue() < 0xe800)
12063 else if (
Value->getValue() >= 0xe8000000)
12066 return Error(Loc,
"cannot determine Thumb instruction size, "
12067 "use inst.n/inst.w instead");
12073 getTargetStreamer().emitInst(
Value->getValue(), CurSuffix);
// A raw emission still consumes an IT/VPT block slot.
12074 forwardITPosition();
12075 forwardVPTPosition();
12080 return Error(Loc,
"expected expression following directive");
12081 if (parseMany(parseOne))
// Handles .ltorg / .pool: dumps the current constant pool at this point.
12088bool ARMAsmParser::parseDirectiveLtorg(
SMLoc L) {
12091 getTargetStreamer().emitCurrentConstantPool();
// Handles .even: 2-byte alignment — code alignment in code sections, plain
// value alignment otherwise. Initializes sections first if none is current.
12095bool ARMAsmParser::parseDirectiveEven(
SMLoc L) {
12102 getStreamer().initSections(
false, getSTI());
12103 Section = getStreamer().getCurrentSectionOnly();
12106 assert(Section &&
"must have section to emit alignment");
12108 getStreamer().emitCodeAlignment(
Align(2), &getSTI());
12110 getStreamer().emitValueToAlignment(
Align(2));
// Handles .personalityindex <0-3>: selects one of the predefined EHABI
// personality routines by index. Same ordering/conflict rules as
// .personality (after .fnstart, before .handlerdata, not with .cantunwind,
// no duplicate personality), each diagnosed with location notes.
12117bool ARMAsmParser::parseDirectivePersonalityIndex(
SMLoc L) {
// Capture before recording so a duplicate in this directive is detected.
12119 bool HasExistingPersonality = UC.hasPersonality();
12121 const MCExpr *IndexExpression;
12127 UC.recordPersonalityIndex(L);
12129 if (!UC.hasFnStart()) {
12130 return Error(L,
".fnstart must precede .personalityindex directive");
12132 if (UC.cantUnwind()) {
12133 Error(L,
".personalityindex cannot be used with .cantunwind");
12134 UC.emitCantUnwindLocNotes();
12137 if (UC.hasHandlerData()) {
12138 Error(L,
".personalityindex must precede .handlerdata directive");
12139 UC.emitHandlerDataLocNotes();
12142 if (HasExistingPersonality) {
12143 Error(L,
"multiple personality directives");
12144 UC.emitPersonalityLocNotes();
12150 return Error(IndexLoc,
"index must be a constant number");
12152 return Error(IndexLoc,
12153 "personality routine index should be in range [0-3]");
12155 getTargetStreamer().emitPersonalityIndex(
CE->getValue());
// Handles .unwind_raw <stack-offset>, <opcode-byte>[, ...]: emits raw EHABI
// unwind opcodes. The offset must be constant and each opcode must be a
// constant fitting in one byte.
12161bool ARMAsmParser::parseDirectiveUnwindRaw(
SMLoc L) {
12164 const MCExpr *OffsetExpr;
12165 SMLoc OffsetLoc = getLexer().getLoc();
12167 if (!UC.hasFnStart())
12168 return Error(L,
".fnstart must precede .unwind_raw directives");
12169 if (getParser().parseExpression(OffsetExpr))
12170 return Error(OffsetLoc,
"expected expression");
12174 return Error(OffsetLoc,
"offset must be a constant");
// Each subsequent comma-separated operand is one unwind opcode byte.
12183 auto parseOne = [&]() ->
bool {
12184 const MCExpr *OE =
nullptr;
12185 SMLoc OpcodeLoc = getLexer().getLoc();
12188 OpcodeLoc,
"expected opcode expression"))
12192 return Error(OpcodeLoc,
"opcode value must be a constant");
12193 const int64_t Opcode =
OC->getValue();
12194 if (Opcode & ~0xff)
12195 return Error(OpcodeLoc,
"invalid opcode");
12201 SMLoc OpcodeLoc = getLexer().getLoc();
12203 return Error(OpcodeLoc,
"expected opcode expression");
12204 if (parseMany(parseOne))
12207 getTargetStreamer().emitUnwindRaw(
StackOffset, Opcodes);
// Handles .tlsdescseq <variable>: annotates a TLS descriptor sequence for
// the named variable via the target streamer.
12213bool ARMAsmParser::parseDirectiveTLSDescSeq(
SMLoc L) {
12217 return TokError(
"expected variable after '.tlsdescseq' directive");
12227 getTargetStreamer().annotateTLSDescriptorSequence(SRE);
// Handles .movsp <reg>[, #offset]: declares that SP now lives in <reg> for
// unwinding. Only valid while the unwind FP is still SP; the register may
// not be SP or PC, and the optional offset must be a constant.
12233bool ARMAsmParser::parseDirectiveMovSP(
SMLoc L) {
12235 if (!UC.hasFnStart())
12236 return Error(L,
".fnstart must precede .movsp directives");
12237 if (UC.getFPReg() != ARM::SP)
12238 return Error(L,
"unexpected .movsp directive");
12241 int SPReg = tryParseRegister();
12243 return Error(SPRegLoc,
"register expected");
12244 if (SPReg == ARM::SP || SPReg == ARM::PC)
12245 return Error(SPRegLoc,
"sp and pc are not permitted in .movsp directive");
12252 const MCExpr *OffsetExpr;
12256 return Error(OffsetLoc,
"malformed offset expression");
12260 return Error(OffsetLoc,
"offset must be an immediate constant");
12268 getTargetStreamer().emitMovSP(SPReg,
Offset);
// The named register now acts as the frame pointer for later directives.
12269 UC.saveFPReg(SPReg);
// Handles .object_arch: records an architecture for the object file's build
// attributes without changing the subtarget used for assembly.
12276bool ARMAsmParser::parseDirectiveObjectArch(
SMLoc L) {
12279 return Error(getLexer().getLoc(),
"unexpected token");
12287 if (
ID == ARM::ArchKind::INVALID)
12288 return Error(ArchLoc,
"unknown architecture '" + Arch +
"'");
12292 getTargetStreamer().emitObjectArch(
ID);
// Handles bare .align (no operand): 4-byte alignment — code alignment in
// code sections, value alignment elsewhere.
// NOTE(review): listing is elided; the operand check and section test are
// not visible here.
12298bool ARMAsmParser::parseDirectiveAlign(
SMLoc L) {
12304 assert(Section &&
"must have section to emit alignment");
12306 getStreamer().emitCodeAlignment(
Align(4), &getSTI(), 0);
12308 getStreamer().emitValueToAlignment(
Align(4), 0, 1, 0);
// Handles .thumb_set <symbol>, <value>: like .set, but additionally marks
// the symbol as a Thumb function via the target streamer.
12316bool ARMAsmParser::parseDirectiveThumbSet(
SMLoc L) {
12321 "expected identifier after '.thumb_set'") ||
12331 getTargetStreamer().emitThumbSet(
Sym,
Value);
// Handles .seh_stackalloc / .seh_stackalloc_w (COFF SEH): records a stack
// allocation of the given immediate size; Wide selects the wide encoding.
12338bool ARMAsmParser::parseDirectiveSEHAllocStack(
SMLoc L,
bool Wide) {
12340 if (parseImmExpr(
Size))
12342 getTargetStreamer().emitARMWinCFIAllocStack(
Size, Wide);
// Handles .seh_save_regs / .seh_save_regs_w (COFF SEH): parses a GPR list,
// converts it to an encoding-value bitmask, rejects SP, and requires the
// wide form when any of R8-R12 is saved.
12349bool ARMAsmParser::parseDirectiveSEHSaveRegs(
SMLoc L,
bool Wide) {
12352 if (parseRegisterList(
Operands) || parseEOL())
12354 ARMOperand &
Op = (ARMOperand &)*
Operands[0];
12355 if (!
Op.isRegList())
12356 return Error(L,
".seh_save_regs{_w} expects GPR registers");
// Build a bitmask keyed by each register's hardware encoding value.
12359 for (
size_t i = 0; i < RegList.
size(); ++i) {
12360 unsigned Reg =
MRI->getEncodingValue(RegList[i]);
12364 return Error(L,
".seh_save_regs{_w} can't include SP");
12365 assert(Reg < 16U &&
"Register out of range");
12366 unsigned Bit = (1u <<
Reg);
// R8-R12 (bits 8-12) only encodable in the wide form.
12369 if (!Wide && (Mask & 0x1f00) != 0)
12371 ".seh_save_regs cannot save R8-R12, needs .seh_save_regs_w");
12372 getTargetStreamer().emitARMWinCFISaveRegMask(Mask, Wide);
12378bool ARMAsmParser::parseDirectiveSEHSaveSP(
SMLoc L) {
12379 int Reg = tryParseRegister();
12380 if (Reg == -1 || !
MRI->getRegClass(ARM::GPRRegClassID).contains(Reg))
12381 return Error(L,
"expected GPR");
12382 unsigned Index =
MRI->getEncodingValue(Reg);
12384 return Error(L,
"invalid register for .seh_save_sp");
12385 getTargetStreamer().emitARMWinCFISaveSP(
Index);
12391bool ARMAsmParser::parseDirectiveSEHSaveFRegs(
SMLoc L) {
12394 if (parseRegisterList(
Operands) || parseEOL())
12396 ARMOperand &
Op = (ARMOperand &)*
Operands[0];
12397 if (!
Op.isDPRRegList())
12398 return Error(L,
".seh_save_fregs expects DPR registers");
12401 for (
size_t i = 0; i < RegList.
size(); ++i) {
12402 unsigned Reg =
MRI->getEncodingValue(RegList[i]);
12403 assert(Reg < 32U &&
"Register out of range");
12404 unsigned Bit = (1u <<
Reg);
12409 return Error(L,
".seh_save_fregs missing registers");
12411 unsigned First = 0;
12412 while ((Mask & 1) == 0) {
12416 if (((Mask + 1) & Mask) != 0)
12418 ".seh_save_fregs must take a contiguous range of registers");
12420 while ((Mask & 2) != 0) {
12424 if (First < 16 && Last >= 16)
12425 return Error(L,
".seh_save_fregs must be all d0-d15 or d16-d31");
12426 getTargetStreamer().emitARMWinCFISaveFRegs(
First,
Last);
12432bool ARMAsmParser::parseDirectiveSEHSaveLR(
SMLoc L) {
12434 if (parseImmExpr(
Offset))
12436 getTargetStreamer().emitARMWinCFISaveLR(
Offset);
12443bool ARMAsmParser::parseDirectiveSEHPrologEnd(
SMLoc L,
bool Fragment) {
12444 getTargetStreamer().emitARMWinCFIPrologEnd(Fragment);
12451bool ARMAsmParser::parseDirectiveSEHNop(
SMLoc L,
bool Wide) {
12452 getTargetStreamer().emitARMWinCFINop(Wide);
12459bool ARMAsmParser::parseDirectiveSEHEpilogStart(
SMLoc L,
bool Condition) {
12466 return Error(S,
".seh_startepilogue_cond missing condition");
12469 return Error(S,
"invalid condition");
12473 getTargetStreamer().emitARMWinCFIEpilogStart(
CC);
12479bool ARMAsmParser::parseDirectiveSEHEpilogEnd(
SMLoc L) {
12480 getTargetStreamer().emitARMWinCFIEpilogEnd();
12486bool ARMAsmParser::parseDirectiveSEHCustom(
SMLoc L) {
12487 unsigned Opcode = 0;
12490 if (parseImmExpr(Byte))
12492 if (Byte > 0xff || Byte < 0)
12493 return Error(L,
"Invalid byte value in .seh_custom");
12494 if (Opcode > 0x00ffffff)
12495 return Error(L,
"Too many bytes in .seh_custom");
12498 Opcode = (Opcode << 8) | Byte;
12500 getTargetStreamer().emitARMWinCFICustom(Opcode);
12512#define GET_REGISTER_MATCHER
12513#define GET_SUBTARGET_FEATURE_NAME
12514#define GET_MATCHER_IMPLEMENTATION
12515#define GET_MNEMONIC_SPELL_CHECKER
12516#include "ARMGenAsmMatcher.inc"
12522ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
12523 switch (MatchError) {
12526 return hasV8Ops() ?
"operand must be a register in range [r0, r14]"
12527 :
"operand must be a register in range [r0, r12] or r14";
12530 return hasD32() ?
"operand must be a register in range [d0, d31]"
12531 :
"operand must be a register in range [d0, d15]";
12532 case Match_DPR_RegList:
12533 return hasD32() ?
"operand must be a list of registers in range [d0, d31]"
12534 :
"operand must be a list of registers in range [d0, d15]";
12538 return getMatchKindDiag(MatchError);
12561 std::multimap<unsigned, unsigned> OperandMissesSeen;
12563 bool ReportedTooFewOperands =
false;
12568 switch (
I.getKind()) {
12571 ((ARMOperand &)*
Operands[
I.getOperandIndex()]).getStartLoc();
12572 const char *OperandDiag =
12573 getCustomOperandDiag((ARMMatchResultTy)
I.getOperandError());
12580 unsigned DupCheckMatchClass = OperandDiag ?
I.getOperandClass() : ~0
U;
12581 auto PrevReports = OperandMissesSeen.equal_range(
I.getOperandIndex());
12582 if (std::any_of(PrevReports.first, PrevReports.second,
12583 [DupCheckMatchClass](
12584 const std::pair<unsigned, unsigned> Pair) {
12585 if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
12586 return Pair.second == DupCheckMatchClass;
12588 return isSubclass((MatchClassKind)DupCheckMatchClass,
12589 (MatchClassKind)Pair.second);
12592 OperandMissesSeen.insert(
12593 std::make_pair(
I.getOperandIndex(), DupCheckMatchClass));
12595 NearMissMessage Message;
12596 Message.Loc = OperandLoc;
12598 Message.Message = OperandDiag;
12599 }
else if (
I.getOperandClass() == InvalidMatchClass) {
12600 Message.Message =
"too many operands for instruction";
12602 Message.Message =
"invalid operand for instruction";
12604 dbgs() <<
"Missing diagnostic string for operand class "
12605 << getMatchClassName((MatchClassKind)
I.getOperandClass())
12606 <<
I.getOperandClass() <<
", error " <<
I.getOperandError()
12607 <<
", opcode " << MII.getName(
I.getOpcode()) <<
"\n");
12615 if (FeatureMissesSeen.
count(MissingFeatures))
12617 FeatureMissesSeen.
insert(MissingFeatures);
12621 if (MissingFeatures.
test(Feature_IsARMBit) && !hasARM())
12625 if (
isThumb() && MissingFeatures.
test(Feature_IsARMBit) &&
12626 MissingFeatures.
count() > 1)
12628 if (!
isThumb() && MissingFeatures.
test(Feature_IsThumbBit) &&
12629 MissingFeatures.
count() > 1)
12631 if (!
isThumb() && MissingFeatures.
test(Feature_IsThumb2Bit) &&
12633 Feature_IsThumbBit})).
any())
12635 if (isMClass() && MissingFeatures.
test(Feature_HasNEONBit))
12638 NearMissMessage Message;
12639 Message.Loc = IDLoc;
12642 OS <<
"instruction requires:";
12643 for (
unsigned i = 0, e = MissingFeatures.
size(); i != e; ++i)
12644 if (MissingFeatures.
test(i))
12652 NearMissMessage Message;
12653 Message.Loc = IDLoc;
12654 switch (
I.getPredicateError()) {
12655 case Match_RequiresNotITBlock:
12656 Message.Message =
"flag setting instruction only valid outside IT block";
12658 case Match_RequiresITBlock:
12659 Message.Message =
"instruction only valid inside IT block";
12661 case Match_RequiresV6:
12662 Message.Message =
"instruction variant requires ARMv6 or later";
12664 case Match_RequiresThumb2:
12665 Message.Message =
"instruction variant requires Thumb2";
12667 case Match_RequiresV8:
12668 Message.Message =
"instruction variant requires ARMv8 or later";
12670 case Match_RequiresFlagSetting:
12671 Message.Message =
"no flag-preserving variant of this instruction available";
12673 case Match_InvalidOperand:
12674 Message.Message =
"invalid operand for instruction";
12684 if (!ReportedTooFewOperands) {
12685 SMLoc EndLoc = ((ARMOperand &)*
Operands.back()).getEndLoc();
12687 EndLoc,
StringRef(
"too few operands for instruction")});
12688 ReportedTooFewOperands =
true;
12703 FilterNearMisses(NearMisses, Messages, IDLoc,
Operands);
12705 if (Messages.
size() == 0) {
12708 Error(IDLoc,
"invalid instruction");
12709 }
else if (Messages.
size() == 1) {
12711 Error(Messages[0].Loc, Messages[0].Message);
12715 Error(IDLoc,
"invalid instruction, any one of the following would fix this:");
12716 for (
auto &M : Messages) {
12726 static const struct {
12731 {
ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC}},
12733 {Feature_HasV8Bit},
12734 {ARM::FeatureAES, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12736 {Feature_HasV8Bit},
12737 {ARM::FeatureSHA2, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12739 {Feature_HasV8Bit},
12740 {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8}},
12742 {Feature_HasV8_1MMainlineBit},
12743 {ARM::HasMVEFloatOps}},
12745 {Feature_HasV8Bit},
12746 {ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12748 {Feature_HasV7Bit, Feature_IsNotMClassBit},
12749 {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM}},
12751 {Feature_HasV7Bit, Feature_IsNotMClassBit},
12754 {Feature_HasV8Bit},
12755 {ARM::FeatureNEON, ARM::FeatureVFP2_SP, ARM::FeatureFPARMv8}},
12756 {
ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone}},
12758 {
ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization}},
12760 {Feature_HasV8_2aBit},
12761 {ARM::FeatureFPARMv8, ARM::FeatureFullFP16}},
12762 {
ARM::AEK_RAS, {Feature_HasV8Bit}, {ARM::FeatureRAS}},
12763 {
ARM::AEK_LOB, {Feature_HasV8_1MMainlineBit}, {ARM::FeatureLOB}},
12764 {
ARM::AEK_PACBTI, {Feature_HasV8_1MMainlineBit}, {ARM::FeaturePACBTI}},
12772 bool EnableFeature = !
Name.consume_front_insensitive(
"no");
12775 return Error(ExtLoc,
"unknown architectural extension: " +
Name);
12782 return Error(ExtLoc,
"unsupported architectural extension: " +
Name);
12785 return Error(ExtLoc,
"architectural extension '" +
Name +
12787 "allowed for the current base architecture");
12790 if (EnableFeature) {
12796 setAvailableFeatures(Features);
12804bool ARMAsmParser::parseDirectiveArchExtension(
SMLoc L) {
12809 return Error(getLexer().getLoc(),
"expected architecture extension name");
12818 if (
Name ==
"nocrypto") {
12819 enableArchExtFeature(
"nosha2", ExtLoc);
12820 enableArchExtFeature(
"noaes", ExtLoc);
12823 if (enableArchExtFeature(
Name, ExtLoc))
12826 return Error(ExtLoc,
"unknown architectural extension: " +
Name);
12833 ARMOperand &
Op =
static_cast<ARMOperand &
>(AsmOp);
12842 if (
CE->getValue() == 0)
12843 return Match_Success;
12848 if (
CE->getValue() == 8)
12849 return Match_Success;
12854 if (
CE->getValue() == 16)
12855 return Match_Success;
12859 const MCExpr *SOExpr =
Op.getImm();
12861 if (!SOExpr->evaluateAsAbsolute(
Value))
12862 return Match_Success;
12863 assert((
Value >= std::numeric_limits<int32_t>::min() &&
12864 Value <= std::numeric_limits<uint32_t>::max()) &&
12865 "expression value must be representable in 32 bits");
12869 if (hasV8Ops() &&
Op.isReg() &&
Op.getReg() == ARM::SP)
12870 return Match_Success;
12874 MRI->getRegClass(ARM::GPRRegClassID).contains(
Op.getReg()))
12875 return Match_Success;
12878 return Match_InvalidOperand;
12881bool ARMAsmParser::isMnemonicVPTPredicable(
StringRef Mnemonic,
12886 if (MS.isVPTPredicableCDEInstr(Mnemonic) ||
12887 (Mnemonic.
starts_with(
"vldrh") && Mnemonic !=
"vldrhi") ||
12889 !(ExtraToken ==
".f16" || ExtraToken ==
".32" || ExtraToken ==
".16" ||
12890 ExtraToken ==
".8")) ||
12891 (Mnemonic.
starts_with(
"vrint") && Mnemonic !=
"vrintr") ||
12892 (Mnemonic.
starts_with(
"vstrh") && Mnemonic !=
"vstrhi"))
12895 const char *predicable_prefixes[] = {
12896 "vabav",
"vabd",
"vabs",
"vadc",
"vadd",
12897 "vaddlv",
"vaddv",
"vand",
"vbic",
"vbrsr",
12898 "vcadd",
"vcls",
"vclz",
"vcmla",
"vcmp",
12899 "vcmul",
"vctp",
"vcvt",
"vddup",
"vdup",
12900 "vdwdup",
"veor",
"vfma",
"vfmas",
"vfms",
12901 "vhadd",
"vhcadd",
"vhsub",
"vidup",
"viwdup",
12902 "vldrb",
"vldrd",
"vldrw",
"vmax",
"vmaxa",
12903 "vmaxav",
"vmaxnm",
"vmaxnma",
"vmaxnmav",
"vmaxnmv",
12904 "vmaxv",
"vmin",
"vminav",
"vminnm",
"vminnmav",
12905 "vminnmv",
"vminv",
"vmla",
"vmladav",
"vmlaldav",
12906 "vmlalv",
"vmlas",
"vmlav",
"vmlsdav",
"vmlsldav",
12907 "vmovlb",
"vmovlt",
"vmovnb",
"vmovnt",
"vmul",
12908 "vmvn",
"vneg",
"vorn",
"vorr",
"vpnot",
12909 "vpsel",
"vqabs",
"vqadd",
"vqdmladh",
"vqdmlah",
12910 "vqdmlash",
"vqdmlsdh",
"vqdmulh",
"vqdmull",
"vqmovn",
12911 "vqmovun",
"vqneg",
"vqrdmladh",
"vqrdmlah",
"vqrdmlash",
12912 "vqrdmlsdh",
"vqrdmulh",
"vqrshl",
"vqrshrn",
"vqrshrun",
12913 "vqshl",
"vqshrn",
"vqshrun",
"vqsub",
"vrev16",
12914 "vrev32",
"vrev64",
"vrhadd",
"vrmlaldavh",
"vrmlalvh",
12915 "vrmlsldavh",
"vrmulh",
"vrshl",
"vrshr",
"vrshrn",
12916 "vsbc",
"vshl",
"vshlc",
"vshll",
"vshr",
12917 "vshrn",
"vsli",
"vsri",
"vstrb",
"vstrd",
12920 return std::any_of(
12921 std::begin(predicable_prefixes), std::end(predicable_prefixes),
12922 [&Mnemonic](
const char *prefix) {
return Mnemonic.
starts_with(prefix); });
unsigned const MachineRegisterInfo * MRI
This file defines the StringMap class.
static bool isNot(const MachineRegisterInfo &MRI, const MachineInstr &MI)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static bool isLoad(int Opcode)
static unsigned getNextRegister(unsigned Reg)
static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS, unsigned VariantID=0)
static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing)
static bool instIsBreakpoint(const MCInst &Inst)
static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo, unsigned Reg, unsigned HiReg, bool &containsReg)
static bool isDataTypeToken(StringRef Tok)
static MCRegister MatchRegisterName(StringRef Name)
static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing)
static const char * getSubtargetFeatureName(uint64_t Val)
static bool isVectorPredicable(const MCInstrDesc &MCID)
static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp)
MatchCoprocessorOperandName - Try to parse an coprocessor related instruction with a symbolic operand...
static void applyMnemonicAliases(StringRef &Mnemonic, const FeatureBitset &Features, unsigned VariantID)
static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT)
static bool insertNoDuplicates(SmallVectorImpl< std::pair< unsigned, unsigned > > &Regs, unsigned Enc, unsigned Reg)
static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID)
static bool isThumbI8Relocation(MCParsedAsmOperand &MCOp)
static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg)
LLVM_EXTERNAL_VISIBILITY void LLVMInitializeARMAsmParser()
Force static initialization.
static bool isARMMCExpr(MCParsedAsmOperand &MCOp)
static bool isThumb(const MCSubtargetInfo &STI)
static uint64_t scale(uint64_t Num, uint32_t N, uint32_t D)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static Register getFPReg(const CSKYSubtarget &STI)
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
#define LLVM_EXTERNAL_VISIBILITY
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
mir Rename Register Operands
static MSP430CC::CondCodes getCondCode(unsigned Cond)
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool isReg(const MCInst &MI, unsigned OpNo)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static cl::opt< bool > AddBuildAttributes("riscv-add-build-attributes", cl::init(false))
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI Pre allocate WWM Registers
cl::list< SPIRV::Extension::Extension > Extensions("spirv-extensions", cl::desc("SPIR-V extensions"), cl::ZeroOrMore, cl::Hidden, cl::values(clEnumValN(SPIRV::Extension::SPV_EXT_shader_atomic_float_add, "SPV_EXT_shader_atomic_float_add", "Adds atomic add instruction on floating-point numbers."), clEnumValN(SPIRV::Extension::SPV_EXT_shader_atomic_float16_add, "SPV_EXT_shader_atomic_float16_add", "Extends the SPV_EXT_shader_atomic_float_add extension to support " "atomically adding to 16-bit floating-point numbers in memory."), clEnumValN(SPIRV::Extension::SPV_EXT_shader_atomic_float_min_max, "SPV_EXT_shader_atomic_float_min_max", "Adds atomic min and max instruction on floating-point numbers."), clEnumValN(SPIRV::Extension::SPV_INTEL_arbitrary_precision_integers, "SPV_INTEL_arbitrary_precision_integers", "Allows generating arbitrary width integer types."), clEnumValN(SPIRV::Extension::SPV_INTEL_optnone, "SPV_INTEL_optnone", "Adds OptNoneINTEL value for Function Control mask that " "indicates a request to not optimize the function."), clEnumValN(SPIRV::Extension::SPV_INTEL_usm_storage_classes, "SPV_INTEL_usm_storage_classes", "Introduces two new storage classes that are sub classes of " "the CrossWorkgroup storage class " "that provides additional information that can enable " "optimization."), clEnumValN(SPIRV::Extension::SPV_INTEL_subgroups, "SPV_INTEL_subgroups", "Allows work items in a subgroup to share data without the " "use of local memory and work group barriers, and to " "utilize specialized hardware to load and store blocks of " "data from images or buffers."), clEnumValN(SPIRV::Extension::SPV_KHR_uniform_group_instructions, "SPV_KHR_uniform_group_instructions", "Allows support for additional group operations within " "uniform control flow."), clEnumValN(SPIRV::Extension::SPV_KHR_no_integer_wrap_decoration, "SPV_KHR_no_integer_wrap_decoration", "Adds decorations to indicate that a given instruction does " "not cause integer wrapping."), 
clEnumValN(SPIRV::Extension::SPV_KHR_float_controls, "SPV_KHR_float_controls", "Provides new execution modes to control floating-point " "computations by overriding an implementation’s default behavior " "for rounding modes, denormals, signed zero, and infinities."), clEnumValN(SPIRV::Extension::SPV_KHR_expect_assume, "SPV_KHR_expect_assume", "Provides additional information to a compiler, similar to " "the llvm.assume and llvm.expect intrinsics."), clEnumValN(SPIRV::Extension::SPV_KHR_bit_instructions, "SPV_KHR_bit_instructions", "This enables bit instructions to be used by SPIR-V modules " "without requiring the Shader capability."), clEnumValN(SPIRV::Extension::SPV_KHR_linkonce_odr, "SPV_KHR_linkonce_odr", "Allows to use the LinkOnceODR linkage type that is to let " "a function or global variable to be merged with other functions " "or global variables of the same name when linkage occurs."), clEnumValN(SPIRV::Extension::SPV_INTEL_bfloat16_conversion, "SPV_INTEL_bfloat16_conversion", "Adds instructions to convert between single-precision " "32-bit floating-point values and 16-bit bfloat16 values."), clEnumValN(SPIRV::Extension::SPV_KHR_subgroup_rotate, "SPV_KHR_subgroup_rotate", "Adds a new instruction that enables rotating values across " "invocations within a subgroup."), clEnumValN(SPIRV::Extension::SPV_INTEL_variable_length_array, "SPV_INTEL_variable_length_array", "Allows to allocate local arrays whose number of elements " "is unknown at compile time."), clEnumValN(SPIRV::Extension::SPV_INTEL_function_pointers, "SPV_INTEL_function_pointers", "Allows translation of function pointers.")))
This file defines the SmallSet class.
This file defines the SmallVector class.
StringSet - A set-like wrapper for the StringMap.
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=ARM::NoRegAltName)
VariantKind getKind() const
getOpcode - Get the kind of this expression.
static const ARMMCExpr * create(VariantKind Kind, const MCExpr *Expr, MCContext &Ctx)
Target independent representation for an assembler token.
int64_t getIntVal() const
bool isNot(TokenKind K) const
StringRef getString() const
Get the string for the current token, this includes all characters (for example, the quotes on string...
StringRef getStringContents() const
Get the contents of a string token (without quotes).
bool is(TokenKind K) const
StringRef getIdentifier() const
Get the identifier string for the current token, which should be an identifier or a string.
This class represents an Operation in the Expression.
Base class for user error types.
Lightweight error class with error context and mandatory checking.
Container class for subtarget features.
constexpr bool test(unsigned I) const
constexpr size_t size() const
Generic assembler lexer interface, for use by target specific assembly lexers.
const AsmToken peekTok(bool ShouldSkipSpace=true)
Look ahead at the next token to be lexed.
virtual void Initialize(MCAsmParser &Parser)
Initialize the extension for parsing using the given Parser.
MCStreamer & getStreamer()
MCAsmParser & getParser()
Generic assembler parser interface, for use by target specific assembly parsers.
bool parseToken(AsmToken::TokenKind T, const Twine &Msg="unexpected token")
virtual bool parseEscapedString(std::string &Data)=0
Parse the current token as a string which may include escaped characters and return the string conten...
virtual MCStreamer & getStreamer()=0
Return the output streamer for the assembler.
virtual void Note(SMLoc L, const Twine &Msg, SMRange Range=std::nullopt)=0
Emit a note at the location L, with the message Msg.
virtual bool parseExpression(const MCExpr *&Res, SMLoc &EndLoc)=0
Parse an arbitrary expression.
const AsmToken & getTok() const
Get the current AsmToken from the stream.
virtual bool parseIdentifier(StringRef &Res)=0
Parse an identifier or string (as a quoted identifier) and set Res to the identifier contents.
bool parseOptionalToken(AsmToken::TokenKind T)
Attempt to parse and consume token, returning true on success.
virtual const AsmToken & Lex()=0
Get the next AsmToken in the stream, possibly handling file inclusion first.
virtual bool Warning(SMLoc L, const Twine &Msg, SMRange Range=std::nullopt)=0
Emit a warning at the location L, with the message Msg.
bool Error(SMLoc L, const Twine &Msg, SMRange Range=std::nullopt)
Return an error at the location L, with the message Msg.
static const MCBinaryExpr * createAdd(const MCExpr *LHS, const MCExpr *RHS, MCContext &Ctx)
static const MCConstantExpr * create(int64_t Value, MCContext &Ctx, bool PrintInHex=false, unsigned SizeInBytes=0)
Context object for machine code objects.
const MCRegisterInfo * getRegisterInfo() const
Base class for the full range of assembler expressions which are needed for parsing.
Instances of this class represent a single low-level machine instruction.
void dump_pretty(raw_ostream &OS, const MCInstPrinter *Printer=nullptr, StringRef Separator=" ", const MCRegisterInfo *RegInfo=nullptr) const
Dump the MCInst as prettily as possible using the additional MC structures, if given.
unsigned getNumOperands() const
unsigned getOpcode() const
iterator insert(iterator I, const MCOperand &Op)
void addOperand(const MCOperand Op)
void setOpcode(unsigned Op)
const MCOperand & getOperand(unsigned i) const
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool isIndirectBranch() const
Return true if this is an indirect branch, such as a branch through a register.
int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
bool hasDefOfPhysReg(const MCInst &MI, unsigned Reg, const MCRegisterInfo &RI) const
Return true if this instruction defines the specified physical register, either explicitly or implici...
bool hasOptionalDef() const
Set if this instruction has an optional definition, e.g.
unsigned short NumOperands
bool isBranch() const
Returns true if this is a conditional, unconditional, or indirect branch.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
bool isPredicable() const
Return true if this instruction has a predicate operand that controls execution.
bool isCall() const
Return true if the instruction is a call.
bool isTerminator() const
Returns true if this instruction part of the terminator for a basic block.
bool isReturn() const
Return true if the instruction is a return.
Interface to description of machine instruction set.
This holds information about one operand of a machine instruction, indicating the register class for ...
Instances of this class represent operands of the MCInst class.
static MCOperand createReg(unsigned Reg)
static MCOperand createExpr(const MCExpr *Val)
static MCOperand createImm(int64_t Val)
unsigned getReg() const
Returns the register number.
const MCExpr * getExpr() const
MCParsedAsmOperand - This abstract class represents a source-level assembly instruction operand.
virtual unsigned getReg() const =0
virtual SMLoc getStartLoc() const =0
getStartLoc - Get the location of the first token of this operand.
virtual bool isReg() const =0
isReg - Is this a register operand?
virtual bool isMem() const =0
isMem - Is this a memory operand?
virtual void print(raw_ostream &OS) const =0
print - Print a debug representation of the operand to the given stream.
virtual bool isToken() const =0
isToken - Is this a token operand?
virtual bool isImm() const =0
isImm - Is this an immediate operand?
virtual SMLoc getEndLoc() const =0
getEndLoc - Get the location of the last token of this operand.
MCRegisterClass - Base class of TargetRegisterClass.
unsigned getID() const
getID() - Return the register class ID number.
unsigned getNumRegs() const
getNumRegs - Return the number of registers in this class.
unsigned getRegister(unsigned i) const
getRegister - Return the specified register in the class.
bool contains(MCRegister Reg) const
contains - Return true if the specified register is included in this register class.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
Instances of this class represent a uniqued identifier for a section in the current translation unit.
Streaming machine code generation interface.
virtual void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI)
Emit the given Instruction into the current section.
virtual void emitLabel(MCSymbol *Symbol, SMLoc Loc=SMLoc())
Emit a label for Symbol into the current section.
MCTargetStreamer * getTargetStreamer()
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const FeatureBitset & getFeatureBits() const
FeatureBitset ApplyFeatureFlag(StringRef FS)
Apply a feature flag and return the re-computed feature bits, including all feature bits implied by t...
FeatureBitset SetFeatureBitsTransitively(const FeatureBitset &FB)
Set/clear additional feature bits, including all other bits they imply.
void setDefaultFeatures(StringRef CPU, StringRef TuneCPU, StringRef FS)
Set the features to the default for the given CPU and TuneCPU, with ano appended feature string.
FeatureBitset ToggleFeature(uint64_t FB)
Toggle a feature and return the re-computed feature bits.
FeatureBitset ClearFeatureBitsTransitively(const FeatureBitset &FB)
Represent a reference to a symbol from inside an expression.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
MCTargetAsmParser - Generic interface to target specific assembly parsers.
virtual void onLabelParsed(MCSymbol *Symbol)
MCSubtargetInfo & copySTI()
Create a copy of STI and return a non-const reference to it.
@ FIRST_TARGET_MATCH_RESULT_TY
virtual bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc)=0
virtual bool ParseDirective(AsmToken DirectiveID)
ParseDirective - Parse a target specific assembler directive This method is deprecated,...
virtual ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc)=0
tryParseRegister - parse one register if possible
virtual void flushPendingInstructions(MCStreamer &Out)
Ensure that all previously parsed instructions have been emitted to the output streamer,...
void setAvailableFeatures(const FeatureBitset &Value)
virtual MCSymbolRefExpr::VariantKind getVariantKindForName(StringRef Name) const
const MCSubtargetInfo & getSTI() const
virtual void doBeforeLabelEmit(MCSymbol *Symbol, SMLoc IDLoc)
virtual unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, unsigned Kind)
Allow a target to add special case operand matching for things that tblgen doesn't/can't handle effec...
virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, OperandVector &Operands)=0
ParseInstruction - Parse one assembly instruction.
virtual unsigned checkTargetMatchPredicate(MCInst &Inst)
checkTargetMatchPredicate - Validate the instruction match against any complex target predicates not ...
virtual bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, OperandVector &Operands, MCStreamer &Out, uint64_t &ErrorInfo, bool MatchingInlineAsm)=0
MatchAndEmitInstruction - Recognize a series of operands of a parsed instruction as an actual MCInst ...
Target specific streamer interface.
Ternary parse status returned by various parse* methods.
constexpr bool isFailure() const
static constexpr StatusTy Failure
constexpr bool isSuccess() const
static constexpr StatusTy Success
static constexpr StatusTy NoMatch
Represents a location in source code.
static SMLoc getFromPointer(const char *Ptr)
constexpr const char * getPointer() const
Represents a range in source code.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
typename SuperClass::const_iterator const_iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
StringMap - This is an unconventional map that is specialized for handling keys that are "strings",...
iterator find(StringRef Key)
size_type count(StringRef Key) const
count - Return 1 if the element is in the map, 0 otherwise.
bool insert(MapEntryTy *KeyValue)
insert - Insert the specified key/value pair into the map.
StringRef - Represent a constant reference to a string, i.e.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
constexpr size_t size() const
size - Get the string size.
StringRef trim(char Char) const
Return string with consecutive Char characters starting from the left and right removed.
std::string lower() const
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
static constexpr size_t npos
bool equals_insensitive(StringRef RHS) const
Check for string equality, ignoring case.
StringSet - A wrapper for StringMap that provides set-like functionality.
std::pair< typename Base::iterator, bool > insert(StringRef key)
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
Triple - Helper class for working with autoconf configuration names.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
LLVM Value Representation.
This class implements an extremely fast bulk output stream that can only output to a stream.
A raw_ostream that writes to an SmallVector or SmallString.
This class provides various memory handling functions that manipulate MemoryBlock instances.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const TagNameMap & getARMAttributeTags()
static CondCodes getOppositeCondition(CondCodes CC)
unsigned getSORegOffset(unsigned Op)
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
unsigned encodeNEONi16splat(unsigned Value)
float getFPImmFloat(unsigned Imm)
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
unsigned getAM2Opc(AddrOpc Opc, unsigned Imm12, ShiftOpc SO, unsigned IdxMode=0)
unsigned getAM5Opc(AddrOpc Opc, unsigned char Offset)
getAM5Opc - This function encodes the addrmode5 opc field.
ShiftOpc getSORegShOp(unsigned Op)
bool isNEONi16splat(unsigned Value)
Checks if Value is a correct immediate for instructions like VBIC/VORR.
unsigned getAM5FP16Opc(AddrOpc Opc, unsigned char Offset)
getAM5FP16Opc - This function encodes the addrmode5fp16 opc field.
unsigned getAM3Opc(AddrOpc Opc, unsigned char Offset, unsigned IdxMode=0)
getAM3Opc - This function encodes the addrmode3 opc field.
bool isNEONi32splat(unsigned Value)
Checks if Value is a correct immediate for instructions like VBIC/VORR.
unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
const char * getShiftOpcStr(ShiftOpc Op)
unsigned encodeNEONi32splat(unsigned Value)
Encode NEON 32 bits Splat immediate for instructions like VBIC/VORR.
static const char * IFlagsToString(unsigned val)
bool getFPUFeatures(FPUKind FPUKind, std::vector< StringRef > &Features)
StringRef getArchName(ArchKind AK)
uint64_t parseArchExt(StringRef ArchExt)
ArchKind parseArch(StringRef Arch)
bool isVpred(OperandType op)
FPUKind parseFPU(StringRef FPU)
bool isCDECoproc(size_t Coproc, const MCSubtargetInfo &STI)
@ D16
Only 16 D registers.
constexpr bool any(E Val)
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
std::optional< unsigned > attrTypeFromString(StringRef tag, TagNameMap tagNameMap)
Flag
These should be considered private to the implementation of the MCInstrDesc class.
bool parseAssignmentExpression(StringRef Name, bool allow_redef, MCAsmParser &Parser, MCSymbol *&Symbol, const MCExpr *&Value)
Parse a value expression and return whether it can be assigned to a symbol with the given name.
@ CE
Windows NT (Windows on ARM)
Reg
All possible values of the reg field in the ModR/M byte.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
NodeAddr< FuncNode * > Func
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
static const char * ARMVPTPredToString(ARMVCC::VPTCodes CC)
int popcount(T Value) noexcept
Count the number of set bits in a value.
Target & getTheThumbBETarget()
static unsigned ARMCondCodeFromString(StringRef CC)
const ARMInstrTable ARMDescs
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
static bool isARMLowRegister(unsigned Reg)
isARMLowRegister - Returns true if the register is a low register (r0-r7).
auto reverse(ContainerTy &&C)
@ Never
Never set the bit.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool is_sorted(R &&Range, Compare C)
Wrapper function around std::is_sorted to check if elements in a range R are sorted with respect to a...
bool IsCPSRDead< MCInst >(const MCInst *Instr)
static bool isValidCoprocessorNumber(unsigned Num, const FeatureBitset &featureBits)
isValidCoprocessorNumber - decide whether an explicit coprocessor number is legal in generic instruct...
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ MCAF_Code16
.code16 (X86) / .code 16 (ARM)
@ MCAF_Code32
.code32 (X86) / .code 32 (ARM)
DWARFExpression::Operation Op
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
static unsigned ARMVectorCondCodeFromString(StringRef CC)
static const char * ARMCondCodeToString(ARMCC::CondCodes CC)
Target & getTheARMLETarget()
Target & getTheARMBETarget()
Target & getTheThumbLETarget()
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
const FeatureBitset Features
MCOperandInfo OperandInfo[3026]
MCPhysReg ImplicitOps[130]
This struct is a compact representation of a valid (non-zero power of two) alignment.
Holds functions to get, set or test bitfields.
RegisterMCAsmParser - Helper template for registering a target specific assembly parser,...