75 enum class MatrixKind {
Array, Tile, Row, Col };
77 enum RegConstraintEqualityTy {
92 static PrefixInfo CreateFromInst(
const MCInst &Inst,
uint64_t TSFlags) {
95 case AArch64::MOVPRFX_ZZ:
99 case AArch64::MOVPRFX_ZPmZ_B:
100 case AArch64::MOVPRFX_ZPmZ_H:
101 case AArch64::MOVPRFX_ZPmZ_S:
102 case AArch64::MOVPRFX_ZPmZ_D:
107 "No destructive element size set for movprfx");
111 case AArch64::MOVPRFX_ZPzZ_B:
112 case AArch64::MOVPRFX_ZPzZ_H:
113 case AArch64::MOVPRFX_ZPzZ_S:
114 case AArch64::MOVPRFX_ZPzZ_D:
119 "No destructive element size set for movprfx");
// Default state: no movprfx prefix is currently being tracked.
PrefixInfo() = default;
131 bool isActive()
const {
return Active; }
133 unsigned getElementSize()
const {
137 unsigned getDstReg()
const {
return Dst; }
138 unsigned getPgReg()
const {
145 bool Predicated =
false;
146 unsigned ElementSize;
156 SMLoc getLoc()
const {
return getParser().getTok().getLoc(); }
161 std::string &Suggestion);
165 bool parseSymbolicImmVal(
const MCExpr *&ImmVal);
170 bool invertCondCode);
171 bool parseImmExpr(int64_t &Out);
173 bool parseRegisterInRange(
unsigned &Out,
unsigned Base,
unsigned First,
179 bool parseDirectiveArch(
SMLoc L);
180 bool parseDirectiveArchExtension(
SMLoc L);
181 bool parseDirectiveCPU(
SMLoc L);
182 bool parseDirectiveInst(
SMLoc L);
184 bool parseDirectiveTLSDescCall(
SMLoc L);
187 bool parseDirectiveLtorg(
SMLoc L);
190 bool parseDirectiveUnreq(
SMLoc L);
191 bool parseDirectiveCFINegateRAState();
192 bool parseDirectiveCFIBKeyFrame();
193 bool parseDirectiveCFIMTETaggedFrame();
195 bool parseDirectiveVariantPCS(
SMLoc L);
// Parsers for the Windows ARM64 SEH (structured exception handling)
// unwind directives (.seh_*).  Each takes the directive's source
// location; presumably each returns true on error, following the usual
// MC directive-parsing convention — TODO(review): confirm against the
// definitions, which are not visible in this chunk.
bool parseDirectiveSEHAllocStack(SMLoc L);
bool parseDirectiveSEHPrologEnd(SMLoc L);
bool parseDirectiveSEHSaveR19R20X(SMLoc L);
bool parseDirectiveSEHSaveFPLR(SMLoc L);
bool parseDirectiveSEHSaveFPLRX(SMLoc L);
bool parseDirectiveSEHSaveReg(SMLoc L);
bool parseDirectiveSEHSaveRegX(SMLoc L);
bool parseDirectiveSEHSaveRegP(SMLoc L);
bool parseDirectiveSEHSaveRegPX(SMLoc L);
bool parseDirectiveSEHSaveLRPair(SMLoc L);
bool parseDirectiveSEHSaveFReg(SMLoc L);
bool parseDirectiveSEHSaveFRegX(SMLoc L);
bool parseDirectiveSEHSaveFRegP(SMLoc L);
bool parseDirectiveSEHSaveFRegPX(SMLoc L);
bool parseDirectiveSEHSetFP(SMLoc L);
bool parseDirectiveSEHAddFP(SMLoc L);
bool parseDirectiveSEHNop(SMLoc L);
bool parseDirectiveSEHSaveNext(SMLoc L);
bool parseDirectiveSEHEpilogStart(SMLoc L);
bool parseDirectiveSEHEpilogEnd(SMLoc L);
bool parseDirectiveSEHTrapFrame(SMLoc L);
bool parseDirectiveSEHMachineFrame(SMLoc L);
bool parseDirectiveSEHContext(SMLoc L);
bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
222 bool validateInstruction(
MCInst &Inst,
SMLoc &IDLoc,
224 bool MatchAndEmitInstruction(
SMLoc IDLoc,
unsigned &Opcode,
227 bool MatchingInlineAsm)
override;
231 #define GET_ASSEMBLER_HEADER
232 #include "AArch64GenAsmMatcher.inc"
247 template <
bool IsSVEPrefetch = false>
253 template<
bool AddFPZeroAsLiteral>
260 template <
bool ParseShiftExtend,
261 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
263 template <
bool ParseShiftExtend,
bool ParseSuffix>
266 template <RegKind VectorKind>
268 bool ExpectMatch =
false);
274 enum AArch64MatchResultTy {
275 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
276 #define GET_OPERAND_DIAGNOSTIC_TYPES
277 #include "AArch64GenAsmMatcher.inc"
287 if (
S.getTargetStreamer() ==
nullptr)
300 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
307 bool ParseRegister(
unsigned &RegNo,
SMLoc &StartLoc,
SMLoc &EndLoc)
override;
309 SMLoc &EndLoc)
override;
310 bool ParseDirective(
AsmToken DirectiveID)
override;
312 unsigned Kind)
override;
314 static bool classifySymbolRef(
const MCExpr *Expr,
345 SMLoc StartLoc, EndLoc;
354 struct ShiftExtendOp {
357 bool HasExplicitAmount;
367 RegConstraintEqualityTy EqualityTy;
383 ShiftExtendOp ShiftExtend;
388 unsigned ElementWidth;
392 struct MatrixTileListOp {
393 unsigned RegMask = 0;
396 struct VectorListOp {
399 unsigned NumElements;
400 unsigned ElementWidth;
401 RegKind RegisterKind;
404 struct VectorIndexOp {
412 struct ShiftedImmOp {
414 unsigned ShiftAmount;
466 unsigned PStateField;
472 struct MatrixRegOp MatrixReg;
473 struct MatrixTileListOp MatrixTileList;
474 struct VectorListOp VectorList;
475 struct VectorIndexOp VectorIndex;
477 struct ShiftedImmOp ShiftedImm;
479 struct FPImmOp FPImm;
481 struct SysRegOp SysReg;
482 struct SysCRImmOp SysCRImm;
484 struct PSBHintOp PSBHint;
485 struct BTIHintOp BTIHint;
486 struct ShiftExtendOp ShiftExtend;
// Operands are built through the Create* factory functions; the
// constructor only records the discriminating kind tag and the context.
AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
499 StartLoc = o.StartLoc;
509 ShiftedImm = o.ShiftedImm;
523 case k_MatrixRegister:
524 MatrixReg = o.MatrixReg;
526 case k_MatrixTileList:
527 MatrixTileList = o.MatrixTileList;
530 VectorList = o.VectorList;
533 VectorIndex = o.VectorIndex;
539 SysCRImm = o.SysCRImm;
551 ShiftExtend = o.ShiftExtend;
560 SMLoc getStartLoc()
const override {
return StartLoc; }
562 SMLoc getEndLoc()
const override {
return EndLoc; }
569 bool isTokenSuffix()
const {
574 const MCExpr *getImm()
const {
575 assert(
Kind == k_Immediate &&
"Invalid access!");
579 const MCExpr *getShiftedImmVal()
const {
580 assert(
Kind == k_ShiftedImm &&
"Invalid access!");
581 return ShiftedImm.Val;
584 unsigned getShiftedImmShift()
const {
585 assert(
Kind == k_ShiftedImm &&
"Invalid access!");
586 return ShiftedImm.ShiftAmount;
590 assert(
Kind == k_CondCode &&
"Invalid access!");
595 assert (
Kind == k_FPImm &&
"Invalid access!");
599 bool getFPImmIsExact()
const {
600 assert (
Kind == k_FPImm &&
"Invalid access!");
601 return FPImm.IsExact;
604 unsigned getBarrier()
const {
605 assert(
Kind == k_Barrier &&
"Invalid access!");
610 assert(
Kind == k_Barrier &&
"Invalid access!");
614 bool getBarriernXSModifier()
const {
615 assert(
Kind == k_Barrier &&
"Invalid access!");
619 unsigned getReg()
const override {
620 assert(
Kind == k_Register &&
"Invalid access!");
624 unsigned getMatrixReg()
const {
625 assert(
Kind == k_MatrixRegister &&
"Invalid access!");
626 return MatrixReg.RegNum;
629 unsigned getMatrixElementWidth()
const {
630 assert(
Kind == k_MatrixRegister &&
"Invalid access!");
631 return MatrixReg.ElementWidth;
634 MatrixKind getMatrixKind()
const {
635 assert(
Kind == k_MatrixRegister &&
"Invalid access!");
636 return MatrixReg.Kind;
639 unsigned getMatrixTileListRegMask()
const {
640 assert(isMatrixTileList() &&
"Invalid access!");
641 return MatrixTileList.RegMask;
644 RegConstraintEqualityTy getRegEqualityTy()
const {
645 assert(
Kind == k_Register &&
"Invalid access!");
646 return Reg.EqualityTy;
649 unsigned getVectorListStart()
const {
650 assert(
Kind == k_VectorList &&
"Invalid access!");
651 return VectorList.RegNum;
654 unsigned getVectorListCount()
const {
655 assert(
Kind == k_VectorList &&
"Invalid access!");
656 return VectorList.Count;
659 int getVectorIndex()
const {
660 assert(
Kind == k_VectorIndex &&
"Invalid access!");
661 return VectorIndex.Val;
665 assert(
Kind == k_SysReg &&
"Invalid access!");
666 return StringRef(SysReg.Data, SysReg.Length);
669 unsigned getSysCR()
const {
674 unsigned getPrefetch()
const {
675 assert(
Kind == k_Prefetch &&
"Invalid access!");
679 unsigned getPSBHint()
const {
680 assert(
Kind == k_PSBHint &&
"Invalid access!");
685 assert(
Kind == k_PSBHint &&
"Invalid access!");
686 return StringRef(PSBHint.Data, PSBHint.Length);
689 unsigned getBTIHint()
const {
690 assert(
Kind == k_BTIHint &&
"Invalid access!");
695 assert(
Kind == k_BTIHint &&
"Invalid access!");
696 return StringRef(BTIHint.Data, BTIHint.Length);
701 return StringRef(SVCR.Data, SVCR.Length);
705 assert(
Kind == k_Prefetch &&
"Invalid access!");
710 if (
Kind == k_ShiftExtend)
711 return ShiftExtend.Type;
712 if (
Kind == k_Register)
713 return Reg.ShiftExtend.Type;
717 unsigned getShiftExtendAmount()
const {
718 if (
Kind == k_ShiftExtend)
719 return ShiftExtend.Amount;
720 if (
Kind == k_Register)
721 return Reg.ShiftExtend.Amount;
725 bool hasShiftExtendAmount()
const {
726 if (
Kind == k_ShiftExtend)
727 return ShiftExtend.HasExplicitAmount;
728 if (
Kind == k_Register)
729 return Reg.ShiftExtend.HasExplicitAmount;
733 bool isImm()
const override {
return Kind == k_Immediate; }
734 bool isMem()
const override {
return false; }
736 bool isUImm6()
const {
743 return (Val >= 0 && Val < 64);
746 template <
int W
idth>
bool isSImm()
const {
return isSImmScaled<Width, 1>(); }
749 return isImmScaled<Bits, Scale>(
true);
753 return isImmScaled<Bits, Scale>(
false);
756 template <
int Bits,
int Scale>
765 int64_t MinVal, MaxVal;
768 MinVal = (int64_t(1) <<
Shift) * -Scale;
769 MaxVal = ((int64_t(1) <<
Shift) - 1) * Scale;
772 MaxVal = ((int64_t(1) <<
Bits) - 1) * Scale;
776 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
785 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
789 if (Val >= 0 && Val < 32)
794 bool isSymbolicUImm12Offset(
const MCExpr *Expr)
const {
798 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
830 template <
int Scale>
bool isUImm12Offset()
const {
836 return isSymbolicUImm12Offset(getImm());
839 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
842 template <
int N,
int M>
843 bool isImmInRange()
const {
850 return (Val >=
N && Val <=
M);
855 template <
typename T>
856 bool isLogicalImm()
const {
867 if ((Val & Upper) && (Val &
Upper) != Upper)
873 bool isShiftedImm()
const {
return Kind == k_ShiftedImm; }
878 template <
unsigned W
idth>
880 if (isShiftedImm() &&
Width == getShiftedImmShift())
881 if (
auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
882 return std::make_pair(
CE->getValue(),
Width);
885 if (
auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
886 int64_t Val =
CE->getValue();
890 return std::make_pair(Val, 0u);
896 bool isAddSubImm()
const {
897 if (!isShiftedImm() && !
isImm())
903 if (isShiftedImm()) {
904 unsigned Shift = ShiftedImm.ShiftAmount;
905 Expr = ShiftedImm.Val;
915 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
916 DarwinRefKind, Addend)) {
933 if (
auto ShiftedVal = getShiftedVal<12>())
934 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
941 bool isAddSubImmNeg()
const {
942 if (!isShiftedImm() && !
isImm())
946 if (
auto ShiftedVal = getShiftedVal<12>())
947 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
957 template <
typename T>
959 if (!isShiftedImm() && (!
isImm() || !isa<MCConstantExpr>(getImm())))
962 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
963 std::is_same<int8_t, T>::value;
964 if (
auto ShiftedImm = getShiftedVal<8>())
965 if (!(IsByte && ShiftedImm->second) &&
966 AArch64_AM::isSVECpyImm<T>(
uint64_t(ShiftedImm->first)
967 << ShiftedImm->second))
977 if (!isShiftedImm() && (!
isImm() || !isa<MCConstantExpr>(getImm())))
980 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
981 std::is_same<int8_t, T>::value;
982 if (
auto ShiftedImm = getShiftedVal<8>())
983 if (!(IsByte && ShiftedImm->second) &&
984 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
985 << ShiftedImm->second))
992 if (isLogicalImm<T>() && !isSVECpyImm<T>())
997 bool isCondCode()
const {
return Kind == k_CondCode; }
999 bool isSIMDImmType10()
const {
1009 bool isBranchTarget()
const {
1018 assert(
N > 0 &&
"Branch target immediate cannot be 0 bits!");
1019 return (Val >= -((1<<(
N-1)) << 2) && Val <= (((1<<(
N-1))-1) << 2));
1030 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1031 DarwinRefKind, Addend)) {
1040 bool isMovWSymbolG3()
const {
1044 bool isMovWSymbolG2()
const {
1045 return isMovWSymbol(
1052 bool isMovWSymbolG1()
const {
1053 return isMovWSymbol(
1061 bool isMovWSymbolG0()
const {
1062 return isMovWSymbol(
1070 template<
int RegW
idth,
int Shift>
1072 if (!
isImm())
return false;
1085 template<
int RegW
idth,
int Shift>
1087 if (!
isImm())
return false;
1090 if (!CE)
return false;
1096 bool isFPImm()
const {
1097 return Kind == k_FPImm &&
1101 bool isBarrier()
const {
1102 return Kind == k_Barrier && !getBarriernXSModifier();
1104 bool isBarriernXS()
const {
1105 return Kind == k_Barrier && getBarriernXSModifier();
1107 bool isSysReg()
const {
return Kind == k_SysReg; }
1109 bool isMRSSystemRegister()
const {
1110 if (!isSysReg())
return false;
1112 return SysReg.MRSReg != -1U;
1115 bool isMSRSystemRegister()
const {
1116 if (!isSysReg())
return false;
1117 return SysReg.MSRReg != -1U;
1120 bool isSystemPStateFieldWithImm0_1()
const {
1121 if (!isSysReg())
return false;
1122 return (SysReg.PStateField == AArch64PState::PAN ||
1123 SysReg.PStateField == AArch64PState::DIT ||
1124 SysReg.PStateField == AArch64PState::UAO ||
1125 SysReg.PStateField == AArch64PState::SSBS);
1128 bool isSystemPStateFieldWithImm0_15()
const {
1129 if (!isSysReg() || isSystemPStateFieldWithImm0_1())
return false;
1130 return SysReg.PStateField != -1U;
1133 bool isSVCR()
const {
1136 return SVCR.PStateField != -1U;
1139 bool isReg()
const override {
1140 return Kind == k_Register;
1143 bool isScalarReg()
const {
1147 bool isNeonVectorReg()
const {
1148 return Kind == k_Register &&
Reg.Kind == RegKind::NeonVector;
1151 bool isNeonVectorRegLo()
const {
1152 return Kind == k_Register &&
Reg.Kind == RegKind::NeonVector &&
1153 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1155 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1159 bool isMatrix()
const {
return Kind == k_MatrixRegister; }
1160 bool isMatrixTileList()
const {
return Kind == k_MatrixTileList; }
1162 template <
unsigned Class>
bool isSVEVectorReg()
const {
1165 case AArch64::ZPRRegClassID:
1166 case AArch64::ZPR_3bRegClassID:
1167 case AArch64::ZPR_4bRegClassID:
1168 RK = RegKind::SVEDataVector;
1170 case AArch64::PPRRegClassID:
1171 case AArch64::PPR_3bRegClassID:
1172 RK = RegKind::SVEPredicateVector;
1178 return (
Kind == k_Register &&
Reg.Kind == RK) &&
1179 AArch64MCRegisterClasses[
Class].contains(
getReg());
1182 template <
unsigned Class>
bool isFPRasZPR()
const {
1184 AArch64MCRegisterClasses[
Class].contains(
getReg());
1187 template <
int ElementW
idth,
unsigned Class>
1189 if (
Kind != k_Register ||
Reg.Kind != RegKind::SVEPredicateVector)
1192 if (isSVEVectorReg<Class>() && (
Reg.ElementWidth == ElementWidth))
1198 template <
int ElementW
idth,
unsigned Class>
1200 if (
Kind != k_Register ||
Reg.Kind != RegKind::SVEDataVector)
1203 if (isSVEVectorReg<Class>() &&
Reg.ElementWidth == ElementWidth)
1209 template <
int ElementWidth,
unsigned Class,
1211 bool ShiftWidthAlwaysSame>
1213 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1214 if (!VectorMatch.isMatch())
1220 bool MatchShift = getShiftExtendAmount() ==
Log2_32(ShiftWidth / 8);
1223 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1226 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1232 bool isGPR32as64()
const {
1234 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(
Reg.RegNum);
1237 bool isGPR64as32()
const {
1239 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(
Reg.RegNum);
1242 bool isGPR64x8()
const {
1244 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1248 bool isWSeqPair()
const {
1250 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1254 bool isXSeqPair()
const {
1256 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1260 template<
int64_t Angle,
int64_t Remainder>
1268 if (
Value % Angle == Remainder &&
Value <= 270)
1273 template <
unsigned RegClassID>
bool isGPR64()
const {
1275 AArch64MCRegisterClasses[RegClassID].contains(
getReg());
1278 template <
unsigned RegClassID,
int ExtW
idth>
1283 if (isGPR64<RegClassID>() && getShiftExtendType() ==
AArch64_AM::LSL &&
1284 getShiftExtendAmount() ==
Log2_32(ExtWidth / 8))
1291 template <RegKind VectorKind,
unsigned NumRegs>
1292 bool isImplicitlyTypedVectorList()
const {
1293 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1294 VectorList.NumElements == 0 &&
1295 VectorList.RegisterKind == VectorKind;
1298 template <RegKind VectorKind,
unsigned NumRegs,
unsigned NumElements,
1299 unsigned ElementWidth>
1300 bool isTypedVectorList()
const {
1301 if (
Kind != k_VectorList)
1303 if (VectorList.Count != NumRegs)
1305 if (VectorList.RegisterKind != VectorKind)
1307 if (VectorList.ElementWidth != ElementWidth)
1309 return VectorList.NumElements == NumElements;
1312 template <
int Min,
int Max>
1314 if (
Kind != k_VectorIndex)
1316 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1321 bool isToken()
const override {
return Kind == k_Token; }
1323 bool isTokenEqual(
StringRef Str)
const {
1324 return Kind == k_Token && getToken() == Str;
1326 bool isSysCR()
const {
return Kind == k_SysCR; }
1327 bool isPrefetch()
const {
return Kind == k_Prefetch; }
1328 bool isPSBHint()
const {
return Kind == k_PSBHint; }
1329 bool isBTIHint()
const {
return Kind == k_BTIHint; }
1330 bool isShiftExtend()
const {
return Kind == k_ShiftExtend; }
1331 bool isShifter()
const {
1332 if (!isShiftExtend())
1342 if (
Kind != k_FPImm)
1345 if (getFPImmIsExact()) {
1347 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1348 assert(Desc &&
"Unknown enum value");
1357 if (getFPImm().bitwiseIsEqual(RealVal))
1364 template <
unsigned ImmA,
unsigned ImmB>
1367 if ((Res = isExactFPImm<ImmA>()))
1369 if ((Res = isExactFPImm<ImmB>()))
1374 bool isExtend()
const {
1375 if (!isShiftExtend())
1384 getShiftExtendAmount() <= 4;
1387 bool isExtend64()
const {
1397 bool isExtendLSL64()
const {
1403 getShiftExtendAmount() <= 4;
1406 template<
int W
idth>
bool isMemXExtend()
const {
1412 getShiftExtendAmount() == 0);
1415 template<
int W
idth>
bool isMemWExtend()
const {
1421 getShiftExtendAmount() == 0);
1424 template <
unsigned w
idth>
1425 bool isArithmeticShifter()
const {
1435 template <
unsigned w
idth>
1436 bool isLogicalShifter()
const {
1444 getShiftExtendAmount() < width;
1447 bool isMovImm32Shifter()
const {
1455 uint64_t Val = getShiftExtendAmount();
1456 return (Val == 0 || Val == 16);
1459 bool isMovImm64Shifter()
const {
1467 uint64_t Val = getShiftExtendAmount();
1468 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1471 bool isLogicalVecShifter()
const {
1476 unsigned Shift = getShiftExtendAmount();
1481 bool isLogicalVecHalfWordShifter()
const {
1482 if (!isLogicalVecShifter())
1486 unsigned Shift = getShiftExtendAmount();
1491 bool isMoveVecShifter()
const {
1492 if (!isShiftExtend())
1496 unsigned Shift = getShiftExtendAmount();
1507 bool isSImm9OffsetFB()
const {
1508 return isSImm<9>() && !isUImm12Offset<
Width / 8>();
1511 bool isAdrpLabel()
const {
1518 int64_t Val =
CE->getValue();
1519 int64_t Min = - (4096 * (1LL << (21 - 1)));
1520 int64_t
Max = 4096 * ((1LL << (21 - 1)) - 1);
1521 return (Val % 4096) == 0 && Val >= Min && Val <=
Max;
1527 bool isAdrLabel()
const {
1534 int64_t Val =
CE->getValue();
1535 int64_t Min = - (1LL << (21 - 1));
1536 int64_t
Max = ((1LL << (21 - 1)) - 1);
1537 return Val >= Min && Val <=
Max;
1543 template <MatrixKind Kind,
unsigned EltSize,
unsigned RegClass>
1547 if (getMatrixKind() !=
Kind ||
1548 !AArch64MCRegisterClasses[RegClass].
contains(getMatrixReg()) ||
1549 EltSize != getMatrixElementWidth())
1558 else if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1564 void addRegOperands(
MCInst &Inst,
unsigned N)
const {
1565 assert(
N == 1 &&
"Invalid number of operands!");
1569 void addMatrixOperands(
MCInst &Inst,
unsigned N)
const {
1570 assert(
N == 1 &&
"Invalid number of operands!");
1574 void addGPR32as64Operands(
MCInst &Inst,
unsigned N)
const {
1575 assert(
N == 1 &&
"Invalid number of operands!");
1577 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].
contains(
getReg()));
1586 void addGPR64as32Operands(
MCInst &Inst,
unsigned N)
const {
1587 assert(
N == 1 &&
"Invalid number of operands!");
1589 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].
contains(
getReg()));
1598 template <
int W
idth>
1599 void addFPRasZPRRegOperands(
MCInst &Inst,
unsigned N)
const {
1602 case 8:
Base = AArch64::B0;
break;
1603 case 16:
Base = AArch64::H0;
break;
1604 case 32:
Base = AArch64::S0;
break;
1605 case 64:
Base = AArch64::D0;
break;
1606 case 128:
Base = AArch64::Q0;
break;
1613 void addVectorReg64Operands(
MCInst &Inst,
unsigned N)
const {
1614 assert(
N == 1 &&
"Invalid number of operands!");
1616 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].
contains(
getReg()));
1620 void addVectorReg128Operands(
MCInst &Inst,
unsigned N)
const {
1621 assert(
N == 1 &&
"Invalid number of operands!");
1623 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].
contains(
getReg()));
1627 void addVectorRegLoOperands(
MCInst &Inst,
unsigned N)
const {
1628 assert(
N == 1 &&
"Invalid number of operands!");
1632 enum VecListIndexType {
1633 VecListIdx_DReg = 0,
1634 VecListIdx_QReg = 1,
1635 VecListIdx_ZReg = 2,
1638 template <VecListIndexType RegTy,
unsigned NumRegs>
1639 void addVectorListOperands(
MCInst &Inst,
unsigned N)
const {
1640 assert(
N == 1 &&
"Invalid number of operands!");
1641 static const unsigned FirstRegs[][5] = {
1643 AArch64::D0, AArch64::D0_D1,
1644 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1646 AArch64::Q0, AArch64::Q0_Q1,
1647 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1649 AArch64::Z0, AArch64::Z0_Z1,
1650 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
1653 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1654 " NumRegs must be <= 4 for ZRegs");
1656 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1658 FirstRegs[(
unsigned)RegTy][0]));
1661 void addMatrixTileListOperands(
MCInst &Inst,
unsigned N)
const {
1662 assert(
N == 1 &&
"Invalid number of operands!");
1663 unsigned RegMask = getMatrixTileListRegMask();
1664 assert(RegMask <= 0xFF &&
"Invalid mask!");
1668 void addVectorIndexOperands(
MCInst &Inst,
unsigned N)
const {
1669 assert(
N == 1 &&
"Invalid number of operands!");
1673 template <
unsigned ImmIs0,
unsigned ImmIs1>
1674 void addExactFPImmOperands(
MCInst &Inst,
unsigned N)
const {
1675 assert(
N == 1 &&
"Invalid number of operands!");
1676 assert(
bool(isExactFPImm<ImmIs0, ImmIs1>()) &&
"Invalid operand");
1680 void addImmOperands(
MCInst &Inst,
unsigned N)
const {
1681 assert(
N == 1 &&
"Invalid number of operands!");
1685 addExpr(Inst, getImm());
1688 template <
int Shift>
1689 void addImmWithOptionalShiftOperands(
MCInst &Inst,
unsigned N)
const {
1690 assert(
N == 2 &&
"Invalid number of operands!");
1691 if (
auto ShiftedVal = getShiftedVal<Shift>()) {
1694 }
else if (isShiftedImm()) {
1695 addExpr(Inst, getShiftedImmVal());
1698 addExpr(Inst, getImm());
1703 template <
int Shift>
1704 void addImmNegWithOptionalShiftOperands(
MCInst &Inst,
unsigned N)
const {
1705 assert(
N == 2 &&
"Invalid number of operands!");
1706 if (
auto ShiftedVal = getShiftedVal<Shift>()) {
1713 void addCondCodeOperands(
MCInst &Inst,
unsigned N)
const {
1714 assert(
N == 1 &&
"Invalid number of operands!");
1718 void addAdrpLabelOperands(
MCInst &Inst,
unsigned N)
const {
1719 assert(
N == 1 &&
"Invalid number of operands!");
1722 addExpr(Inst, getImm());
1727 void addAdrLabelOperands(
MCInst &Inst,
unsigned N)
const {
1728 addImmOperands(Inst,
N);
1732 void addUImm12OffsetOperands(
MCInst &Inst,
unsigned N)
const {
1733 assert(
N == 1 &&
"Invalid number of operands!");
1743 void addUImm6Operands(
MCInst &Inst,
unsigned N)
const {
1744 assert(
N == 1 &&
"Invalid number of operands!");
1749 template <
int Scale>
1750 void addImmScaledOperands(
MCInst &Inst,
unsigned N)
const {
1751 assert(
N == 1 &&
"Invalid number of operands!");
1756 template <
typename T>
1757 void addLogicalImmOperands(
MCInst &Inst,
unsigned N)
const {
1758 assert(
N == 1 &&
"Invalid number of operands!");
1760 std::make_unsigned_t<T> Val = MCE->
getValue();
1765 template <
typename T>
1766 void addLogicalImmNotOperands(
MCInst &Inst,
unsigned N)
const {
1767 assert(
N == 1 &&
"Invalid number of operands!");
1769 std::make_unsigned_t<T> Val = ~MCE->
getValue();
1774 void addSIMDImmType10Operands(
MCInst &Inst,
unsigned N)
const {
1775 assert(
N == 1 &&
"Invalid number of operands!");
1781 void addBranchTarget26Operands(
MCInst &Inst,
unsigned N)
const {
1785 assert(
N == 1 &&
"Invalid number of operands!");
1788 addExpr(Inst, getImm());
1791 assert(MCE &&
"Invalid constant immediate operand!");
1795 void addPCRelLabel19Operands(
MCInst &Inst,
unsigned N)
const {
1799 assert(
N == 1 &&
"Invalid number of operands!");
1802 addExpr(Inst, getImm());
1805 assert(MCE &&
"Invalid constant immediate operand!");
1809 void addBranchTarget14Operands(
MCInst &Inst,
unsigned N)
const {
1813 assert(
N == 1 &&
"Invalid number of operands!");
1816 addExpr(Inst, getImm());
1819 assert(MCE &&
"Invalid constant immediate operand!");
1823 void addFPImmOperands(
MCInst &Inst,
unsigned N)
const {
1824 assert(
N == 1 &&
"Invalid number of operands!");
1829 void addBarrierOperands(
MCInst &Inst,
unsigned N)
const {
1830 assert(
N == 1 &&
"Invalid number of operands!");
1834 void addBarriernXSOperands(
MCInst &Inst,
unsigned N)
const {
1835 assert(
N == 1 &&
"Invalid number of operands!");
1839 void addMRSSystemRegisterOperands(
MCInst &Inst,
unsigned N)
const {
1840 assert(
N == 1 &&
"Invalid number of operands!");
1845 void addMSRSystemRegisterOperands(
MCInst &Inst,
unsigned N)
const {
1846 assert(
N == 1 &&
"Invalid number of operands!");
1851 void addSystemPStateFieldWithImm0_1Operands(
MCInst &Inst,
unsigned N)
const {
1852 assert(
N == 1 &&
"Invalid number of operands!");
1857 void addSVCROperands(
MCInst &Inst,
unsigned N)
const {
1858 assert(
N == 1 &&
"Invalid number of operands!");
1863 void addSystemPStateFieldWithImm0_15Operands(
MCInst &Inst,
unsigned N)
const {
1864 assert(
N == 1 &&
"Invalid number of operands!");
1869 void addSysCROperands(
MCInst &Inst,
unsigned N)
const {
1870 assert(
N == 1 &&
"Invalid number of operands!");
1874 void addPrefetchOperands(
MCInst &Inst,
unsigned N)
const {
1875 assert(
N == 1 &&
"Invalid number of operands!");
1879 void addPSBHintOperands(
MCInst &Inst,
unsigned N)
const {
1880 assert(
N == 1 &&
"Invalid number of operands!");
1884 void addBTIHintOperands(
MCInst &Inst,
unsigned N)
const {
1885 assert(
N == 1 &&
"Invalid number of operands!");
1889 void addShifterOperands(
MCInst &Inst,
unsigned N)
const {
1890 assert(
N == 1 &&
"Invalid number of operands!");
1896 void addExtendOperands(
MCInst &Inst,
unsigned N)
const {
1897 assert(
N == 1 &&
"Invalid number of operands!");
1904 void addExtend64Operands(
MCInst &Inst,
unsigned N)
const {
1905 assert(
N == 1 &&
"Invalid number of operands!");
1912 void addMemExtendOperands(
MCInst &Inst,
unsigned N)
const {
1913 assert(
N == 2 &&
"Invalid number of operands!");
1924 void addMemExtend8Operands(
MCInst &Inst,
unsigned N)
const {
1925 assert(
N == 2 &&
"Invalid number of operands!");
1933 void addMOVZMovAliasOperands(
MCInst &Inst,
unsigned N)
const {
1934 assert(
N == 1 &&
"Invalid number of operands!");
1941 addExpr(Inst, getImm());
1946 void addMOVNMovAliasOperands(
MCInst &Inst,
unsigned N)
const {
1947 assert(
N == 1 &&
"Invalid number of operands!");
1954 void addComplexRotationEvenOperands(
MCInst &Inst,
unsigned N)
const {
1955 assert(
N == 1 &&
"Invalid number of operands!");
1960 void addComplexRotationOddOperands(
MCInst &Inst,
unsigned N)
const {
1961 assert(
N == 1 &&
"Invalid number of operands!");
1968 static std::unique_ptr<AArch64Operand>
1970 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
1971 Op->Tok.Data = Str.data();
1972 Op->Tok.Length = Str.size();
1973 Op->Tok.IsSuffix = IsSuffix;
1979 static std::unique_ptr<AArch64Operand>
1981 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
1983 unsigned ShiftAmount = 0,
1984 unsigned HasExplicitAmount =
false) {
1985 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
1986 Op->Reg.RegNum = RegNum;
1988 Op->Reg.ElementWidth = 0;
1989 Op->Reg.EqualityTy = EqTy;
1990 Op->Reg.ShiftExtend.Type = ExtTy;
1991 Op->Reg.ShiftExtend.Amount = ShiftAmount;
1992 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1998 static std::unique_ptr<AArch64Operand>
1999 CreateVectorReg(
unsigned RegNum, RegKind
Kind,
unsigned ElementWidth,
2002 unsigned ShiftAmount = 0,
2003 unsigned HasExplicitAmount =
false) {
2004 assert((
Kind == RegKind::NeonVector ||
Kind == RegKind::SVEDataVector ||
2005 Kind == RegKind::SVEPredicateVector) &&
2006 "Invalid vector kind");
2007 auto Op = CreateReg(RegNum,
Kind,
S,
E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2009 Op->Reg.ElementWidth = ElementWidth;
2013 static std::unique_ptr<AArch64Operand>
2014 CreateVectorList(
unsigned RegNum,
unsigned Count,
unsigned NumElements,
2015 unsigned ElementWidth, RegKind RegisterKind,
SMLoc S,
SMLoc E,
2017 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2018 Op->VectorList.RegNum = RegNum;
2019 Op->VectorList.Count = Count;
2020 Op->VectorList.NumElements = NumElements;
2021 Op->VectorList.ElementWidth = ElementWidth;
2022 Op->VectorList.RegisterKind = RegisterKind;
2028 static std::unique_ptr<AArch64Operand>
2030 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2031 Op->VectorIndex.Val = Idx;
2037 static std::unique_ptr<AArch64Operand>
2039 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2040 Op->MatrixTileList.RegMask = RegMask;
2047 const unsigned ElementWidth) {
2048 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2050 {{0, AArch64::ZAB0},
2051 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2052 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2053 {{8, AArch64::ZAB0},
2054 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2055 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2056 {{16, AArch64::ZAH0},
2057 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2058 {{16, AArch64::ZAH1},
2059 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2060 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2061 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2062 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2063 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2066 if (ElementWidth == 64)
2069 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth,
Reg)];
2070 assert(!Regs.empty() &&
"Invalid tile or element width!");
2071 for (
auto OutReg : Regs)
2076 static std::unique_ptr<AArch64Operand> CreateImm(
const MCExpr *Val,
SMLoc S,
2078 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2085 static std::unique_ptr<AArch64Operand> CreateShiftedImm(
const MCExpr *Val,
2086 unsigned ShiftAmount,
2089 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2090 Op->ShiftedImm .Val = Val;
2091 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2097 static std::unique_ptr<AArch64Operand>
2099 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2100 Op->CondCode.Code =
Code;
2106 static std::unique_ptr<AArch64Operand>
2108 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2110 Op->FPImm.IsExact = IsExact;
2116 static std::unique_ptr<AArch64Operand> CreateBarrier(
unsigned Val,
2120 bool HasnXSModifier) {
2121 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2122 Op->Barrier.Val = Val;
2123 Op->Barrier.Data = Str.data();
2124 Op->Barrier.Length = Str.size();
2125 Op->Barrier.HasnXSModifier = HasnXSModifier;
2131 static std::unique_ptr<AArch64Operand> CreateSysReg(
StringRef Str,
SMLoc S,
2136 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2137 Op->SysReg.Data = Str.data();
2138 Op->SysReg.Length = Str.size();
2139 Op->SysReg.MRSReg = MRSReg;
2140 Op->SysReg.MSRReg = MSRReg;
2141 Op->SysReg.PStateField = PStateField;
2147 static std::unique_ptr<AArch64Operand> CreateSysCR(
unsigned Val,
SMLoc S,
2149 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2150 Op->SysCRImm.Val = Val;
2156 static std::unique_ptr<AArch64Operand> CreatePrefetch(
unsigned Val,
2160 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2161 Op->Prefetch.Val = Val;
2162 Op->Barrier.Data = Str.data();
2163 Op->Barrier.Length = Str.size();
2169 static std::unique_ptr<AArch64Operand> CreatePSBHint(
unsigned Val,
2173 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2174 Op->PSBHint.Val = Val;
2175 Op->PSBHint.Data = Str.data();
2176 Op->PSBHint.Length = Str.size();
2182 static std::unique_ptr<AArch64Operand> CreateBTIHint(
unsigned Val,
2186 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2187 Op->BTIHint.Val = Val | 32;
2188 Op->BTIHint.Data = Str.data();
2189 Op->BTIHint.Length = Str.size();
2195 static std::unique_ptr<AArch64Operand>
2196 CreateMatrixRegister(
unsigned RegNum,
unsigned ElementWidth, MatrixKind
Kind,
2198 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2199 Op->MatrixReg.RegNum = RegNum;
2200 Op->MatrixReg.ElementWidth = ElementWidth;
2201 Op->MatrixReg.Kind =
Kind;
2207 static std::unique_ptr<AArch64Operand>
2209 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2210 Op->SVCR.PStateField = PStateField;
2211 Op->SVCR.Data = Str.data();
2212 Op->SVCR.Length = Str.size();
2218 static std::unique_ptr<AArch64Operand>
2221 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2222 Op->ShiftExtend.Type = ShOp;
2223 Op->ShiftExtend.Amount = Val;
2224 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2236 OS <<
"<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2237 if (!getFPImmIsExact())
2244 OS <<
"<barrier " <<
Name <<
">";
2246 OS <<
"<barrier invalid #" << getBarrier() <<
">";
2252 case k_ShiftedImm: {
2253 unsigned Shift = getShiftedImmShift();
2254 OS <<
"<shiftedimm ";
2255 OS << *getShiftedImmVal();
2262 case k_VectorList: {
2263 OS <<
"<vectorlist ";
2264 unsigned Reg = getVectorListStart();
2265 for (
unsigned i = 0,
e = getVectorListCount();
i !=
e; ++
i)
2266 OS <<
Reg +
i <<
" ";
2271 OS <<
"<vectorindex " << getVectorIndex() <<
">";
2274 OS <<
"<sysreg: " << getSysReg() <<
'>';
2277 OS <<
"'" << getToken() <<
"'";
2280 OS <<
"c" << getSysCR();
2285 OS <<
"<prfop " <<
Name <<
">";
2287 OS <<
"<prfop invalid #" << getPrefetch() <<
">";
2291 OS << getPSBHintName();
2294 OS << getBTIHintName();
2296 case k_MatrixRegister:
2297 OS <<
"<matrix " << getMatrixReg() <<
">";
2299 case k_MatrixTileList: {
2300 OS <<
"<matrixlist ";
2301 unsigned RegMask = getMatrixTileListRegMask();
2302 unsigned MaxBits = 8;
2303 for (
unsigned I = MaxBits;
I > 0; --
I)
2304 OS << ((RegMask & (1 << (
I - 1))) >> (
I - 1));
2313 OS << "<register " << getReg() << ">";
2314 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2318 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2319 << getShiftExtendAmount();
2320 if (!hasShiftExtendAmount())
2330 static unsigned MatchRegisterName(StringRef Name);
2334 static unsigned MatchNeonVectorRegName(StringRef Name) {
2335 return StringSwitch<unsigned>(Name.lower())
2336 .Case("v0", AArch64::Q0)
2337 .Case("v1", AArch64::Q1)
2338 .Case("v2", AArch64::Q2)
2339 .Case("v3", AArch64::Q3)
2340 .Case("v4", AArch64::Q4)
2341 .Case("v5", AArch64::Q5)
2342 .Case("v6", AArch64::Q6)
2343 .Case("v7", AArch64::Q7)
2344 .Case("v8", AArch64::Q8)
2345 .Case("v9", AArch64::Q9)
2346 .Case("v10", AArch64::Q10)
2347 .Case("v11", AArch64::Q11)
2348 .Case("v12", AArch64::Q12)
2349 .Case("v13", AArch64::Q13)
2350 .Case("v14", AArch64::Q14)
2351 .Case("v15", AArch64::Q15)
2352 .Case("v16", AArch64::Q16)
2353 .Case("v17", AArch64::Q17)
2354 .Case("v18", AArch64::Q18)
2355 .Case("v19", AArch64::Q19)
2356 .Case("v20", AArch64::Q20)
2357 .Case("v21", AArch64::Q21)
2358 .Case("v22", AArch64::Q22)
2359 .Case("v23", AArch64::Q23)
2360 .Case("v24", AArch64::Q24)
2361 .Case("v25", AArch64::Q25)
2362 .Case("v26", AArch64::Q26)
2363 .Case("v27", AArch64::Q27)
2364 .Case("v28", AArch64::Q28)
2365 .Case("v29", AArch64::Q29)
2366 .Case("v30", AArch64::Q30)
2367 .Case("v31", AArch64::Q31)
2375 static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2376 RegKind VectorKind) {
2377 std::pair<int, int> Res = {-1, -1};
2379 switch (VectorKind) {
2380 case RegKind::NeonVector:
2382 StringSwitch<std::pair<int, int>>(Suffix.lower())
2384 .Case(".1d", {1, 64})
2385 .Case(".1q", {1, 128})
2386 // '.2h
' needed for fp16 scalar pairwise reductions
2387 .Case(".2h", {2, 16})
2388 .Case(".2s", {2, 32})
2389 .Case(".2d", {2, 64})
2390 // '.4b
' is another special case for the ARMv8.2a dot product
2392 .Case(".4b", {4, 8})
2393 .Case(".4h", {4, 16})
2394 .Case(".4s", {4, 32})
2395 .Case(".8b", {8, 8})
2396 .Case(".8h", {8, 16})
2397 .Case(".16b", {16, 8})
2398 // Accept the width neutral ones, too, for verbose syntax. If those
2400 // all will work out.
2402 .Case(".h", {0, 16})
2403 .Case(".s", {0, 32})
2404 .Case(".d", {0, 64})
2407 case RegKind::SVEPredicateVector:
2408 case RegKind::SVEDataVector:
2409 case RegKind::Matrix:
2410 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2413 .Case(".h", {0, 16})
2414 .Case(".s", {0, 32})
2415 .Case(".d", {0, 64})
2416 .Case(".q", {0, 128})
2420 llvm_unreachable("Unsupported RegKind");
2423 if (Res == std::make_pair(-1, -1))
2424 return Optional<std::pair<int, int>>();
2426 return Optional<std::pair<int, int>>(Res);
2429 static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2430 return parseVectorKind(Suffix, VectorKind).has_value();
2433 static unsigned matchSVEDataVectorRegName(StringRef Name) {
2434 return StringSwitch<unsigned>(Name.lower())
2435 .Case("z0", AArch64::Z0)
2436 .Case("z1", AArch64::Z1)
2437 .Case("z2", AArch64::Z2)
2438 .Case("z3", AArch64::Z3)
2439 .Case("z4", AArch64::Z4)
2440 .Case("z5", AArch64::Z5)
2441 .Case("z6", AArch64::Z6)
2442 .Case("z7", AArch64::Z7)
2443 .Case("z8", AArch64::Z8)
2444 .Case("z9", AArch64::Z9)
2445 .Case("z10", AArch64::Z10)
2446 .Case("z11", AArch64::Z11)
2447 .Case("z12", AArch64::Z12)
2448 .Case("z13", AArch64::Z13)
2449 .Case("z14", AArch64::Z14)
2450 .Case("z15", AArch64::Z15)
2451 .Case("z16", AArch64::Z16)
2452 .Case("z17", AArch64::Z17)
2453 .Case("z18", AArch64::Z18)
2454 .Case("z19", AArch64::Z19)
2455 .Case("z20", AArch64::Z20)
2456 .Case("z21", AArch64::Z21)
2457 .Case("z22", AArch64::Z22)
2458 .Case("z23", AArch64::Z23)
2459 .Case("z24", AArch64::Z24)
2460 .Case("z25", AArch64::Z25)
2461 .Case("z26", AArch64::Z26)
2462 .Case("z27", AArch64::Z27)
2463 .Case("z28", AArch64::Z28)
2464 .Case("z29", AArch64::Z29)
2465 .Case("z30", AArch64::Z30)
2466 .Case("z31", AArch64::Z31)
2470 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2471 return StringSwitch<unsigned>(Name.lower())
2472 .Case("p0", AArch64::P0)
2473 .Case("p1", AArch64::P1)
2474 .Case("p2", AArch64::P2)
2475 .Case("p3", AArch64::P3)
2476 .Case("p4", AArch64::P4)
2477 .Case("p5", AArch64::P5)
2478 .Case("p6", AArch64::P6)
2479 .Case("p7", AArch64::P7)
2480 .Case("p8", AArch64::P8)
2481 .Case("p9", AArch64::P9)
2482 .Case("p10", AArch64::P10)
2483 .Case("p11", AArch64::P11)
2484 .Case("p12", AArch64::P12)
2485 .Case("p13", AArch64::P13)
2486 .Case("p14", AArch64::P14)
2487 .Case("p15", AArch64::P15)
2491 static unsigned matchMatrixTileListRegName(StringRef Name) {
2492 return StringSwitch<unsigned>(Name.lower())
2493 .Case("za0.d", AArch64::ZAD0)
2494 .Case("za1.d", AArch64::ZAD1)
2495 .Case("za2.d", AArch64::ZAD2)
2496 .Case("za3.d", AArch64::ZAD3)
2497 .Case("za4.d", AArch64::ZAD4)
2498 .Case("za5.d", AArch64::ZAD5)
2499 .Case("za6.d", AArch64::ZAD6)
2500 .Case("za7.d", AArch64::ZAD7)
2501 .Case("za0.s", AArch64::ZAS0)
2502 .Case("za1.s", AArch64::ZAS1)
2503 .Case("za2.s", AArch64::ZAS2)
2504 .Case("za3.s", AArch64::ZAS3)
2505 .Case("za0.h", AArch64::ZAH0)
2506 .Case("za1.h", AArch64::ZAH1)
2507 .Case("za0.b", AArch64::ZAB0)
2511 static unsigned matchMatrixRegName(StringRef Name) {
2512 return StringSwitch<unsigned>(Name.lower())
2513 .Case("za", AArch64::ZA)
2514 .Case("za0.q", AArch64::ZAQ0)
2515 .Case("za1.q", AArch64::ZAQ1)
2516 .Case("za2.q", AArch64::ZAQ2)
2517 .Case("za3.q", AArch64::ZAQ3)
2518 .Case("za4.q", AArch64::ZAQ4)
2519 .Case("za5.q", AArch64::ZAQ5)
2520 .Case("za6.q", AArch64::ZAQ6)
2521 .Case("za7.q", AArch64::ZAQ7)
2522 .Case("za8.q", AArch64::ZAQ8)
2523 .Case("za9.q", AArch64::ZAQ9)
2524 .Case("za10.q", AArch64::ZAQ10)
2525 .Case("za11.q", AArch64::ZAQ11)
2526 .Case("za12.q", AArch64::ZAQ12)
2527 .Case("za13.q", AArch64::ZAQ13)
2528 .Case("za14.q", AArch64::ZAQ14)
2529 .Case("za15.q", AArch64::ZAQ15)
2530 .Case("za0.d", AArch64::ZAD0)
2531 .Case("za1.d", AArch64::ZAD1)
2532 .Case("za2.d", AArch64::ZAD2)
2533 .Case("za3.d", AArch64::ZAD3)
2534 .Case("za4.d", AArch64::ZAD4)
2535 .Case("za5.d", AArch64::ZAD5)
2536 .Case("za6.d", AArch64::ZAD6)
2537 .Case("za7.d", AArch64::ZAD7)
2538 .Case("za0.s", AArch64::ZAS0)
2539 .Case("za1.s", AArch64::ZAS1)
2540 .Case("za2.s", AArch64::ZAS2)
2541 .Case("za3.s", AArch64::ZAS3)
2542 .Case("za0.h", AArch64::ZAH0)
2543 .Case("za1.h", AArch64::ZAH1)
2544 .Case("za0.b", AArch64::ZAB0)
2545 .Case("za0h.q", AArch64::ZAQ0)
2546 .Case("za1h.q", AArch64::ZAQ1)
2547 .Case("za2h.q", AArch64::ZAQ2)
2548 .Case("za3h.q", AArch64::ZAQ3)
2549 .Case("za4h.q", AArch64::ZAQ4)
2550 .Case("za5h.q", AArch64::ZAQ5)
2551 .Case("za6h.q", AArch64::ZAQ6)
2552 .Case("za7h.q", AArch64::ZAQ7)
2553 .Case("za8h.q", AArch64::ZAQ8)
2554 .Case("za9h.q", AArch64::ZAQ9)
2555 .Case("za10h.q", AArch64::ZAQ10)
2556 .Case("za11h.q", AArch64::ZAQ11)
2557 .Case("za12h.q", AArch64::ZAQ12)
2558 .Case("za13h.q", AArch64::ZAQ13)
2559 .Case("za14h.q", AArch64::ZAQ14)
2560 .Case("za15h.q", AArch64::ZAQ15)
2561 .Case("za0h.d", AArch64::ZAD0)
2562 .Case("za1h.d", AArch64::ZAD1)
2563 .Case("za2h.d", AArch64::ZAD2)
2564 .Case("za3h.d", AArch64::ZAD3)
2565 .Case("za4h.d", AArch64::ZAD4)
2566 .Case("za5h.d", AArch64::ZAD5)
2567 .Case("za6h.d", AArch64::ZAD6)
2568 .Case("za7h.d", AArch64::ZAD7)
2569 .Case("za0h.s", AArch64::ZAS0)
2570 .Case("za1h.s", AArch64::ZAS1)
2571 .Case("za2h.s", AArch64::ZAS2)
2572 .Case("za3h.s", AArch64::ZAS3)
2573 .Case("za0h.h", AArch64::ZAH0)
2574 .Case("za1h.h", AArch64::ZAH1)
2575 .Case("za0h.b", AArch64::ZAB0)
2576 .Case("za0v.q", AArch64::ZAQ0)
2577 .Case("za1v.q", AArch64::ZAQ1)
2578 .Case("za2v.q", AArch64::ZAQ2)
2579 .Case("za3v.q", AArch64::ZAQ3)
2580 .Case("za4v.q", AArch64::ZAQ4)
2581 .Case("za5v.q", AArch64::ZAQ5)
2582 .Case("za6v.q", AArch64::ZAQ6)
2583 .Case("za7v.q", AArch64::ZAQ7)
2584 .Case("za8v.q", AArch64::ZAQ8)
2585 .Case("za9v.q", AArch64::ZAQ9)
2586 .Case("za10v.q", AArch64::ZAQ10)
2587 .Case("za11v.q", AArch64::ZAQ11)
2588 .Case("za12v.q", AArch64::ZAQ12)
2589 .Case("za13v.q", AArch64::ZAQ13)
2590 .Case("za14v.q", AArch64::ZAQ14)
2591 .Case("za15v.q", AArch64::ZAQ15)
2592 .Case("za0v.d", AArch64::ZAD0)
2593 .Case("za1v.d", AArch64::ZAD1)
2594 .Case("za2v.d", AArch64::ZAD2)
2595 .Case("za3v.d", AArch64::ZAD3)
2596 .Case("za4v.d", AArch64::ZAD4)
2597 .Case("za5v.d", AArch64::ZAD5)
2598 .Case("za6v.d", AArch64::ZAD6)
2599 .Case("za7v.d", AArch64::ZAD7)
2600 .Case("za0v.s", AArch64::ZAS0)
2601 .Case("za1v.s", AArch64::ZAS1)
2602 .Case("za2v.s", AArch64::ZAS2)
2603 .Case("za3v.s", AArch64::ZAS3)
2604 .Case("za0v.h", AArch64::ZAH0)
2605 .Case("za1v.h", AArch64::ZAH1)
2606 .Case("za0v.b", AArch64::ZAB0)
2610 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
2612 return tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success;
2615 OperandMatchResultTy AArch64AsmParser::tryParseRegister(unsigned &RegNo,
2618 StartLoc = getLoc();
2619 auto Res = tryParseScalarRegister(RegNo);
2620 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2624 // Matches a register name or register alias previously defined by '.req
'
2625 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
2627 unsigned RegNum = 0;
2628 if ((RegNum = matchSVEDataVectorRegName(Name)))
2629 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2631 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
2632 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2634 if ((RegNum = MatchNeonVectorRegName(Name)))
2635 return Kind == RegKind::NeonVector ? RegNum : 0;
2637 if ((RegNum = matchMatrixRegName(Name)))
2638 return Kind == RegKind::Matrix ? RegNum : 0;
2640 // The parsed register must be of RegKind Scalar
2641 if ((RegNum = MatchRegisterName(Name)))
2642 return Kind == RegKind::Scalar ? RegNum : 0;
2645 // Handle a few common aliases of registers.
2646 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
2647 .Case("fp", AArch64::FP)
2648 .Case("lr", AArch64::LR)
2649 .Case("x31", AArch64::XZR)
2650 .Case("w31", AArch64::WZR)
2652 return Kind == RegKind::Scalar ? RegNum : 0;
2654 // Check for aliases registered via .req. Canonicalize to lower case.
2655 // That's more consistent since
register names are
case insensitive,
and
2657 auto Entry = RegisterReqs.
find(
Name.lower());
2658 if (Entry == RegisterReqs.
end())
2662 if (
Kind == Entry->getValue().first)
2663 RegNum = Entry->getValue().second;
2672 AArch64AsmParser::tryParseScalarRegister(
unsigned &RegNum) {
2693 Error(
S,
"Expected cN operand where 0 <= N <= 15");
2697 StringRef Tok = getTok().getIdentifier();
2698 if (Tok[0] !=
'c' && Tok[0] !=
'C') {
2699 Error(
S,
"Expected cN operand where 0 <= N <= 15");
2705 if (BadNum || CRNum > 15) {
2706 Error(
S,
"Expected cN operand where 0 <= N <= 15");
2712 AArch64Operand::CreateSysCR(CRNum,
S, getLoc(), getContext()));
2717 template <
bool IsSVEPrefetch>
2724 if (IsSVEPrefetch) {
2725 if (
auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(
N))
2727 }
else if (
auto Res = AArch64PRFM::lookupPRFMByName(
N))
2732 auto LookupByEncoding = [](
unsigned E) {
2733 if (IsSVEPrefetch) {
2734 if (
auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(
E))
2736 }
else if (
auto Res = AArch64PRFM::lookupPRFMByEncoding(
E))
2740 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2747 if (getParser().parseExpression(ImmVal))
2752 TokError(
"immediate value expected for prefetch operand");
2756 if (prfop > MaxVal) {
2757 TokError(
"prefetch operand out of range, [0," + utostr(MaxVal) +
2762 auto PRFM = LookupByEncoding(MCE->
getValue());
2763 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(
""),
2769 TokError(
"prefetch hint expected");
2773 auto PRFM = LookupByName(Tok.
getString());
2775 TokError(
"prefetch hint expected");
2779 Operands.push_back(AArch64Operand::CreatePrefetch(
2791 TokError(
"invalid operand for instruction");
2795 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.
getString());
2797 TokError(
"invalid operand for instruction");
2801 Operands.push_back(AArch64Operand::CreatePSBHint(
2802 PSB->Encoding, Tok.
getString(),
S, getContext()));
2813 TokError(
"invalid operand for instruction");
2817 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.
getString());
2819 TokError(
"invalid operand for instruction");
2823 Operands.push_back(AArch64Operand::CreateBTIHint(
2824 BTI->Encoding, Tok.
getString(),
S, getContext()));
2834 const MCExpr *Expr =
nullptr;
2840 if (parseSymbolicImmVal(Expr))
2846 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2856 Error(
S,
"gotpage label reference not allowed an addend");
2867 Error(
S,
"page or gotpage label reference expected");
2876 Operands.push_back(AArch64Operand::CreateImm(Expr,
S,
E, getContext()));
2886 const MCExpr *Expr =
nullptr;
2895 if (parseSymbolicImmVal(Expr))
2901 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2908 Error(
S,
"unexpected adr label");
2914 Operands.push_back(AArch64Operand::CreateImm(Expr,
S,
E, getContext()));
2919 template<
bool AddFPZeroAsLiteral>
2933 TokError(
"invalid floating point immediate");
2939 if (Tok.
getIntVal() > 255 || isNegative) {
2940 TokError(
"encoded floating point value out of range");
2946 AArch64Operand::CreateFPImm(
F,
true,
S, getContext()));
2953 TokError(
"invalid floating point representation");
2958 RealVal.changeSign();
2960 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
2961 Operands.push_back(AArch64Operand::CreateToken(
"#0",
S, getContext()));
2962 Operands.push_back(AArch64Operand::CreateToken(
".0",
S, getContext()));
2964 Operands.push_back(AArch64Operand::CreateFPImm(
2986 if (parseSymbolicImmVal(
Imm))
2990 AArch64Operand::CreateImm(
Imm,
S, getLoc(), getContext()));
2999 !getTok().getIdentifier().equals_insensitive(
"lsl")) {
3000 Error(getLoc(),
"only 'lsl #+N' valid after immediate");
3010 Error(getLoc(),
"only 'lsl #+N' valid after immediate");
3014 int64_t ShiftAmount = getTok().getIntVal();
3016 if (ShiftAmount < 0) {
3017 Error(getLoc(),
"positive shift amount required");
3023 if (ShiftAmount == 0 &&
Imm !=
nullptr) {
3025 AArch64Operand::CreateImm(
Imm,
S, getLoc(), getContext()));
3029 Operands.push_back(AArch64Operand::CreateShiftedImm(
Imm, ShiftAmount,
S,
3030 getLoc(), getContext()));
3037 AArch64AsmParser::parseCondCodeString(
StringRef Cond, std::string &Suggestion) {
3060 getSTI().getFeatureBits()[AArch64::FeatureSVE]) {
3075 Suggestion =
"nfrst";
3082 bool invertCondCode) {
3088 std::string Suggestion;
3091 std::string
Msg =
"invalid condition code";
3092 if (!Suggestion.empty())
3093 Msg +=
", did you mean " + Suggestion +
"?";
3094 return TokError(
Msg);
3098 if (invertCondCode) {
3100 return TokError(
"condition codes AL and NV are invalid for this instruction");
3105 AArch64Operand::CreateCondCode(CC,
S, getLoc(), getContext()));
3115 TokError(
"invalid operand for instruction");
3119 unsigned PStateImm = -1;
3120 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.
getString());
3121 if (SVCR && SVCR->haveFeatures(getSTI().getFeatureBits()))
3122 PStateImm = SVCR->Encoding;
3125 AArch64Operand::CreateSVCR(PStateImm, Tok.
getString(),
S, getContext()));
3137 if (
Name.equals_insensitive(
"za")) {
3139 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3140 AArch64::ZA, 0, MatrixKind::Array,
S, getLoc(),
3145 if (parseOperand(
Operands,
false,
false))
3156 size_t DotPosition =
Name.find(
'.');
3164 .
Case(
"h", MatrixKind::Row)
3165 .
Case(
"v", MatrixKind::Col)
3171 TokError(
"Expected the register to be followed by element width suffix");
3174 unsigned ElementWidth = KindRes->second;
3178 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3179 Reg, ElementWidth,
Kind,
S, getLoc(), getContext()));
3184 if (parseOperand(
Operands,
false,
false))
3226 TokError(
"expected #imm after shift specifier");
3233 AArch64Operand::CreateShiftExtend(ShOp, 0,
false,
S,
E, getContext()));
3242 Error(
E,
"expected integer shift amount");
3247 if (getParser().parseExpression(ImmVal))
3252 Error(
E,
"expected constant '#imm' after shift specifier");
3257 Operands.push_back(AArch64Operand::CreateShiftExtend(
3258 ShOp, MCE->
getValue(),
true,
S,
E, getContext()));
3266 {
"crc", {AArch64::FeatureCRC}},
3267 {
"sm4", {AArch64::FeatureSM4}},
3268 {
"sha3", {AArch64::FeatureSHA3}},
3269 {
"sha2", {AArch64::FeatureSHA2}},
3270 {
"aes", {AArch64::FeatureAES}},
3271 {
"crypto", {AArch64::FeatureCrypto}},
3272 {
"fp", {AArch64::FeatureFPARMv8}},
3273 {
"simd", {AArch64::FeatureNEON}},
3274 {
"ras", {AArch64::FeatureRAS}},
3275 {
"lse", {AArch64::FeatureLSE}},
3276 {
"predres", {AArch64::FeaturePredRes}},
3277 {
"ccdp", {AArch64::FeatureCacheDeepPersist}},
3278 {
"mte", {AArch64::FeatureMTE}},
3279 {
"memtag", {AArch64::FeatureMTE}},
3280 {
"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3281 {
"pan", {AArch64::FeaturePAN}},
3282 {
"pan-rwv", {AArch64::FeaturePAN_RWV}},
3283 {
"ccpp", {AArch64::FeatureCCPP}},
3284 {
"rcpc", {AArch64::FeatureRCPC}},
3285 {
"rng", {AArch64::FeatureRandGen}},
3286 {
"sve", {AArch64::FeatureSVE}},
3287 {
"sve2", {AArch64::FeatureSVE2}},
3288 {
"sve2-aes", {AArch64::FeatureSVE2AES}},
3289 {
"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3290 {
"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3291 {
"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3292 {
"ls64", {AArch64::FeatureLS64}},
3293 {
"xs", {AArch64::FeatureXS}},
3294 {
"pauth", {AArch64::FeaturePAuth}},
3295 {
"flagm", {AArch64::FeatureFlagM}},
3296 {
"rme", {AArch64::FeatureRME}},
3297 {
"sme", {AArch64::FeatureSME}},
3298 {
"sme-f64", {AArch64::FeatureSMEF64}},
3299 {
"sme-i64", {AArch64::FeatureSMEI64}},
3300 {
"hbc", {AArch64::FeatureHBC}},
3301 {
"mops", {AArch64::FeatureMOPS}},
3309 if (FBS[AArch64::HasV8_0aOps])
3311 if (FBS[AArch64::HasV8_1aOps])
3313 else if (FBS[AArch64::HasV8_2aOps])
3315 else if (FBS[AArch64::HasV8_3aOps])
3317 else if (FBS[AArch64::HasV8_4aOps])
3319 else if (FBS[AArch64::HasV8_5aOps])
3321 else if (FBS[AArch64::HasV8_6aOps])
3323 else if (FBS[AArch64::HasV8_7aOps])
3325 else if (FBS[AArch64::HasV8_8aOps])
3327 else if (FBS[AArch64::HasV9_0aOps])
3329 else if (FBS[AArch64::HasV9_1aOps])
3331 else if (FBS[AArch64::HasV9_2aOps])
3333 else if (FBS[AArch64::HasV9_3aOps])
3335 else if (FBS[AArch64::HasV8_0rOps])
3342 ExtMatches.push_back(
Ext.Name);
3344 Str += !ExtMatches.empty() ? llvm::join(ExtMatches,
", ") :
"(unknown)";
3351 const uint16_t Cm = (Encoding & 0x78) >> 3;
3352 const uint16_t Cn = (Encoding & 0x780) >> 7;
3353 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3358 AArch64Operand::CreateImm(Expr,
S, getLoc(), getContext()));
3360 AArch64Operand::CreateSysCR(Cn,
S, getLoc(), getContext()));
3362 AArch64Operand::CreateSysCR(Cm,
S, getLoc(), getContext()));
3365 AArch64Operand::CreateImm(Expr,
S, getLoc(), getContext()));
3372 if (
Name.contains(
'.'))
3373 return TokError(
"invalid operand");
3376 Operands.push_back(AArch64Operand::CreateToken(
"sys", NameLoc, getContext()));
3382 if (Mnemonic ==
"ic") {
3385 return TokError(
"invalid operand for IC instruction");
3386 else if (!IC->
haveFeatures(getSTI().getFeatureBits())) {
3387 std::string Str(
"IC " + std::string(IC->
Name) +
" requires: ");
3389 return TokError(Str);
3392 }
else if (Mnemonic ==
"dc") {
3395 return TokError(
"invalid operand for DC instruction");
3396 else if (!
DC->haveFeatures(getSTI().getFeatureBits())) {
3397 std::string Str(
"DC " + std::string(
DC->Name) +
" requires: ");
3399 return TokError(Str);
3402 }
else if (Mnemonic ==
"at") {
3405 return TokError(
"invalid operand for AT instruction");
3406 else if (!AT->
haveFeatures(getSTI().getFeatureBits())) {
3407 std::string Str(
"AT " + std::string(AT->
Name) +
" requires: ");
3409 return TokError(Str);
3412 }
else if (Mnemonic ==
"tlbi") {
3415 return TokError(
"invalid operand for TLBI instruction");
3416 else if (!TLBI->
haveFeatures(getSTI().getFeatureBits())) {
3417 std::string Str(
"TLBI " + std::string(TLBI->
Name) +
" requires: ");
3419 return TokError(Str);
3422 }
else if (Mnemonic ==
"cfp" || Mnemonic ==
"dvp" || Mnemonic ==
"cpp") {
3425 return TokError(
"invalid operand for prediction restriction instruction");
3426 else if (!PRCTX->
haveFeatures(getSTI().getFeatureBits())) {
3428 Mnemonic.
upper() + std::string(PRCTX->
Name) +
" requires: ");
3430 return TokError(Str);
3433 Mnemonic ==
"cfp" ? 4 :
3434 Mnemonic ==
"dvp" ? 5 :
3435 Mnemonic ==
"cpp" ? 7 :
3437 assert(PRCTX_Op2 &&
"Invalid mnemonic for prediction restriction instruction");
3444 bool HasRegister =
false;
3449 return TokError(
"expected register operand");
3453 if (ExpectRegister && !HasRegister)
3454 return TokError(
"specified " + Mnemonic +
" op requires a register");
3455 else if (!ExpectRegister && HasRegister)
3456 return TokError(
"specified " + Mnemonic +
" op does not use a register");
3470 TokError(
"'csync' operand expected");
3475 SMLoc ExprLoc = getLoc();
3477 if (getParser().parseExpression(ImmVal))
3481 Error(ExprLoc,
"immediate value expected for barrier operand");
3485 if (Mnemonic ==
"dsb" &&
Value > 15) {
3493 Error(ExprLoc,
"barrier operand out of range");
3496 auto DB = AArch64DB::lookupDBByEncoding(
Value);
3497 Operands.push_back(AArch64Operand::CreateBarrier(
Value, DB ? DB->Name :
"",
3498 ExprLoc, getContext(),
3504 TokError(
"invalid operand for instruction");
3509 auto TSB = AArch64TSB::lookupTSBByName(Operand);
3510 auto DB = AArch64DB::lookupDBByName(Operand);
3512 if (Mnemonic ==
"isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
3513 TokError(
"'sy' or #imm operand expected");
3516 }
else if (Mnemonic ==
"tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
3517 TokError(
"'csync' operand expected");
3519 }
else if (!DB && !TSB) {
3520 if (Mnemonic ==
"dsb") {
3525 TokError(
"invalid barrier option name");
3529 Operands.push_back(AArch64Operand::CreateBarrier(
3530 DB ? DB->Encoding : TSB->Encoding, Tok.
getString(), getLoc(),
3531 getContext(),
false ));
3541 assert(Mnemonic ==
"dsb" &&
"Instruction does not accept nXS operands");
3542 if (Mnemonic !=
"dsb")
3548 SMLoc ExprLoc = getLoc();
3549 if (getParser().parseExpression(ImmVal))
3553 Error(ExprLoc,
"immediate value expected for barrier operand");
3560 Error(ExprLoc,
"barrier operand out of range");
3563 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(
Value);
3564 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
3565 ExprLoc, getContext(),
3571 TokError(
"invalid operand for instruction");
3576 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
3579 TokError(
"invalid barrier option name");
3584 AArch64Operand::CreateBarrier(DB->Encoding, Tok.
getString(), getLoc(),
3585 getContext(),
true ));
3598 if (AArch64SVCR::lookupSVCRByName(Tok.
getString()))
3603 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3604 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
3605 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3609 auto PState = AArch64PState::lookupPStateByName(Tok.
getString());
3610 unsigned PStateImm = -1;
3611 if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3612 PStateImm = PState->Encoding;
3615 AArch64Operand::CreateSysReg(Tok.
getString(), getLoc(), MRSReg, MSRReg,
3616 PStateImm, getContext()));
3632 tryParseVectorRegister(
Reg,
Kind, RegKind::NeonVector);
3640 unsigned ElementWidth = KindRes->second;
3642 AArch64Operand::CreateVectorReg(
Reg, RegKind::NeonVector, ElementWidth,
3643 S, getLoc(), getContext()));
3648 Operands.push_back(AArch64Operand::CreateToken(
Kind,
S, getContext()));
3655 SMLoc SIdx = getLoc();
3658 if (getParser().parseExpression(ImmVal))
3662 TokError(
"immediate value expected for vector index");
3671 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->
getValue(), SIdx,
3684 RegKind MatchKind) {
3693 size_t Start = 0, Next =
Name.find(
'.');
3695 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3701 TokError(
"invalid vector kind qualifier");
3718 const SMLoc S = getLoc();
3721 auto Res = tryParseVectorRegister(RegNum,
Kind, RegKind::SVEPredicateVector);
3729 unsigned ElementWidth = KindRes->second;
3730 Operands.push_back(AArch64Operand::CreateVectorReg(
3731 RegNum, RegKind::SVEPredicateVector, ElementWidth,
S,
3732 getLoc(), getContext()));
3737 if (parseOperand(
Operands,
false,
false))
3746 if (!
Kind.empty()) {
3747 Error(
S,
"not expecting size suffix");
3752 Operands.push_back(AArch64Operand::CreateToken(
"/", getLoc(), getContext()));
3757 auto Pred = getTok().getString().lower();
3758 if (Pred !=
"z" && Pred !=
"m") {
3759 Error(getLoc(),
"expecting 'm' or 'z' predication");
3764 const char *ZM = Pred ==
"z" ?
"z" :
"m";
3765 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
3774 if (!tryParseNeonVectorRegister(
Operands))
3784 bool AArch64AsmParser::parseSymbolicImmVal(
const MCExpr *&ImmVal) {
3785 bool HasELFModifier =
false;
3789 HasELFModifier =
true;
3792 return TokError(
"expect relocation specifier in operand after ':'");
3794 std::string LowerCase = getTok().getIdentifier().lower();
3845 return TokError(
"expect relocation specifier in operand after ':'");
3849 if (parseToken(
AsmToken::Colon,
"expect ':' after relocation specifier"))
3853 if (getParser().parseExpression(ImmVal))
3867 auto ParseMatrixTile = [
this](
unsigned &
Reg,
unsigned &ElementWidth) {
3869 size_t DotPosition =
Name.find(
'.');
3881 TokError(
"Expected the register to be followed by element width suffix");
3884 ElementWidth = KindRes->second;
3891 auto LCurly = getTok();
3896 Operands.push_back(AArch64Operand::CreateMatrixTileList(
3897 0,
S, getLoc(), getContext()));
3902 if (getTok().getString().equals_insensitive(
"za")) {
3908 Operands.push_back(AArch64Operand::CreateMatrixTileList(
3909 0xFF,
S, getLoc(), getContext()));
3913 SMLoc TileLoc = getLoc();
3915 unsigned FirstReg, ElementWidth;
3916 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
3918 getLexer().UnLex(LCurly);
3924 unsigned PrevReg = FirstReg;
3927 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
3930 SeenRegs.
insert(FirstReg);
3934 unsigned Reg, NextElementWidth;
3935 ParseRes = ParseMatrixTile(
Reg, NextElementWidth);
3940 if (ElementWidth != NextElementWidth) {
3941 Error(TileLoc,
"mismatched register size suffix");
3946 Warning(TileLoc,
"tile list not in ascending order");
3949 Warning(TileLoc,
"duplicate tile in list");
3952 AArch64Operand::ComputeRegsForAlias(
Reg, DRegs, ElementWidth);
3961 unsigned RegMask = 0;
3962 for (
auto Reg : DRegs)
3966 AArch64Operand::CreateMatrixTileList(RegMask,
S, getLoc(), getContext()));
3971 template <RegKind VectorKind>
3981 bool NoMatchIsError) {
3982 auto RegTok = getTok();
3983 auto ParseRes = tryParseVectorRegister(
Reg,
Kind, VectorKind);
3993 !RegTok.getString().startswith_insensitive(
"za"))) {
3994 Error(Loc,
"vector register expected");
4002 auto LCurly = getTok();
4007 auto ParseRes = ParseVector(FirstReg,
Kind, getLoc(), ExpectMatch);
4017 int64_t PrevReg = FirstReg;
4021 SMLoc Loc = getLoc();
4025 ParseRes = ParseVector(
Reg, NextKind, getLoc(),
true);
4030 if (
Kind != NextKind) {
4031 Error(Loc,
"mismatched register size suffix");
4035 unsigned Space = (PrevReg <
Reg) ? (
Reg - PrevReg) : (
Reg + 32 - PrevReg);
4037 if (Space == 0 || Space > 3) {
4038 Error(Loc,
"invalid number of vectors");
4046 SMLoc Loc = getLoc();
4049 ParseRes = ParseVector(
Reg, NextKind, getLoc(),
true);
4054 if (
Kind != NextKind) {
4055 Error(Loc,
"mismatched register size suffix");
4060 if (getContext().getRegisterInfo()->getEncodingValue(
Reg) !=
4061 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
4062 Error(Loc,
"registers must be sequential");
4075 Error(
S,
"invalid number of vectors");
4079 unsigned NumElements = 0;
4080 unsigned ElementWidth = 0;
4081 if (!
Kind.empty()) {
4083 std::tie(NumElements, ElementWidth) = *VK;
4086 Operands.push_back(AArch64Operand::CreateVectorList(
4087 FirstReg, Count, NumElements, ElementWidth, VectorKind,
S, getLoc(),
4095 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(
Operands,
true);
4104 SMLoc StartLoc = getLoc();
4112 Operands.push_back(AArch64Operand::CreateReg(
4120 Error(getLoc(),
"index must be absent or #0");
4125 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4126 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
4127 Error(getLoc(),
"index must be absent or #0");
4131 Operands.push_back(AArch64Operand::CreateReg(
4136 template <
bool ParseShiftExtend, RegConstra
intEqualityTy EqTy>
4139 SMLoc StartLoc = getLoc();
4148 Operands.push_back(AArch64Operand::CreateReg(
4158 Res = tryParseOptionalShiftExtend(ExtOpnd);
4162 auto Ext =
static_cast<AArch64Operand*
>(ExtOpnd.back().get());
4163 Operands.push_back(AArch64Operand::CreateReg(
4165 Ext->getShiftExtendType(),
Ext->getShiftExtendAmount(),
4166 Ext->hasShiftExtendAmount()));
4180 if (!getTok().getString().equals_insensitive(
"mul") ||
4181 !(NextIsVL || NextIsHash))
4185 AArch64Operand::CreateToken(
"mul", getLoc(), getContext()));
4190 AArch64Operand::CreateToken(
"vl", getLoc(), getContext()));
4202 if (
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4203 Operands.push_back(AArch64Operand::CreateImm(
4210 return Error(getLoc(),
"expected 'vl' or '#<imm>'");
4214 auto Tok = getTok();
4224 AArch64Operand::CreateToken(Keyword, Tok.
getLoc(), getContext()));
4233 bool invertCondCode) {
4237 MatchOperandParserImpl(
Operands, Mnemonic,
true);
4251 switch (getLexer().getKind()) {
4255 if (parseSymbolicImmVal(Expr))
4256 return Error(
S,
"invalid operand");
4259 Operands.push_back(AArch64Operand::CreateImm(Expr,
S,
E, getContext()));
4264 AArch64Operand::CreateToken(
"[", getLoc(), getContext()));
4269 return parseOperand(
Operands,
false,
false);
4272 if (!parseNeonVectorList(
Operands))
4276 AArch64Operand::CreateToken(
"{", getLoc(), getContext()));
4281 return parseOperand(
Operands,
false,
false);
4286 return parseCondCode(
Operands, invertCondCode);
4294 if (!parseOptionalMulOperand(
Operands))
4299 if (Mnemonic ==
"smstart" || Mnemonic ==
"smstop")
4300 return parseKeywordOperand(
Operands);
4310 if (Mnemonic ==
"brb")
4311 return parseKeywordOperand(
Operands);
4317 if (getParser().parseExpression(IdVal))
4320 Operands.push_back(AArch64Operand::CreateImm(IdVal,
S,
E, getContext()));
4332 bool isNegative =
false;
4348 if (Mnemonic !=
"fcmp" && Mnemonic !=
"fcmpe" && Mnemonic !=
"fcmeq" &&
4349 Mnemonic !=
"fcmge" && Mnemonic !=
"fcmgt" && Mnemonic !=
"fcmle" &&
4350 Mnemonic !=
"fcmlt" && Mnemonic !=
"fcmne")
4351 return TokError(
"unexpected floating point literal");
4352 else if (
IntVal != 0 || isNegative)
4353 return TokError(
"expected floating-point constant #0.0");
4356 Operands.push_back(AArch64Operand::CreateToken(
"#0",
S, getContext()));
4357 Operands.push_back(AArch64Operand::CreateToken(
".0",
S, getContext()));
4362 if (parseSymbolicImmVal(ImmVal))
4366 Operands.push_back(AArch64Operand::CreateImm(ImmVal,
S,
E, getContext()));
4370 SMLoc Loc = getLoc();
4371 if (Mnemonic !=
"ldr")
4372 return TokError(
"unexpected token in operand");
4374 const MCExpr *SubExprVal;
4375 if (getParser().parseExpression(SubExprVal))
4379 !
static_cast<AArch64Operand &
>(*
Operands[1]).isScalarReg())
4380 return Error(Loc,
"Only valid when first operand is register");
4383 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
4389 if (isa<MCConstantExpr>(SubExprVal)) {
4390 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
4391 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
4396 if (ShiftAmt <= MaxShiftAmt &&
Imm <= 0xFFFF) {
4397 Operands[0] = AArch64Operand::CreateToken(
"movz", Loc, Ctx);
4398 Operands.push_back(AArch64Operand::CreateImm(
4402 ShiftAmt,
true,
S,
E, Ctx));
4408 return Error(Loc,
"Immediate too large for register");
4412 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
4413 Operands.push_back(AArch64Operand::CreateImm(CPLoc,
S,
E, Ctx));
4419 bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
4420 const MCExpr *Expr =
nullptr;
4422 if (
check(getParser().parseExpression(Expr), L,
"expected expression"))
4425 if (
check(!
Value, L,
"expected constant expression"))
4427 Out =
Value->getValue();
4431 bool AArch64AsmParser::parseComma() {
4439 bool AArch64AsmParser::parseRegisterInRange(
unsigned &Out,
unsigned Base,
4440 unsigned First,
unsigned Last) {
4443 if (
check(ParseRegister(
Reg, Start, End), getLoc(),
"expected register"))
4448 unsigned RangeEnd =
Last;
4449 if (
Base == AArch64::X0) {
4450 if (Last == AArch64::FP) {
4451 RangeEnd = AArch64::X28;
4452 if (
Reg == AArch64::FP) {
4457 if (Last == AArch64::LR) {
4458 RangeEnd = AArch64::X28;
4459 if (
Reg == AArch64::FP) {
4462 }
else if (
Reg == AArch64::LR) {
4469 if (
check(Reg < First || Reg > RangeEnd, Start,
4470 Twine(
"expected register in range ") +
4480 auto &AOp1 =
static_cast<const AArch64Operand&
>(Op1);
4481 auto &AOp2 =
static_cast<const AArch64Operand&
>(Op2);
4482 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
4483 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
4486 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
4487 "Testing equality of non-scalar registers not supported");
4490 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
4492 if (AOp1.getRegEqualityTy() == EqualsSubReg)
4494 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
4496 if (AOp2.getRegEqualityTy() == EqualsSubReg)
4508 .
Case(
"beq",
"b.eq")
4509 .
Case(
"bne",
"b.ne")
4510 .
Case(
"bhs",
"b.hs")
4511 .
Case(
"bcs",
"b.cs")
4512 .
Case(
"blo",
"b.lo")
4513 .
Case(
"bcc",
"b.cc")
4514 .
Case(
"bmi",
"b.mi")
4515 .
Case(
"bpl",
"b.pl")
4516 .
Case(
"bvs",
"b.vs")
4517 .
Case(
"bvc",
"b.vc")
4518 .
Case(
"bhi",
"b.hi")
4519 .
Case(
"bls",
"b.ls")
4520 .
Case(
"bge",
"b.ge")
4521 .
Case(
"blt",
"b.lt")
4522 .
Case(
"bgt",
"b.gt")
4523 .
Case(
"ble",
"b.le")
4524 .
Case(
"bal",
"b.al")
4525 .
Case(
"bnv",
"b.nv")
4530 getTok().getIdentifier().lower() ==
".req") {
4531 parseDirectiveReq(
Name, NameLoc);
4538 size_t Start = 0, Next =
Name.find(
'.');
4543 if (Head ==
"ic" || Head ==
"dc" || Head ==
"at" || Head ==
"tlbi" ||
4544 Head ==
"cfp" || Head ==
"dvp" || Head ==
"cpp")
4545 return parseSysAlias(Head, NameLoc,
Operands);
4547 Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
4553 Next =
Name.find(
'.', Start + 1);
4554 Head =
Name.slice(Start + 1, Next);
4558 std::string Suggestion;
4561 std::string
Msg =
"invalid condition code";
4562 if (!Suggestion.empty())
4563 Msg +=
", did you mean " + Suggestion +
"?";
4566 Operands.push_back(AArch64Operand::CreateToken(
".", SuffixLoc, getContext(),
4569 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
4575 Next =
Name.find(
'.', Start + 1);
4576 Head =
Name.slice(Start, Next);
4579 Operands.push_back(AArch64Operand::CreateToken(
4580 Head, SuffixLoc, getContext(),
true));
4585 bool condCodeFourthOperand =
4586 (Head ==
"ccmp" || Head ==
"ccmn" || Head ==
"fccmp" ||
4587 Head ==
"fccmpe" || Head ==
"fcsel" || Head ==
"csel" ||
4588 Head ==
"csinc" || Head ==
"csinv" || Head ==
"csneg");
4596 bool condCodeSecondOperand = (Head ==
"cset" || Head ==
"csetm");
4597 bool condCodeThirdOperand =
4598 (Head ==
"cinc" || Head ==
"cinv" || Head ==
"cneg");
4606 if (parseOperand(
Operands, (
N == 4 && condCodeFourthOperand) ||
4607 (
N == 3 && condCodeThirdOperand) ||
4608 (
N == 2 && condCodeSecondOperand),
4609 condCodeSecondOperand || condCodeThirdOperand)) {
4629 AArch64Operand::CreateToken(
"]", getLoc(), getContext()));
4632 AArch64Operand::CreateToken(
"!", getLoc(), getContext()));
4635 AArch64Operand::CreateToken(
"}", getLoc(), getContext()));
4648 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
4649 return (ZReg == ((
Reg - AArch64::B0) + AArch64::Z0)) ||
4650 (ZReg == ((
Reg - AArch64::H0) + AArch64::Z0)) ||
4651 (ZReg == ((
Reg - AArch64::S0) + AArch64::Z0)) ||
4652 (ZReg == ((
Reg - AArch64::D0) + AArch64::Z0)) ||
4653 (ZReg == ((
Reg - AArch64::Q0) + AArch64::Z0)) ||
4654 (ZReg == ((
Reg - AArch64::Z0) + AArch64::Z0));
4660 bool AArch64AsmParser::validateInstruction(
MCInst &Inst,
SMLoc &IDLoc,
4669 PrefixInfo
Prefix = NextPrefix;
4670 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.
TSFlags);
4682 return Error(IDLoc,
"instruction is unpredictable when following a"
4683 " movprfx, suggest replacing movprfx with mov");
4687 return Error(Loc[0],
"instruction is unpredictable when following a"
4688 " movprfx writing to a different destination");
4695 return Error(Loc[0],
"instruction is unpredictable when following a"
4696 " movprfx and destination also used as non-destructive"
4700 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
4701 if (
Prefix.isPredicated()) {
4715 return Error(IDLoc,
"instruction is unpredictable when following a"
4716 " predicated movprfx, suggest using unpredicated movprfx");
4720 return Error(IDLoc,
"instruction is unpredictable when following a"
4721 " predicated movprfx using a different general predicate");
4725 return Error(IDLoc,
"instruction is unpredictable when following a"
4726 " predicated movprfx with a different element size");
4734 case AArch64::LDPSWpre:
4735 case AArch64::LDPWpost:
4736 case AArch64::LDPWpre:
4737 case AArch64::LDPXpost:
4738 case AArch64::LDPXpre: {
4743 return Error(Loc[0],
"unpredictable LDP instruction, writeback base "
4744 "is also a destination");
4746 return Error(Loc[1],
"unpredictable LDP instruction, writeback base "
4747 "is also a destination");
4750 case AArch64::LDPDi:
4751 case AArch64::LDPQi:
4752 case AArch64::LDPSi:
4753 case AArch64::LDPSWi:
4754 case AArch64::LDPWi:
4755 case AArch64::LDPXi: {
4759 return Error(Loc[1],
"unpredictable LDP instruction, Rt2==Rt");
4762 case AArch64::LDPDpost:
4763 case AArch64::LDPDpre:
4764 case AArch64::LDPQpost:
4765 case AArch64::LDPQpre:
4766 case AArch64::LDPSpost:
4767 case AArch64::LDPSpre:
4768 case AArch64::LDPSWpost: {
4772 return Error(Loc[1],
"unpredictable LDP instruction, Rt2==Rt");
4775 case AArch64::STPDpost:
4776 case AArch64::STPDpre:
4777 case AArch64::STPQpost:
4778 case AArch64::STPQpre:
4779 case AArch64::STPSpost:
4780 case AArch64::STPSpre:
4781 case AArch64::STPWpost:
4782 case AArch64::STPWpre:
4783 case AArch64::STPXpost:
4784 case AArch64::STPXpre: {
4789 return Error(Loc[0],
"unpredictable STP instruction, writeback base "
4790 "is also a source");
4792 return Error(Loc[1],
"unpredictable STP instruction, writeback base "
4793 "is also a source");
4796 case AArch64::LDRBBpre:
4797 case AArch64::LDRBpre:
4798 case AArch64::LDRHHpre:
4799 case AArch64::LDRHpre:
4800 case AArch64::LDRSBWpre:
4801 case AArch64::LDRSBXpre:
4802 case AArch64::LDRSHWpre:
4803 case AArch64::LDRSHXpre:
4804 case AArch64::LDRSWpre:
4805 case AArch64::LDRWpre:
4806 case AArch64::LDRXpre:
4807 case AArch64::LDRBBpost:
4808 case AArch64::LDRBpost:
4809 case AArch64::LDRHHpost:
4810 case AArch64::LDRHpost:
4811 case AArch64::LDRSBWpost:
4812 case AArch64::LDRSBXpost:
4813 case AArch64::LDRSHWpost:
4814 case AArch64::LDRSHXpost:
4815 case AArch64::LDRSWpost:
4816 case AArch64::LDRWpost:
4817 case AArch64::LDRXpost: {
4821 return Error(Loc[0],
"unpredictable LDR instruction, writeback base "
4822 "is also a source");
4825 case AArch64::STRBBpost:
4826 case AArch64::STRBpost:
4827 case AArch64::STRHHpost:
4828 case AArch64::STRHpost:
4829 case AArch64::STRWpost:
4830 case AArch64::STRXpost:
4831 case AArch64::STRBBpre:
4832 case AArch64::STRBpre:
4833 case AArch64::STRHHpre:
4834 case AArch64::STRHpre:
4835 case AArch64::STRWpre:
4836 case AArch64::STRXpre: {
4840 return Error(Loc[0],
"unpredictable STR instruction, writeback base "
4841 "is also a source");
4844 case AArch64::STXRB:
4845 case AArch64::STXRH:
4846 case AArch64::STXRW:
4847 case AArch64::STXRX:
4848 case AArch64::STLXRB:
4849 case AArch64::STLXRH:
4850 case AArch64::STLXRW:
4851 case AArch64::STLXRX: {
4857 return Error(Loc[0],
4858 "unpredictable STXR instruction, status is also a source");
4861 case AArch64::STXPW:
4862 case AArch64::STXPX:
4863 case AArch64::STLXPW:
4864 case AArch64::STLXPX: {
4871 return Error(Loc[0],
4872 "unpredictable STXP instruction, status is also a source");
4875 case AArch64::LDRABwriteback:
4876 case AArch64::LDRAAwriteback: {
4880 return Error(Loc[0],
4881 "unpredictable LDRA instruction, writeback base"
4882 " is also a destination");
4889 case AArch64::CPYFP:
4890 case AArch64::CPYFPWN:
4891 case AArch64::CPYFPRN:
4892 case AArch64::CPYFPN:
4893 case AArch64::CPYFPWT:
4894 case AArch64::CPYFPWTWN:
4895 case AArch64::CPYFPWTRN:
4896 case AArch64::CPYFPWTN:
4897 case AArch64::CPYFPRT:
4898 case AArch64::CPYFPRTWN:
4899 case AArch64::CPYFPRTRN:
4900 case AArch64::CPYFPRTN:
4901 case AArch64::CPYFPT:
4902 case AArch64::CPYFPTWN:
4903 case AArch64::CPYFPTRN:
4904 case AArch64::CPYFPTN:
4905 case AArch64::CPYFM:
4906 case AArch64::CPYFMWN:
4907 case AArch64::CPYFMRN:
4908 case AArch64::CPYFMN:
4909 case AArch64::CPYFMWT:
4910 case AArch64::CPYFMWTWN:
4911 case AArch64::CPYFMWTRN:
4912 case AArch64::CPYFMWTN:
4913 case AArch64::CPYFMRT:
4914 case AArch64::CPYFMRTWN:
4915 case AArch64::CPYFMRTRN:
4916 case AArch64::CPYFMRTN:
4917 case AArch64::CPYFMT:
4918 case AArch64::CPYFMTWN:
4919 case AArch64::CPYFMTRN:
4920 case AArch64::CPYFMTN:
4921 case AArch64::CPYFE:
4922 case AArch64::CPYFEWN:
4923 case AArch64::CPYFERN:
4924 case AArch64::CPYFEN:
4925 case AArch64::CPYFEWT:
4926 case AArch64::CPYFEWTWN:
4927 case AArch64::CPYFEWTRN:
4928 case AArch64::CPYFEWTN:
4929 case AArch64::CPYFERT:
4930 case AArch64::CPYFERTWN:
4931 case AArch64::CPYFERTRN:
4932 case AArch64::CPYFERTN:
4933 case AArch64::CPYFET:
4934 case AArch64::CPYFETWN:
4935 case AArch64::CPYFETRN:
4936 case AArch64::CPYFETN:
4938 case AArch64::CPYPWN:
4939 case AArch64::CPYPRN:
4940 case AArch64::CPYPN:
4941 case AArch64::CPYPWT:
4942 case AArch64::CPYPWTWN:
4943 case AArch64::CPYPWTRN:
4944 case AArch64::CPYPWTN:
4945 case AArch64::CPYPRT:
4946 case AArch64::CPYPRTWN:
4947 case AArch64::CPYPRTRN:
4948 case AArch64::CPYPRTN:
4949 case AArch64::CPYPT:
4950 case AArch64::CPYPTWN:
4951 case AArch64::CPYPTRN:
4952 case AArch64::CPYPTN:
4954 case AArch64::CPYMWN:
4955 case AArch64::CPYMRN:
4956 case AArch64::CPYMN:
4957 case AArch64::CPYMWT:
4958 case AArch64::CPYMWTWN:
4959 case AArch64::CPYMWTRN:
4960 case AArch64::CPYMWTN:
4961 case AArch64::CPYMRT:
4962 case AArch64::CPYMRTWN:
4963 case AArch64::CPYMRTRN:
4964 case AArch64::CPYMRTN:
4965 case AArch64::CPYMT:
4966 case AArch64::CPYMTWN:
4967 case AArch64::CPYMTRN:
4968 case AArch64::CPYMTN:
4970 case AArch64::CPYEWN:
4971 case AArch64::CPYERN:
4972 case AArch64::CPYEN:
4973 case AArch64::CPYEWT:
4974 case AArch64::CPYEWTWN:
4975 case AArch64::CPYEWTRN:
4976 case AArch64::CPYEWTN:
4977 case AArch64::CPYERT:
4978 case AArch64::CPYERTWN:
4979 case AArch64::CPYERTRN:
4980 case AArch64::CPYERTN:
4981 case AArch64::CPYET:
4982 case AArch64::CPYETWN:
4983 case AArch64::CPYETRN:
4984 case AArch64::CPYETN: {
4992 return Error(Loc[0],
4993 "invalid CPY instruction, Xd_wb and Xd do not match");
4995 return Error(Loc[0],
4996 "invalid CPY instruction, Xs_wb and Xs do not match");
4998 return Error(Loc[0],
4999 "invalid CPY instruction, Xn_wb and Xn do not match");
5001 return Error(Loc[0],
"invalid CPY instruction, destination and source"
5002 " registers are the same");
5004 return Error(Loc[0],
"invalid CPY instruction, destination and size"
5005 " registers are the same");
5007 return Error(Loc[0],
"invalid CPY instruction, source and size"
5008 " registers are the same");
5012 case AArch64::SETPT:
5013 case AArch64::SETPN:
5014 case AArch64::SETPTN:
5016 case AArch64::SETMT:
5017 case AArch64::SETMN:
5018 case AArch64::SETMTN:
5020 case AArch64::SETET:
5021 case AArch64::SETEN:
5022 case AArch64::SETETN:
5023 case AArch64::SETGP:
5024 case AArch64::SETGPT:
5025 case AArch64::SETGPN:
5026 case AArch64::SETGPTN:
5027 case AArch64::SETGM:
5028 case AArch64::SETGMT:
5029 case AArch64::SETGMN:
5030 case AArch64::SETGMTN:
5031 case AArch64::MOPSSETGE:
5032 case AArch64::MOPSSETGET:
5033 case AArch64::MOPSSETGEN:
5034 case AArch64::MOPSSETGETN: {
5041 return Error(Loc[0],
5042 "invalid SET instruction, Xd_wb and Xd do not match");
5044 return Error(Loc[0],
5045 "invalid SET instruction, Xn_wb and Xn do not match");
5047 return Error(Loc[0],
"invalid SET instruction, destination and size"
5048 " registers are the same");
5050 return Error(Loc[0],
"invalid SET instruction, destination and source"
5051 " registers are the same");
5053 return Error(Loc[0],
"invalid SET instruction, source and size"
5054 " registers are the same");
5063 case AArch64::ADDSWri:
5064 case AArch64::ADDSXri:
5065 case AArch64::ADDWri:
5066 case AArch64::ADDXri:
5067 case AArch64::SUBSWri:
5068 case AArch64::SUBSXri:
5069 case AArch64::SUBWri:
5070 case AArch64::SUBXri: {
5078 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
5105 return Error(Loc.back(),
"invalid immediate expression");
5118 unsigned VariantID = 0);
5120 bool AArch64AsmParser::showMatchError(
SMLoc Loc,
unsigned ErrCode,
5124 case Match_InvalidTiedOperand: {
5125 RegConstraintEqualityTy EqTy =
5127 .getRegEqualityTy();
5129 case RegConstraintEqualityTy::EqualsSubReg:
5130 return Error(Loc,
"operand must be 64-bit form of destination register");
5131 case RegConstraintEqualityTy::EqualsSuperReg:
5132 return Error(Loc,
"operand must be 32-bit form of destination register");
5133 case RegConstraintEqualityTy::EqualsReg:
5134 return Error(Loc,
"operand must match destination register");
5138 case Match_MissingFeature:
5140 "instruction requires a CPU feature not currently enabled");
5141 case Match_InvalidOperand:
5142 return Error(Loc,
"invalid operand for instruction");
5143 case Match_InvalidSuffix:
5144 return Error(Loc,
"invalid type suffix for instruction");
5145 case Match_InvalidCondCode:
5146 return Error(Loc,
"expected AArch64 condition code");
5147 case Match_AddSubRegExtendSmall:
5149 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
5150 case Match_AddSubRegExtendLarge:
5152 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
5153 case Match_AddSubSecondSource:
5155 "expected compatible register, symbol or integer in range [0, 4095]");
5156 case Match_LogicalSecondSource:
5157 return Error(Loc,
"expected compatible register or logical immediate");
5158 case Match_InvalidMovImm32Shift:
5159 return Error(Loc,
"expected 'lsl' with optional integer 0 or 16");
5160 case Match_InvalidMovImm64Shift:
5161 return Error(Loc,
"expected 'lsl' with optional integer 0, 16, 32 or 48");
5162 case Match_AddSubRegShift32:
5164 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
5165 case Match_AddSubRegShift64:
5167 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
5168 case Match_InvalidFPImm:
5170 "expected compatible register or floating-point constant");
5171 case Match_InvalidMemoryIndexedSImm6:
5172 return Error(Loc,
"index must be an integer in range [-32, 31].");
5173 case Match_InvalidMemoryIndexedSImm5:
5174 return Error(Loc,
"index must be an integer in range [-16, 15].");
5175 case Match_InvalidMemoryIndexed1SImm4:
5176 return Error(Loc,
"index must be an integer in range [-8, 7].");
5177 case Match_InvalidMemoryI