71 SVEPredicateAsCounter,
77enum class MatrixKind {
Array, Tile, Row, Col };
79enum RegConstraintEqualityTy {
97 case AArch64::MOVPRFX_ZZ:
101 case AArch64::MOVPRFX_ZPmZ_B:
102 case AArch64::MOVPRFX_ZPmZ_H:
103 case AArch64::MOVPRFX_ZPmZ_S:
104 case AArch64::MOVPRFX_ZPmZ_D:
109 "No destructive element size set for movprfx");
113 case AArch64::MOVPRFX_ZPzZ_B:
114 case AArch64::MOVPRFX_ZPzZ_H:
115 case AArch64::MOVPRFX_ZPzZ_S:
116 case AArch64::MOVPRFX_ZPzZ_D:
121 "No destructive element size set for movprfx");
// Defaulted constructor; members rely on their in-class initializers
// (e.g. Predicated = false below).
132 PrefixInfo() =
default;
// True when this PrefixInfo is active — presumably set while tracking a
// movprfx instruction (see the MOVPRFX cases above); confirm in full source.
133 bool isActive()
const {
return Active; }
135 unsigned getElementSize()
const {
// Returns the recorded destination register (Dst) of the prefix.
139 unsigned getDstReg()
const {
return Dst; }
140 unsigned getPgReg()
const {
147 bool Predicated =
false;
148 unsigned ElementSize;
164 std::string &Suggestion);
166 unsigned matchRegisterNameAlias(
StringRef Name, RegKind Kind);
168 bool parseSymbolicImmVal(
const MCExpr *&ImmVal);
174 bool invertCondCode);
175 bool parseImmExpr(int64_t &Out);
177 bool parseRegisterInRange(
unsigned &Out,
unsigned Base,
unsigned First,
183 bool parseDirectiveArch(
SMLoc L);
184 bool parseDirectiveArchExtension(
SMLoc L);
185 bool parseDirectiveCPU(
SMLoc L);
186 bool parseDirectiveInst(
SMLoc L);
188 bool parseDirectiveTLSDescCall(
SMLoc L);
191 bool parseDirectiveLtorg(
SMLoc L);
194 bool parseDirectiveUnreq(
SMLoc L);
195 bool parseDirectiveCFINegateRAState();
196 bool parseDirectiveCFIBKeyFrame();
197 bool parseDirectiveCFIMTETaggedFrame();
199 bool parseDirectiveVariantPCS(
SMLoc L);
201 bool parseDirectiveSEHAllocStack(
SMLoc L);
202 bool parseDirectiveSEHPrologEnd(
SMLoc L);
203 bool parseDirectiveSEHSaveR19R20X(
SMLoc L);
204 bool parseDirectiveSEHSaveFPLR(
SMLoc L);
205 bool parseDirectiveSEHSaveFPLRX(
SMLoc L);
206 bool parseDirectiveSEHSaveReg(
SMLoc L);
207 bool parseDirectiveSEHSaveRegX(
SMLoc L);
208 bool parseDirectiveSEHSaveRegP(
SMLoc L);
209 bool parseDirectiveSEHSaveRegPX(
SMLoc L);
210 bool parseDirectiveSEHSaveLRPair(
SMLoc L);
211 bool parseDirectiveSEHSaveFReg(
SMLoc L);
212 bool parseDirectiveSEHSaveFRegX(
SMLoc L);
213 bool parseDirectiveSEHSaveFRegP(
SMLoc L);
214 bool parseDirectiveSEHSaveFRegPX(
SMLoc L);
215 bool parseDirectiveSEHSetFP(
SMLoc L);
216 bool parseDirectiveSEHAddFP(
SMLoc L);
217 bool parseDirectiveSEHNop(
SMLoc L);
218 bool parseDirectiveSEHSaveNext(
SMLoc L);
219 bool parseDirectiveSEHEpilogStart(
SMLoc L);
220 bool parseDirectiveSEHEpilogEnd(
SMLoc L);
221 bool parseDirectiveSEHTrapFrame(
SMLoc L);
222 bool parseDirectiveSEHMachineFrame(
SMLoc L);
223 bool parseDirectiveSEHContext(
SMLoc L);
224 bool parseDirectiveSEHClearUnwoundToCall(
SMLoc L);
225 bool parseDirectiveSEHPACSignLR(
SMLoc L);
226 bool parseDirectiveSEHSaveAnyReg(
SMLoc L,
bool Paired,
bool Writeback);
228 bool validateInstruction(
MCInst &Inst,
SMLoc &IDLoc,
230 unsigned getNumRegsForRegKind(RegKind K);
234 bool MatchingInlineAsm)
override;
238#define GET_ASSEMBLER_HEADER
239#include "AArch64GenAsmMatcher.inc"
253 template <
bool IsSVEPrefetch = false>
260 template<
bool AddFPZeroAsLiteral>
268 template <
bool ParseShiftExtend,
269 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
272 template <
bool ParseShiftExtend,
bool ParseSuffix>
274 template <RegKind RK>
276 template <RegKind VectorKind>
278 bool ExpectMatch =
false);
286 enum AArch64MatchResultTy {
288#define GET_OPERAND_DIAGNOSTIC_TYPES
289#include "AArch64GenAsmMatcher.inc"
320 SMLoc &EndLoc)
override;
322 SMLoc &EndLoc)
override;
325 unsigned Kind)
override;
327 static bool classifySymbolRef(
const MCExpr *Expr,
359 SMLoc StartLoc, EndLoc;
368 struct ShiftExtendOp {
371 bool HasExplicitAmount;
381 RegConstraintEqualityTy EqualityTy;
397 ShiftExtendOp ShiftExtend;
402 unsigned ElementWidth;
406 struct MatrixTileListOp {
407 unsigned RegMask = 0;
410 struct VectorListOp {
414 unsigned NumElements;
415 unsigned ElementWidth;
416 RegKind RegisterKind;
419 struct VectorIndexOp {
427 struct ShiftedImmOp {
429 unsigned ShiftAmount;
486 unsigned PStateField;
492 struct MatrixRegOp MatrixReg;
493 struct MatrixTileListOp MatrixTileList;
494 struct VectorListOp VectorList;
495 struct VectorIndexOp VectorIndex;
497 struct ShiftedImmOp ShiftedImm;
498 struct ImmRangeOp ImmRange;
500 struct FPImmOp FPImm;
502 struct SysRegOp SysReg;
503 struct SysCRImmOp SysCRImm;
505 struct PSBHintOp PSBHint;
506 struct BTIHintOp BTIHint;
507 struct ShiftExtendOp ShiftExtend;
520 StartLoc =
o.StartLoc;
530 ShiftedImm =
o.ShiftedImm;
533 ImmRange =
o.ImmRange;
547 case k_MatrixRegister:
548 MatrixReg =
o.MatrixReg;
550 case k_MatrixTileList:
551 MatrixTileList =
o.MatrixTileList;
554 VectorList =
o.VectorList;
557 VectorIndex =
o.VectorIndex;
563 SysCRImm =
o.SysCRImm;
575 ShiftExtend =
o.ShiftExtend;
// Override: source location where this parsed operand begins.
584 SMLoc getStartLoc()
const override {
return StartLoc; }
// Override: source location marking the end of this parsed operand.
586 SMLoc getEndLoc()
const override {
return EndLoc; }
589 assert(Kind == k_Token &&
"Invalid access!");
593 bool isTokenSuffix()
const {
594 assert(Kind == k_Token &&
"Invalid access!");
598 const MCExpr *getImm()
const {
599 assert(Kind == k_Immediate &&
"Invalid access!");
603 const MCExpr *getShiftedImmVal()
const {
604 assert(Kind == k_ShiftedImm &&
"Invalid access!");
605 return ShiftedImm.Val;
608 unsigned getShiftedImmShift()
const {
609 assert(Kind == k_ShiftedImm &&
"Invalid access!");
610 return ShiftedImm.ShiftAmount;
613 unsigned getFirstImmVal()
const {
614 assert(Kind == k_ImmRange &&
"Invalid access!");
615 return ImmRange.First;
618 unsigned getLastImmVal()
const {
619 assert(Kind == k_ImmRange &&
"Invalid access!");
620 return ImmRange.Last;
624 assert(Kind == k_CondCode &&
"Invalid access!");
629 assert (Kind == k_FPImm &&
"Invalid access!");
630 return APFloat(APFloat::IEEEdouble(),
APInt(64, FPImm.Val,
true));
633 bool getFPImmIsExact()
const {
634 assert (Kind == k_FPImm &&
"Invalid access!");
635 return FPImm.IsExact;
638 unsigned getBarrier()
const {
639 assert(Kind == k_Barrier &&
"Invalid access!");
644 assert(Kind == k_Barrier &&
"Invalid access!");
648 bool getBarriernXSModifier()
const {
649 assert(Kind == k_Barrier &&
"Invalid access!");
653 unsigned getReg()
const override {
654 assert(Kind == k_Register &&
"Invalid access!");
658 unsigned getMatrixReg()
const {
659 assert(Kind == k_MatrixRegister &&
"Invalid access!");
660 return MatrixReg.RegNum;
663 unsigned getMatrixElementWidth()
const {
664 assert(Kind == k_MatrixRegister &&
"Invalid access!");
665 return MatrixReg.ElementWidth;
668 MatrixKind getMatrixKind()
const {
669 assert(Kind == k_MatrixRegister &&
"Invalid access!");
670 return MatrixReg.Kind;
673 unsigned getMatrixTileListRegMask()
const {
674 assert(isMatrixTileList() &&
"Invalid access!");
675 return MatrixTileList.RegMask;
678 RegConstraintEqualityTy getRegEqualityTy()
const {
679 assert(Kind == k_Register &&
"Invalid access!");
680 return Reg.EqualityTy;
683 unsigned getVectorListStart()
const {
684 assert(Kind == k_VectorList &&
"Invalid access!");
685 return VectorList.RegNum;
688 unsigned getVectorListCount()
const {
689 assert(Kind == k_VectorList &&
"Invalid access!");
690 return VectorList.Count;
693 unsigned getVectorListStride()
const {
694 assert(Kind == k_VectorList &&
"Invalid access!");
695 return VectorList.Stride;
698 int getVectorIndex()
const {
699 assert(Kind == k_VectorIndex &&
"Invalid access!");
700 return VectorIndex.Val;
704 assert(Kind == k_SysReg &&
"Invalid access!");
705 return StringRef(SysReg.Data, SysReg.Length);
708 unsigned getSysCR()
const {
709 assert(Kind == k_SysCR &&
"Invalid access!");
713 unsigned getPrefetch()
const {
714 assert(Kind == k_Prefetch &&
"Invalid access!");
718 unsigned getPSBHint()
const {
719 assert(Kind == k_PSBHint &&
"Invalid access!");
724 assert(Kind == k_PSBHint &&
"Invalid access!");
725 return StringRef(PSBHint.Data, PSBHint.Length);
728 unsigned getBTIHint()
const {
729 assert(Kind == k_BTIHint &&
"Invalid access!");
734 assert(Kind == k_BTIHint &&
"Invalid access!");
735 return StringRef(BTIHint.Data, BTIHint.Length);
739 assert(Kind == k_SVCR &&
"Invalid access!");
740 return StringRef(SVCR.Data, SVCR.Length);
744 assert(Kind == k_Prefetch &&
"Invalid access!");
749 if (Kind == k_ShiftExtend)
750 return ShiftExtend.Type;
751 if (Kind == k_Register)
752 return Reg.ShiftExtend.Type;
756 unsigned getShiftExtendAmount()
const {
757 if (Kind == k_ShiftExtend)
758 return ShiftExtend.Amount;
759 if (Kind == k_Register)
760 return Reg.ShiftExtend.Amount;
764 bool hasShiftExtendAmount()
const {
765 if (Kind == k_ShiftExtend)
766 return ShiftExtend.HasExplicitAmount;
767 if (Kind == k_Register)
768 return Reg.ShiftExtend.HasExplicitAmount;
772 bool isImm()
const override {
return Kind == k_Immediate; }
773 bool isMem()
const override {
return false; }
775 bool isUImm6()
const {
782 return (Val >= 0 && Val < 64);
// A plain signed immediate of Width bits is a scaled signed immediate
// with a scale of 1.
785 template <
int W
idth>
bool isSImm()
const {
return isSImmScaled<Width, 1>(); }
788 return isImmScaled<Bits, Scale>(
true);
791 template <
int Bits,
int Scale,
int Offset = 0,
bool IsRange = false>
793 if (IsRange && isImmRange() &&
794 (getLastImmVal() != getFirstImmVal() +
Offset))
795 return DiagnosticPredicateTy::NoMatch;
797 return isImmScaled<Bits, Scale, IsRange>(
false);
800 template <
int Bits,
int Scale,
bool IsRange = false>
802 if ((!
isImm() && !isImmRange()) || (
isImm() && IsRange) ||
803 (isImmRange() && !IsRange))
804 return DiagnosticPredicateTy::NoMatch;
808 Val = getFirstImmVal();
812 return DiagnosticPredicateTy::NoMatch;
816 int64_t MinVal, MaxVal;
818 int64_t Shift =
Bits - 1;
819 MinVal = (int64_t(1) << Shift) * -Scale;
820 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
823 MaxVal = ((int64_t(1) <<
Bits) - 1) * Scale;
826 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
827 return DiagnosticPredicateTy::Match;
829 return DiagnosticPredicateTy::NearMatch;
834 return DiagnosticPredicateTy::NoMatch;
835 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
837 return DiagnosticPredicateTy::NoMatch;
839 if (Val >= 0 && Val < 32)
840 return DiagnosticPredicateTy::Match;
841 return DiagnosticPredicateTy::NearMatch;
846 return DiagnosticPredicateTy::NoMatch;
847 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
849 return DiagnosticPredicateTy::NoMatch;
851 if (Val >= 0 && Val <= 1)
852 return DiagnosticPredicateTy::Match;
853 return DiagnosticPredicateTy::NearMatch;
856 bool isSymbolicUImm12Offset(
const MCExpr *Expr)
const {
860 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
892 template <
int Scale>
bool isUImm12Offset()
const {
898 return isSymbolicUImm12Offset(getImm());
901 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
904 template <
int N,
int M>
905 bool isImmInRange()
const {
912 return (Val >=
N && Val <= M);
917 template <
typename T>
918 bool isLogicalImm()
const {
// True iff this operand's Kind is k_ShiftedImm.
935 bool isShiftedImm()
const {
return Kind == k_ShiftedImm; }
// True iff this operand's Kind is k_ImmRange.
937 bool isImmRange()
const {
return Kind == k_ImmRange; }
942 template <
unsigned W
idth>
943 std::optional<std::pair<int64_t, unsigned>> getShiftedVal()
const {
944 if (isShiftedImm() && Width == getShiftedImmShift())
945 if (
auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
946 return std::make_pair(
CE->getValue(), Width);
949 if (
auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
950 int64_t Val =
CE->getValue();
952 return std::make_pair(Val >> Width, Width);
954 return std::make_pair(Val, 0u);
960 bool isAddSubImm()
const {
961 if (!isShiftedImm() && !
isImm())
967 if (isShiftedImm()) {
968 unsigned Shift = ShiftedImm.ShiftAmount;
969 Expr = ShiftedImm.Val;
970 if (Shift != 0 && Shift != 12)
979 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
980 DarwinRefKind, Addend)) {
997 if (
auto ShiftedVal = getShiftedVal<12>())
998 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
1005 bool isAddSubImmNeg()
const {
1006 if (!isShiftedImm() && !
isImm())
1010 if (
auto ShiftedVal = getShiftedVal<12>())
1011 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1021 template <
typename T>
1023 if (!isShiftedImm() && (!
isImm() || !isa<MCConstantExpr>(getImm())))
1024 return DiagnosticPredicateTy::NoMatch;
1026 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>
::value ||
1027 std::is_same<int8_t, T>::value;
1028 if (
auto ShiftedImm = getShiftedVal<8>())
1029 if (!(IsByte && ShiftedImm->second) &&
1030 AArch64_AM::isSVECpyImm<T>(
uint64_t(ShiftedImm->first)
1031 << ShiftedImm->second))
1032 return DiagnosticPredicateTy::Match;
1034 return DiagnosticPredicateTy::NearMatch;
1041 if (!isShiftedImm() && (!
isImm() || !isa<MCConstantExpr>(getImm())))
1042 return DiagnosticPredicateTy::NoMatch;
1044 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>
::value ||
1045 std::is_same<int8_t, T>::value;
1046 if (
auto ShiftedImm = getShiftedVal<8>())
1047 if (!(IsByte && ShiftedImm->second) &&
1048 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
1049 << ShiftedImm->second))
1050 return DiagnosticPredicateTy::Match;
1052 return DiagnosticPredicateTy::NearMatch;
1056 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1057 return DiagnosticPredicateTy::Match;
1058 return DiagnosticPredicateTy::NoMatch;
// True iff this operand is a condition code (k_CondCode).
1061 bool isCondCode()
const {
return Kind == k_CondCode; }
1063 bool isSIMDImmType10()
const {
1073 bool isBranchTarget()
const {
1082 assert(
N > 0 &&
"Branch target immediate cannot be 0 bits!");
1083 return (Val >= -((1<<(
N-1)) << 2) && Val <= (((1<<(
N-1))-1) << 2));
1094 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1095 DarwinRefKind, Addend)) {
1104 bool isMovWSymbolG3()
const {
1108 bool isMovWSymbolG2()
const {
1109 return isMovWSymbol(
1116 bool isMovWSymbolG1()
const {
1117 return isMovWSymbol(
1125 bool isMovWSymbolG0()
const {
1126 return isMovWSymbol(
1134 template<
int RegW
idth,
int Shift>
1135 bool isMOVZMovAlias()
const {
1136 if (!
isImm())
return false;
1149 template<
int RegW
idth,
int Shift>
1150 bool isMOVNMovAlias()
const {
1151 if (!
isImm())
return false;
1154 if (!CE)
return false;
1160 bool isFPImm()
const {
1161 return Kind == k_FPImm &&
1165 bool isBarrier()
const {
1166 return Kind == k_Barrier && !getBarriernXSModifier();
1168 bool isBarriernXS()
const {
1169 return Kind == k_Barrier && getBarriernXSModifier();
// True iff this operand is a system register (k_SysReg).
1171 bool isSysReg()
const {
return Kind == k_SysReg; }
1173 bool isMRSSystemRegister()
const {
1174 if (!isSysReg())
return false;
1176 return SysReg.MRSReg != -1U;
1179 bool isMSRSystemRegister()
const {
1180 if (!isSysReg())
return false;
1181 return SysReg.MSRReg != -1U;
1184 bool isSystemPStateFieldWithImm0_1()
const {
1185 if (!isSysReg())
return false;
1186 return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
1189 bool isSystemPStateFieldWithImm0_15()
const {
1192 return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
1195 bool isSVCR()
const {
1198 return SVCR.PStateField != -1U;
1201 bool isReg()
const override {
1202 return Kind == k_Register;
// True iff this operand is a vector register list (k_VectorList).
1205 bool isVectorList()
const {
return Kind == k_VectorList; }
1207 bool isScalarReg()
const {
1208 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar;
1211 bool isNeonVectorReg()
const {
1212 return Kind == k_Register &&
Reg.Kind == RegKind::NeonVector;
1215 bool isNeonVectorRegLo()
const {
1216 return Kind == k_Register &&
Reg.Kind == RegKind::NeonVector &&
1217 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1219 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
// True iff this operand is a matrix register (k_MatrixRegister).
1223 bool isMatrix()
const {
return Kind == k_MatrixRegister; }
// True iff this operand is a matrix tile list (k_MatrixTileList).
1224 bool isMatrixTileList()
const {
return Kind == k_MatrixTileList; }
1226 template <
unsigned Class>
bool isSVEPredicateAsCounterReg()
const {
1229 case AArch64::PPRRegClassID:
1230 case AArch64::PPR_3bRegClassID:
1231 case AArch64::PPR_p8to15RegClassID:
1232 RK = RegKind::SVEPredicateAsCounter;
1238 return (Kind == k_Register &&
Reg.Kind == RK) &&
1239 AArch64MCRegisterClasses[
Class].contains(
getReg());
1242 template <
unsigned Class>
bool isSVEVectorReg()
const {
1245 case AArch64::ZPRRegClassID:
1246 case AArch64::ZPR_3bRegClassID:
1247 case AArch64::ZPR_4bRegClassID:
1248 RK = RegKind::SVEDataVector;
1250 case AArch64::PPRRegClassID:
1251 case AArch64::PPR_3bRegClassID:
1252 RK = RegKind::SVEPredicateVector;
1258 return (Kind == k_Register &&
Reg.Kind == RK) &&
1259 AArch64MCRegisterClasses[
Class].contains(
getReg());
1262 template <
unsigned Class>
bool isFPRasZPR()
const {
1263 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1264 AArch64MCRegisterClasses[
Class].contains(
getReg());
1267 template <
int ElementW
idth,
unsigned Class>
1269 if (Kind != k_Register ||
Reg.Kind != RegKind::SVEPredicateVector)
1270 return DiagnosticPredicateTy::NoMatch;
1272 if (isSVEVectorReg<Class>() && (
Reg.ElementWidth == ElementWidth))
1273 return DiagnosticPredicateTy::Match;
1275 return DiagnosticPredicateTy::NearMatch;
1278 template <
int ElementW
idth,
unsigned Class>
1280 if (Kind != k_Register ||
Reg.Kind != RegKind::SVEPredicateAsCounter)
1281 return DiagnosticPredicateTy::NoMatch;
1283 if (isSVEPredicateAsCounterReg<Class>() && (
Reg.ElementWidth == ElementWidth))
1284 return DiagnosticPredicateTy::Match;
1286 return DiagnosticPredicateTy::NearMatch;
1289 template <
int ElementW
idth,
unsigned Class>
1291 if (Kind != k_Register ||
Reg.Kind != RegKind::SVEDataVector)
1292 return DiagnosticPredicateTy::NoMatch;
1294 if (isSVEVectorReg<Class>() &&
Reg.ElementWidth == ElementWidth)
1295 return DiagnosticPredicateTy::Match;
1297 return DiagnosticPredicateTy::NearMatch;
1300 template <
int ElementWidth,
unsigned Class,
1302 bool ShiftWidthAlwaysSame>
1304 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1305 if (!VectorMatch.isMatch())
1306 return DiagnosticPredicateTy::NoMatch;
1311 bool MatchShift = getShiftExtendAmount() ==
Log2_32(ShiftWidth / 8);
1314 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1315 return DiagnosticPredicateTy::NoMatch;
1317 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1318 return DiagnosticPredicateTy::Match;
1320 return DiagnosticPredicateTy::NearMatch;
1323 bool isGPR32as64()
const {
1324 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1325 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(
Reg.RegNum);
1328 bool isGPR64as32()
const {
1329 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1330 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(
Reg.RegNum);
1333 bool isGPR64x8()
const {
1334 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1335 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1339 bool isWSeqPair()
const {
1340 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1341 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1345 bool isXSeqPair()
const {
1346 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1347 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1351 bool isSyspXzrPair()
const {
1352 return isGPR64<AArch64::GPR64RegClassID>() &&
Reg.RegNum == AArch64::XZR;
1355 template<
int64_t Angle,
int64_t Remainder>
1357 if (!
isImm())
return DiagnosticPredicateTy::NoMatch;
1360 if (!CE)
return DiagnosticPredicateTy::NoMatch;
1363 if (
Value % Angle == Remainder &&
Value <= 270)
1364 return DiagnosticPredicateTy::Match;
1365 return DiagnosticPredicateTy::NearMatch;
1368 template <
unsigned RegClassID>
bool isGPR64()
const {
1369 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1370 AArch64MCRegisterClasses[RegClassID].contains(
getReg());
1373 template <
unsigned RegClassID,
int ExtW
idth>
1375 if (Kind != k_Register ||
Reg.Kind != RegKind::Scalar)
1376 return DiagnosticPredicateTy::NoMatch;
1378 if (isGPR64<RegClassID>() && getShiftExtendType() ==
AArch64_AM::LSL &&
1379 getShiftExtendAmount() ==
Log2_32(ExtWidth / 8))
1380 return DiagnosticPredicateTy::Match;
1381 return DiagnosticPredicateTy::NearMatch;
1386 template <RegKind VectorKind,
unsigned NumRegs>
1387 bool isImplicitlyTypedVectorList()
const {
1388 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1389 VectorList.NumElements == 0 &&
1390 VectorList.RegisterKind == VectorKind;
1393 template <RegKind VectorKind,
unsigned NumRegs,
unsigned NumElements,
1394 unsigned ElementWidth,
unsigned Stride = 1>
1395 bool isTypedVectorList()
const {
1396 if (Kind != k_VectorList)
1398 if (VectorList.Count != NumRegs)
1400 if (VectorList.RegisterKind != VectorKind)
1402 if (VectorList.ElementWidth != ElementWidth)
1404 if (VectorList.Stride != Stride)
1406 return VectorList.NumElements == NumElements;
1409 template <RegKind VectorKind,
unsigned NumRegs,
unsigned NumElements,
1410 unsigned ElementWidth>
1413 isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
1415 return DiagnosticPredicateTy::NoMatch;
1416 if (((VectorList.RegNum - AArch64::Z0) % NumRegs) != 0)
1417 return DiagnosticPredicateTy::NearMatch;
1418 return DiagnosticPredicateTy::Match;
1421 template <RegKind VectorKind,
unsigned NumRegs,
unsigned Stride,
1422 unsigned ElementWidth>
1424 bool Res = isTypedVectorList<VectorKind, NumRegs, 0,
1425 ElementWidth, Stride>();
1427 return DiagnosticPredicateTy::NoMatch;
1428 if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
1429 ((VectorList.RegNum >= AArch64::Z16) &&
1430 (VectorList.RegNum < (AArch64::Z16 + Stride))))
1431 return DiagnosticPredicateTy::Match;
1432 return DiagnosticPredicateTy::NoMatch;
1435 template <
int Min,
int Max>
1437 if (Kind != k_VectorIndex)
1438 return DiagnosticPredicateTy::NoMatch;
1439 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1440 return DiagnosticPredicateTy::Match;
1441 return DiagnosticPredicateTy::NearMatch;
1444 bool isToken()
const override {
return Kind == k_Token; }
1446 bool isTokenEqual(
StringRef Str)
const {
1447 return Kind == k_Token && getToken() == Str;
// True iff this operand is a k_SysCR operand.
1449 bool isSysCR()
const {
return Kind == k_SysCR; }
// True iff this operand is a prefetch hint (k_Prefetch).
1450 bool isPrefetch()
const {
return Kind == k_Prefetch; }
// True iff this operand is a PSB hint (k_PSBHint).
1451 bool isPSBHint()
const {
return Kind == k_PSBHint; }
// True iff this operand is a BTI hint (k_BTIHint).
1452 bool isBTIHint()
const {
return Kind == k_BTIHint; }
// True iff this operand is a shift/extend specifier (k_ShiftExtend).
1453 bool isShiftExtend()
const {
return Kind == k_ShiftExtend; }
1454 bool isShifter()
const {
1455 if (!isShiftExtend())
1465 if (Kind != k_FPImm)
1466 return DiagnosticPredicateTy::NoMatch;
1468 if (getFPImmIsExact()) {
1470 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1471 assert(Desc &&
"Unknown enum value");
1474 APFloat RealVal(APFloat::IEEEdouble());
1476 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1477 if (
errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1480 if (
getFPImm().bitwiseIsEqual(RealVal))
1481 return DiagnosticPredicateTy::Match;
1484 return DiagnosticPredicateTy::NearMatch;
1487 template <
unsigned ImmA,
unsigned ImmB>
1490 if ((Res = isExactFPImm<ImmA>()))
1491 return DiagnosticPredicateTy::Match;
1492 if ((Res = isExactFPImm<ImmB>()))
1493 return DiagnosticPredicateTy::Match;
1497 bool isExtend()
const {
1498 if (!isShiftExtend())
1507 getShiftExtendAmount() <= 4;
1510 bool isExtend64()
const {
1520 bool isExtendLSL64()
const {
1526 getShiftExtendAmount() <= 4;
1529 template<
int W
idth>
bool isMemXExtend()
const {
1534 (getShiftExtendAmount() ==
Log2_32(Width / 8) ||
1535 getShiftExtendAmount() == 0);
1538 template<
int W
idth>
bool isMemWExtend()
const {
1543 (getShiftExtendAmount() ==
Log2_32(Width / 8) ||
1544 getShiftExtendAmount() == 0);
1547 template <
unsigned w
idth>
1548 bool isArithmeticShifter()
const {
1558 template <
unsigned w
idth>
1559 bool isLogicalShifter()
const {
1567 getShiftExtendAmount() < width;
1570 bool isMovImm32Shifter()
const {
1578 uint64_t Val = getShiftExtendAmount();
1579 return (Val == 0 || Val == 16);
1582 bool isMovImm64Shifter()
const {
1590 uint64_t Val = getShiftExtendAmount();
1591 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1594 bool isLogicalVecShifter()
const {
1599 unsigned Shift = getShiftExtendAmount();
1601 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1604 bool isLogicalVecHalfWordShifter()
const {
1605 if (!isLogicalVecShifter())
1609 unsigned Shift = getShiftExtendAmount();
1611 (Shift == 0 || Shift == 8);
1614 bool isMoveVecShifter()
const {
1615 if (!isShiftExtend())
1619 unsigned Shift = getShiftExtendAmount();
1621 (Shift == 8 || Shift == 16);
1630 bool isSImm9OffsetFB()
const {
1631 return isSImm<9>() && !isUImm12Offset<
Width / 8>();
1634 bool isAdrpLabel()
const {
1641 int64_t Val =
CE->getValue();
1642 int64_t Min = - (4096 * (1LL << (21 - 1)));
1643 int64_t
Max = 4096 * ((1LL << (21 - 1)) - 1);
1644 return (Val % 4096) == 0 && Val >= Min && Val <=
Max;
1650 bool isAdrLabel()
const {
1657 int64_t Val =
CE->getValue();
1658 int64_t Min = - (1LL << (21 - 1));
1659 int64_t
Max = ((1LL << (21 - 1)) - 1);
1660 return Val >= Min && Val <=
Max;
1666 template <MatrixKind Kind,
unsigned EltSize,
unsigned RegClass>
1669 return DiagnosticPredicateTy::NoMatch;
1670 if (getMatrixKind() != Kind ||
1671 !AArch64MCRegisterClasses[RegClass].
contains(getMatrixReg()) ||
1672 EltSize != getMatrixElementWidth())
1673 return DiagnosticPredicateTy::NearMatch;
1674 return DiagnosticPredicateTy::Match;
1681 else if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1687 void addRegOperands(
MCInst &Inst,
unsigned N)
const {
1688 assert(
N == 1 &&
"Invalid number of operands!");
1692 void addMatrixOperands(
MCInst &Inst,
unsigned N)
const {
1693 assert(
N == 1 &&
"Invalid number of operands!");
1697 void addGPR32as64Operands(
MCInst &Inst,
unsigned N)
const {
1698 assert(
N == 1 &&
"Invalid number of operands!");
1700 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].
contains(
getReg()));
1709 void addGPR64as32Operands(
MCInst &Inst,
unsigned N)
const {
1710 assert(
N == 1 &&
"Invalid number of operands!");
1712 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].
contains(
getReg()));
1721 template <
int W
idth>
1722 void addFPRasZPRRegOperands(
MCInst &Inst,
unsigned N)
const {
1725 case 8:
Base = AArch64::B0;
break;
1726 case 16:
Base = AArch64::H0;
break;
1727 case 32:
Base = AArch64::S0;
break;
1728 case 64:
Base = AArch64::D0;
break;
1729 case 128:
Base = AArch64::Q0;
break;
1736 void addVectorReg64Operands(
MCInst &Inst,
unsigned N)
const {
1737 assert(
N == 1 &&
"Invalid number of operands!");
1739 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].
contains(
getReg()));
1743 void addVectorReg128Operands(
MCInst &Inst,
unsigned N)
const {
1744 assert(
N == 1 &&
"Invalid number of operands!");
1746 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].
contains(
getReg()));
1750 void addVectorRegLoOperands(
MCInst &Inst,
unsigned N)
const {
1751 assert(
N == 1 &&
"Invalid number of operands!");
1755 enum VecListIndexType {
1756 VecListIdx_DReg = 0,
1757 VecListIdx_QReg = 1,
1758 VecListIdx_ZReg = 2,
1759 VecListIdx_PReg = 3,
1762 template <VecListIndexType RegTy,
unsigned NumRegs>
1763 void addVectorListOperands(
MCInst &Inst,
unsigned N)
const {
1764 assert(
N == 1 &&
"Invalid number of operands!");
1765 static const unsigned FirstRegs[][5] = {
1767 AArch64::D0, AArch64::D0_D1,
1768 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1770 AArch64::Q0, AArch64::Q0_Q1,
1771 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1773 AArch64::Z0, AArch64::Z0_Z1,
1774 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1776 AArch64::P0, AArch64::P0_P1 }
1779 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1780 " NumRegs must be <= 4 for ZRegs");
1782 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1783 " NumRegs must be <= 2 for PRegs");
1785 unsigned FirstReg = FirstRegs[(
unsigned)RegTy][NumRegs];
1787 FirstRegs[(
unsigned)RegTy][0]));
1790 template <
unsigned NumRegs>
1791 void addStridedVectorListOperands(
MCInst &Inst,
unsigned N)
const {
1792 assert(
N == 1 &&
"Invalid number of operands!");
1793 assert((NumRegs == 2 || NumRegs == 4) &&
" NumRegs must be 2 or 4");
1797 if (getVectorListStart() < AArch64::Z16) {
1798 assert((getVectorListStart() < AArch64::Z8) &&
1799 (getVectorListStart() >= AArch64::Z0) &&
"Invalid Register");
1801 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1803 assert((getVectorListStart() < AArch64::Z24) &&
1804 (getVectorListStart() >= AArch64::Z16) &&
"Invalid Register");
1806 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1810 if (getVectorListStart() < AArch64::Z16) {
1811 assert((getVectorListStart() < AArch64::Z4) &&
1812 (getVectorListStart() >= AArch64::Z0) &&
"Invalid Register");
1814 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1816 assert((getVectorListStart() < AArch64::Z20) &&
1817 (getVectorListStart() >= AArch64::Z16) &&
"Invalid Register");
1819 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1827 void addMatrixTileListOperands(
MCInst &Inst,
unsigned N)
const {
1828 assert(
N == 1 &&
"Invalid number of operands!");
1829 unsigned RegMask = getMatrixTileListRegMask();
1830 assert(RegMask <= 0xFF &&
"Invalid mask!");
1834 void addVectorIndexOperands(
MCInst &Inst,
unsigned N)
const {
1835 assert(
N == 1 &&
"Invalid number of operands!");
1839 template <
unsigned ImmIs0,
unsigned ImmIs1>
1840 void addExactFPImmOperands(
MCInst &Inst,
unsigned N)
const {
1841 assert(
N == 1 &&
"Invalid number of operands!");
1842 assert(
bool(isExactFPImm<ImmIs0, ImmIs1>()) &&
"Invalid operand");
1846 void addImmOperands(
MCInst &Inst,
unsigned N)
const {
1847 assert(
N == 1 &&
"Invalid number of operands!");
1851 addExpr(Inst, getImm());
1854 template <
int Shift>
1855 void addImmWithOptionalShiftOperands(
MCInst &Inst,
unsigned N)
const {
1856 assert(
N == 2 &&
"Invalid number of operands!");
1857 if (
auto ShiftedVal = getShiftedVal<Shift>()) {
1860 }
else if (isShiftedImm()) {
1861 addExpr(Inst, getShiftedImmVal());
1864 addExpr(Inst, getImm());
1869 template <
int Shift>
1870 void addImmNegWithOptionalShiftOperands(
MCInst &Inst,
unsigned N)
const {
1871 assert(
N == 2 &&
"Invalid number of operands!");
1872 if (
auto ShiftedVal = getShiftedVal<Shift>()) {
1879 void addCondCodeOperands(
MCInst &Inst,
unsigned N)
const {
1880 assert(
N == 1 &&
"Invalid number of operands!");
1884 void addAdrpLabelOperands(
MCInst &Inst,
unsigned N)
const {
1885 assert(
N == 1 &&
"Invalid number of operands!");
1888 addExpr(Inst, getImm());
1893 void addAdrLabelOperands(
MCInst &Inst,
unsigned N)
const {
1894 addImmOperands(Inst,
N);
1898 void addUImm12OffsetOperands(
MCInst &Inst,
unsigned N)
const {
1899 assert(
N == 1 &&
"Invalid number of operands!");
1909 void addUImm6Operands(
MCInst &Inst,
unsigned N)
const {
1910 assert(
N == 1 &&
"Invalid number of operands!");
1915 template <
int Scale>
1916 void addImmScaledOperands(
MCInst &Inst,
unsigned N)
const {
1917 assert(
N == 1 &&
"Invalid number of operands!");
1922 template <
int Scale>
1923 void addImmScaledRangeOperands(
MCInst &Inst,
unsigned N)
const {
1924 assert(
N == 1 &&
"Invalid number of operands!");
1928 template <
typename T>
1929 void addLogicalImmOperands(
MCInst &Inst,
unsigned N)
const {
1930 assert(
N == 1 &&
"Invalid number of operands!");
1932 std::make_unsigned_t<T> Val = MCE->
getValue();
1937 template <
typename T>
1938 void addLogicalImmNotOperands(
MCInst &Inst,
unsigned N)
const {
1939 assert(
N == 1 &&
"Invalid number of operands!");
1941 std::make_unsigned_t<T> Val = ~MCE->getValue();
1946 void addSIMDImmType10Operands(
MCInst &Inst,
unsigned N)
const {
1947 assert(
N == 1 &&
"Invalid number of operands!");
1953 void addBranchTarget26Operands(
MCInst &Inst,
unsigned N)
const {
1957 assert(
N == 1 &&
"Invalid number of operands!");
1960 addExpr(Inst, getImm());
1963 assert(MCE &&
"Invalid constant immediate operand!");
1967 void addPCRelLabel19Operands(
MCInst &Inst,
unsigned N)
const {
1971 assert(
N == 1 &&
"Invalid number of operands!");
1974 addExpr(Inst, getImm());
1977 assert(MCE &&
"Invalid constant immediate operand!");
1981 void addBranchTarget14Operands(
MCInst &Inst,
unsigned N)
const {
1985 assert(
N == 1 &&
"Invalid number of operands!");
1988 addExpr(Inst, getImm());
1991 assert(MCE &&
"Invalid constant immediate operand!");
1995 void addFPImmOperands(
MCInst &Inst,
unsigned N)
const {
1996 assert(
N == 1 &&
"Invalid number of operands!");
2001 void addBarrierOperands(
MCInst &Inst,
unsigned N)
const {
2002 assert(
N == 1 &&
"Invalid number of operands!");
2006 void addBarriernXSOperands(
MCInst &Inst,
unsigned N)
const {
2007 assert(
N == 1 &&
"Invalid number of operands!");
2011 void addMRSSystemRegisterOperands(
MCInst &Inst,
unsigned N)
const {
2012 assert(
N == 1 &&
"Invalid number of operands!");
2017 void addMSRSystemRegisterOperands(
MCInst &Inst,
unsigned N)
const {
2018 assert(
N == 1 &&
"Invalid number of operands!");
2023 void addSystemPStateFieldWithImm0_1Operands(
MCInst &Inst,
unsigned N)
const {
2024 assert(
N == 1 &&
"Invalid number of operands!");
2029 void addSVCROperands(
MCInst &Inst,
unsigned N)
const {
2030 assert(
N == 1 &&
"Invalid number of operands!");
2035 void addSystemPStateFieldWithImm0_15Operands(
MCInst &Inst,
unsigned N)
const {
2036 assert(
N == 1 &&
"Invalid number of operands!");
2041 void addSysCROperands(
MCInst &Inst,
unsigned N)
const {
2042 assert(
N == 1 &&
"Invalid number of operands!");
2046 void addPrefetchOperands(
MCInst &Inst,
unsigned N)
const {
2047 assert(
N == 1 &&
"Invalid number of operands!");
2051 void addPSBHintOperands(
MCInst &Inst,
unsigned N)
const {
2052 assert(
N == 1 &&
"Invalid number of operands!");
2056 void addBTIHintOperands(
MCInst &Inst,
unsigned N)
const {
2057 assert(
N == 1 &&
"Invalid number of operands!");
2061 void addShifterOperands(
MCInst &Inst,
unsigned N)
const {
2062 assert(
N == 1 &&
"Invalid number of operands!");
2068 void addSyspXzrPairOperand(
MCInst &Inst,
unsigned N)
const {
2069 assert(
N == 1 &&
"Invalid number of operands!");
2077 if (
Reg != AArch64::XZR)
2083 void addExtendOperands(
MCInst &Inst,
unsigned N)
const {
2084 assert(
N == 1 &&
"Invalid number of operands!");
2091 void addExtend64Operands(
MCInst &Inst,
unsigned N)
const {
2092 assert(
N == 1 &&
"Invalid number of operands!");
2099 void addMemExtendOperands(
MCInst &Inst,
unsigned N)
const {
2100 assert(
N == 2 &&
"Invalid number of operands!");
2111 void addMemExtend8Operands(
MCInst &Inst,
unsigned N)
const {
2112 assert(
N == 2 &&
"Invalid number of operands!");
2120 void addMOVZMovAliasOperands(
MCInst &Inst,
unsigned N)
const {
2121 assert(
N == 1 &&
"Invalid number of operands!");
2128 addExpr(Inst, getImm());
2133 void addMOVNMovAliasOperands(
MCInst &Inst,
unsigned N)
const {
2134 assert(
N == 1 &&
"Invalid number of operands!");
2141 void addComplexRotationEvenOperands(
MCInst &Inst,
unsigned N)
const {
2142 assert(
N == 1 &&
"Invalid number of operands!");
2147 void addComplexRotationOddOperands(
MCInst &Inst,
unsigned N)
const {
2148 assert(
N == 1 &&
"Invalid number of operands!");
2155 static std::unique_ptr<AArch64Operand>
2157 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2158 Op->Tok.Data = Str.data();
2159 Op->Tok.Length = Str.size();
2160 Op->Tok.IsSuffix = IsSuffix;
2166 static std::unique_ptr<AArch64Operand>
2168 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2170 unsigned ShiftAmount = 0,
2171 unsigned HasExplicitAmount =
false) {
2172 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2173 Op->Reg.RegNum = RegNum;
2175 Op->Reg.ElementWidth = 0;
2176 Op->Reg.EqualityTy = EqTy;
2177 Op->Reg.ShiftExtend.Type = ExtTy;
2178 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2179 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2185 static std::unique_ptr<AArch64Operand>
2186 CreateVectorReg(
unsigned RegNum, RegKind Kind,
unsigned ElementWidth,
2189 unsigned ShiftAmount = 0,
2190 unsigned HasExplicitAmount =
false) {
2191 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2192 Kind == RegKind::SVEPredicateVector ||
2193 Kind == RegKind::SVEPredicateAsCounter) &&
2194 "Invalid vector kind");
2195 auto Op = CreateReg(RegNum, Kind, S,
E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2197 Op->Reg.ElementWidth = ElementWidth;
2201 static std::unique_ptr<AArch64Operand>
2202 CreateVectorList(
unsigned RegNum,
unsigned Count,
unsigned Stride,
2203 unsigned NumElements,
unsigned ElementWidth,
2205 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2206 Op->VectorList.RegNum = RegNum;
2207 Op->VectorList.Count = Count;
2208 Op->VectorList.Stride = Stride;
2209 Op->VectorList.NumElements = NumElements;
2210 Op->VectorList.ElementWidth = ElementWidth;
2211 Op->VectorList.RegisterKind = RegisterKind;
2217 static std::unique_ptr<AArch64Operand>
2219 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2220 Op->VectorIndex.Val =
Idx;
2226 static std::unique_ptr<AArch64Operand>
2228 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2229 Op->MatrixTileList.RegMask = RegMask;
2236 const unsigned ElementWidth) {
2237 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2239 {{0, AArch64::ZAB0},
2240 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2241 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2242 {{8, AArch64::ZAB0},
2243 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2244 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2245 {{16, AArch64::ZAH0},
2246 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2247 {{16, AArch64::ZAH1},
2248 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2249 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2250 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2251 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2252 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2255 if (ElementWidth == 64)
2258 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth,
Reg)];
2259 assert(!Regs.empty() &&
"Invalid tile or element width!");
2260 for (
auto OutReg : Regs)
2265 static std::unique_ptr<AArch64Operand> CreateImm(
const MCExpr *Val,
SMLoc S,
2267 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2274 static std::unique_ptr<AArch64Operand> CreateShiftedImm(
const MCExpr *Val,
2275 unsigned ShiftAmount,
2278 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2279 Op->ShiftedImm .Val = Val;
2280 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2286 static std::unique_ptr<AArch64Operand> CreateImmRange(
unsigned First,
2290 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2292 Op->ImmRange.Last =
Last;
2297 static std::unique_ptr<AArch64Operand>
2299 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2300 Op->CondCode.Code =
Code;
2306 static std::unique_ptr<AArch64Operand>
2308 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2310 Op->FPImm.IsExact = IsExact;
2316 static std::unique_ptr<AArch64Operand> CreateBarrier(
unsigned Val,
2320 bool HasnXSModifier) {
2321 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2322 Op->Barrier.Val = Val;
2323 Op->Barrier.Data = Str.data();
2324 Op->Barrier.Length = Str.size();
2325 Op->Barrier.HasnXSModifier = HasnXSModifier;
2331 static std::unique_ptr<AArch64Operand> CreateSysReg(
StringRef Str,
SMLoc S,
2336 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2337 Op->SysReg.Data = Str.data();
2338 Op->SysReg.Length = Str.size();
2339 Op->SysReg.MRSReg = MRSReg;
2340 Op->SysReg.MSRReg = MSRReg;
2341 Op->SysReg.PStateField = PStateField;
2347 static std::unique_ptr<AArch64Operand> CreateSysCR(
unsigned Val,
SMLoc S,
2349 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2350 Op->SysCRImm.Val = Val;
2356 static std::unique_ptr<AArch64Operand> CreatePrefetch(
unsigned Val,
2360 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2361 Op->Prefetch.Val = Val;
2362 Op->Barrier.Data = Str.data();
2363 Op->Barrier.Length = Str.size();
2369 static std::unique_ptr<AArch64Operand> CreatePSBHint(
unsigned Val,
2373 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2374 Op->PSBHint.Val = Val;
2375 Op->PSBHint.Data = Str.data();
2376 Op->PSBHint.Length = Str.size();
2382 static std::unique_ptr<AArch64Operand> CreateBTIHint(
unsigned Val,
2386 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2387 Op->BTIHint.Val = Val | 32;
2388 Op->BTIHint.Data = Str.data();
2389 Op->BTIHint.Length = Str.size();
2395 static std::unique_ptr<AArch64Operand>
2396 CreateMatrixRegister(
unsigned RegNum,
unsigned ElementWidth, MatrixKind Kind,
2398 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2399 Op->MatrixReg.RegNum = RegNum;
2400 Op->MatrixReg.ElementWidth = ElementWidth;
2401 Op->MatrixReg.Kind =
Kind;
2407 static std::unique_ptr<AArch64Operand>
2409 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2410 Op->SVCR.PStateField = PStateField;
2411 Op->SVCR.Data = Str.data();
2412 Op->SVCR.Length = Str.size();
2418 static std::unique_ptr<AArch64Operand>
2421 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2422 Op->ShiftExtend.Type = ShOp;
2423 Op->ShiftExtend.Amount = Val;
2424 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2436 OS <<
"<fpimm " <<
getFPImm().bitcastToAPInt().getZExtValue();
2437 if (!getFPImmIsExact())
2444 OS <<
"<barrier " <<
Name <<
">";
2446 OS <<
"<barrier invalid #" << getBarrier() <<
">";
2452 case k_ShiftedImm: {
2453 unsigned Shift = getShiftedImmShift();
2454 OS <<
"<shiftedimm ";
2455 OS << *getShiftedImmVal();
2461 OS << getFirstImmVal();
2462 OS <<
":" << getLastImmVal() <<
">";
2468 case k_VectorList: {
2469 OS <<
"<vectorlist ";
2470 unsigned Reg = getVectorListStart();
2471 for (
unsigned i = 0, e = getVectorListCount(); i !=
e; ++i)
2472 OS <<
Reg + i * getVectorListStride() <<
" ";
2477 OS <<
"<vectorindex " << getVectorIndex() <<
">";
2480 OS <<
"<sysreg: " << getSysReg() <<
'>';
2483 OS <<
"'" << getToken() <<
"'";
2486 OS <<
"c" << getSysCR();
2491 OS <<
"<prfop " <<
Name <<
">";
2493 OS <<
"<prfop invalid #" << getPrefetch() <<
">";
2497 OS << getPSBHintName();
2500 OS << getBTIHintName();
2502 case k_MatrixRegister:
2503 OS <<
"<matrix " << getMatrixReg() <<
">";
2505 case k_MatrixTileList: {
2506 OS <<
"<matrixlist ";
2507 unsigned RegMask = getMatrixTileListRegMask();
2508 unsigned MaxBits = 8;
2509 for (
unsigned I = MaxBits;
I > 0; --
I)
2510 OS << ((RegMask & (1 << (
I - 1))) >> (
I - 1));
2519 OS <<
"<register " <<
getReg() <<
">";
2520 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2525 << getShiftExtendAmount();
2526 if (!hasShiftExtendAmount())
2542 .
Case(
"v0", AArch64::Q0)
2543 .
Case(
"v1", AArch64::Q1)
2544 .
Case(
"v2", AArch64::Q2)
2545 .
Case(
"v3", AArch64::Q3)
2546 .
Case(
"v4", AArch64::Q4)
2547 .
Case(
"v5", AArch64::Q5)
2548 .
Case(
"v6", AArch64::Q6)
2549 .
Case(
"v7", AArch64::Q7)
2550 .
Case(
"v8", AArch64::Q8)
2551 .
Case(
"v9", AArch64::Q9)
2552 .
Case(
"v10", AArch64::Q10)
2553 .
Case(
"v11", AArch64::Q11)
2554 .
Case(
"v12", AArch64::Q12)
2555 .
Case(
"v13", AArch64::Q13)
2556 .
Case(
"v14", AArch64::Q14)
2557 .
Case(
"v15", AArch64::Q15)
2558 .
Case(
"v16", AArch64::Q16)
2559 .
Case(
"v17", AArch64::Q17)
2560 .
Case(
"v18", AArch64::Q18)
2561 .
Case(
"v19", AArch64::Q19)
2562 .
Case(
"v20", AArch64::Q20)
2563 .
Case(
"v21", AArch64::Q21)
2564 .
Case(
"v22", AArch64::Q22)
2565 .
Case(
"v23", AArch64::Q23)
2566 .
Case(
"v24", AArch64::Q24)
2567 .
Case(
"v25", AArch64::Q25)
2568 .
Case(
"v26", AArch64::Q26)
2569 .
Case(
"v27", AArch64::Q27)
2570 .
Case(
"v28", AArch64::Q28)
2571 .
Case(
"v29", AArch64::Q29)
2572 .
Case(
"v30", AArch64::Q30)
2573 .
Case(
"v31", AArch64::Q31)
2582 RegKind VectorKind) {
2583 std::pair<int, int> Res = {-1, -1};
2585 switch (VectorKind) {
2586 case RegKind::NeonVector:
2590 .Case(
".1d", {1, 64})
2591 .Case(
".1q", {1, 128})
2593 .Case(
".2h", {2, 16})
2594 .Case(
".2s", {2, 32})
2595 .Case(
".2d", {2, 64})
2598 .Case(
".4b", {4, 8})
2599 .Case(
".4h", {4, 16})
2600 .Case(
".4s", {4, 32})
2601 .Case(
".8b", {8, 8})
2602 .Case(
".8h", {8, 16})
2603 .Case(
".16b", {16, 8})
2608 .Case(
".h", {0, 16})
2609 .Case(
".s", {0, 32})
2610 .Case(
".d", {0, 64})
2613 case RegKind::SVEPredicateAsCounter:
2614 case RegKind::SVEPredicateVector:
2615 case RegKind::SVEDataVector:
2616 case RegKind::Matrix:
2620 .Case(
".h", {0, 16})
2621 .Case(
".s", {0, 32})
2622 .Case(
".d", {0, 64})
2623 .Case(
".q", {0, 128})
2630 if (Res == std::make_pair(-1, -1))
2631 return std::nullopt;
2633 return std::optional<std::pair<int, int>>(Res);
2642 .
Case(
"z0", AArch64::Z0)
2643 .
Case(
"z1", AArch64::Z1)
2644 .
Case(
"z2", AArch64::Z2)
2645 .
Case(
"z3", AArch64::Z3)
2646 .
Case(
"z4", AArch64::Z4)
2647 .
Case(
"z5", AArch64::Z5)
2648 .
Case(
"z6", AArch64::Z6)
2649 .
Case(
"z7", AArch64::Z7)
2650 .
Case(
"z8", AArch64::Z8)
2651 .
Case(
"z9", AArch64::Z9)
2652 .
Case(
"z10", AArch64::Z10)
2653 .
Case(
"z11", AArch64::Z11)
2654 .
Case(
"z12", AArch64::Z12)
2655 .
Case(
"z13", AArch64::Z13)
2656 .
Case(
"z14", AArch64::Z14)
2657 .
Case(
"z15", AArch64::Z15)
2658 .
Case(
"z16", AArch64::Z16)
2659 .
Case(
"z17", AArch64::Z17)
2660 .
Case(
"z18", AArch64::Z18)
2661 .
Case(
"z19", AArch64::Z19)
2662 .
Case(
"z20", AArch64::Z20)
2663 .
Case(
"z21", AArch64::Z21)
2664 .
Case(
"z22", AArch64::Z22)
2665 .
Case(
"z23", AArch64::Z23)
2666 .
Case(
"z24", AArch64::Z24)
2667 .
Case(
"z25", AArch64::Z25)
2668 .
Case(
"z26", AArch64::Z26)
2669 .
Case(
"z27", AArch64::Z27)
2670 .
Case(
"z28", AArch64::Z28)
2671 .
Case(
"z29", AArch64::Z29)
2672 .
Case(
"z30", AArch64::Z30)
2673 .
Case(
"z31", AArch64::Z31)
2679 .
Case(
"p0", AArch64::P0)
2680 .
Case(
"p1", AArch64::P1)
2681 .
Case(
"p2", AArch64::P2)
2682 .
Case(
"p3", AArch64::P3)
2683 .
Case(
"p4", AArch64::P4)
2684 .
Case(
"p5", AArch64::P5)
2685 .
Case(
"p6", AArch64::P6)
2686 .
Case(
"p7", AArch64::P7)
2687 .
Case(
"p8", AArch64::P8)
2688 .
Case(
"p9", AArch64::P9)
2689 .
Case(
"p10", AArch64::P10)
2690 .
Case(
"p11", AArch64::P11)
2691 .
Case(
"p12", AArch64::P12)
2692 .
Case(
"p13", AArch64::P13)
2693 .
Case(
"p14", AArch64::P14)
2694 .
Case(
"p15", AArch64::P15)
2700 .
Case(
"pn0", AArch64::P0)
2701 .
Case(
"pn1", AArch64::P1)
2702 .
Case(
"pn2", AArch64::P2)
2703 .
Case(
"pn3", AArch64::P3)
2704 .
Case(
"pn4", AArch64::P4)
2705 .
Case(
"pn5", AArch64::P5)
2706 .
Case(
"pn6", AArch64::P6)
2707 .
Case(
"pn7", AArch64::P7)
2708 .
Case(
"pn8", AArch64::P8)
2709 .
Case(
"pn9", AArch64::P9)
2710 .
Case(
"pn10", AArch64::P10)
2711 .
Case(
"pn11", AArch64::P11)
2712 .
Case(
"pn12", AArch64::P12)
2713 .
Case(
"pn13", AArch64::P13)
2714 .
Case(
"pn14", AArch64::P14)
2715 .
Case(
"pn15", AArch64::P15)
2721 .
Case(
"za0.d", AArch64::ZAD0)
2722 .
Case(
"za1.d", AArch64::ZAD1)
2723 .
Case(
"za2.d", AArch64::ZAD2)
2724 .
Case(
"za3.d", AArch64::ZAD3)
2725 .
Case(
"za4.d", AArch64::ZAD4)
2726 .
Case(
"za5.d", AArch64::ZAD5)
2727 .
Case(
"za6.d", AArch64::ZAD6)
2728 .
Case(
"za7.d", AArch64::ZAD7)
2729 .
Case(
"za0.s", AArch64::ZAS0)
2730 .
Case(
"za1.s", AArch64::ZAS1)
2731 .
Case(
"za2.s", AArch64::ZAS2)
2732 .
Case(
"za3.s", AArch64::ZAS3)
2733 .
Case(
"za0.h", AArch64::ZAH0)
2734 .
Case(
"za1.h", AArch64::ZAH1)
2735 .
Case(
"za0.b", AArch64::ZAB0)
2741 .
Case(
"za", AArch64::ZA)
2742 .
Case(
"za0.q", AArch64::ZAQ0)
2743 .
Case(
"za1.q", AArch64::ZAQ1)
2744 .
Case(
"za2.q", AArch64::ZAQ2)
2745 .
Case(
"za3.q", AArch64::ZAQ3)
2746 .
Case(
"za4.q", AArch64::ZAQ4)
2747 .
Case(
"za5.q", AArch64::ZAQ5)
2748 .
Case(
"za6.q", AArch64::ZAQ6)
2749 .
Case(
"za7.q", AArch64::ZAQ7)
2750 .
Case(
"za8.q", AArch64::ZAQ8)
2751 .
Case(
"za9.q", AArch64::ZAQ9)
2752 .
Case(
"za10.q", AArch64::ZAQ10)
2753 .
Case(
"za11.q", AArch64::ZAQ11)
2754 .
Case(
"za12.q", AArch64::ZAQ12)
2755 .
Case(
"za13.q", AArch64::ZAQ13)
2756 .
Case(
"za14.q", AArch64::ZAQ14)
2757 .
Case(
"za15.q", AArch64::ZAQ15)
2758 .
Case(
"za0.d", AArch64::ZAD0)
2759 .
Case(
"za1.d", AArch64::ZAD1)
2760 .
Case(
"za2.d", AArch64::ZAD2)
2761 .
Case(
"za3.d", AArch64::ZAD3)
2762 .
Case(
"za4.d", AArch64::ZAD4)
2763 .
Case(
"za5.d", AArch64::ZAD5)
2764 .
Case(
"za6.d", AArch64::ZAD6)
2765 .
Case(
"za7.d", AArch64::ZAD7)
2766 .
Case(
"za0.s", AArch64::ZAS0)
2767 .
Case(
"za1.s", AArch64::ZAS1)
2768 .
Case(
"za2.s", AArch64::ZAS2)
2769 .
Case(
"za3.s", AArch64::ZAS3)
2770 .
Case(
"za0.h", AArch64::ZAH0)
2771 .
Case(
"za1.h", AArch64::ZAH1)
2772 .
Case(
"za0.b", AArch64::ZAB0)
2773 .
Case(
"za0h.q", AArch64::ZAQ0)
2774 .
Case(
"za1h.q", AArch64::ZAQ1)
2775 .
Case(
"za2h.q", AArch64::ZAQ2)
2776 .
Case(
"za3h.q", AArch64::ZAQ3)
2777 .
Case(
"za4h.q", AArch64::ZAQ4)
2778 .
Case(
"za5h.q", AArch64::ZAQ5)
2779 .
Case(
"za6h.q", AArch64::ZAQ6)
2780 .
Case(
"za7h.q", AArch64::ZAQ7)
2781 .
Case(
"za8h.q", AArch64::ZAQ8)
2782 .
Case(
"za9h.q", AArch64::ZAQ9)
2783 .
Case(
"za10h.q", AArch64::ZAQ10)
2784 .
Case(
"za11h.q", AArch64::ZAQ11)
2785 .
Case(
"za12h.q", AArch64::ZAQ12)
2786 .
Case(
"za13h.q", AArch64::ZAQ13)
2787 .
Case(
"za14h.q", AArch64::ZAQ14)
2788 .
Case(
"za15h.q", AArch64::ZAQ15)
2789 .
Case(
"za0h.d", AArch64::ZAD0)
2790 .
Case(
"za1h.d", AArch64::ZAD1)
2791 .
Case(
"za2h.d", AArch64::ZAD2)
2792 .
Case(
"za3h.d", AArch64::ZAD3)
2793 .
Case(
"za4h.d", AArch64::ZAD4)
2794 .
Case(
"za5h.d", AArch64::ZAD5)
2795 .
Case(
"za6h.d", AArch64::ZAD6)
2796 .
Case(
"za7h.d", AArch64::ZAD7)
2797 .
Case(
"za0h.s", AArch64::ZAS0)
2798 .
Case(
"za1h.s", AArch64::ZAS1)
2799 .
Case(
"za2h.s", AArch64::ZAS2)
2800 .
Case(
"za3h.s", AArch64::ZAS3)
2801 .
Case(
"za0h.h", AArch64::ZAH0)
2802 .
Case(
"za1h.h", AArch64::ZAH1)
2803 .
Case(
"za0h.b", AArch64::ZAB0)
2804 .
Case(
"za0v.q", AArch64::ZAQ0)
2805 .
Case(
"za1v.q", AArch64::ZAQ1)
2806 .
Case(
"za2v.q", AArch64::ZAQ2)
2807 .
Case(
"za3v.q", AArch64::ZAQ3)
2808 .
Case(
"za4v.q", AArch64::ZAQ4)
2809 .
Case(
"za5v.q", AArch64::ZAQ5)
2810 .
Case(
"za6v.q", AArch64::ZAQ6)
2811 .
Case(
"za7v.q", AArch64::ZAQ7)
2812 .
Case(
"za8v.q", AArch64::ZAQ8)
2813 .
Case(
"za9v.q", AArch64::ZAQ9)
2814 .
Case(
"za10v.q", AArch64::ZAQ10)
2815 .
Case(
"za11v.q", AArch64::ZAQ11)
2816 .
Case(
"za12v.q", AArch64::ZAQ12)
2817 .
Case(
"za13v.q", AArch64::ZAQ13)
2818 .
Case(
"za14v.q", AArch64::ZAQ14)
2819 .
Case(
"za15v.q", AArch64::ZAQ15)
2820 .
Case(
"za0v.d", AArch64::ZAD0)
2821 .
Case(
"za1v.d", AArch64::ZAD1)
2822 .
Case(
"za2v.d", AArch64::ZAD2)
2823 .
Case(
"za3v.d", AArch64::ZAD3)
2824 .
Case(
"za4v.d", AArch64::ZAD4)
2825 .
Case(
"za5v.d", AArch64::ZAD5)
2826 .
Case(
"za6v.d", AArch64::ZAD6)
2827 .
Case(
"za7v.d", AArch64::ZAD7)
2828 .
Case(
"za0v.s", AArch64::ZAS0)
2829 .
Case(
"za1v.s", AArch64::ZAS1)
2830 .
Case(
"za2v.s", AArch64::ZAS2)
2831 .
Case(
"za3v.s", AArch64::ZAS3)
2832 .
Case(
"za0v.h", AArch64::ZAH0)
2833 .
Case(
"za1v.h", AArch64::ZAH1)
2834 .
Case(
"za0v.b", AArch64::ZAB0)
2838bool AArch64AsmParser::parseRegister(
MCRegister &RegNo,
SMLoc &StartLoc,
2846 StartLoc = getLoc();
2847 auto Res = tryParseScalarRegister(RegNo);
2853unsigned AArch64AsmParser::matchRegisterNameAlias(
StringRef Name,
2855 unsigned RegNum = 0;
2857 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2860 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2863 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
2866 return Kind == RegKind::NeonVector ? RegNum : 0;
2869 return Kind == RegKind::Matrix ? RegNum : 0;
2871 if (
Name.equals_insensitive(
"zt0"))
2872 return Kind == RegKind::LookupTable ? AArch64::ZT0 : 0;
2876 return (Kind == RegKind::Scalar) ? RegNum : 0;
2881 .
Case(
"fp", AArch64::FP)
2882 .
Case(
"lr", AArch64::LR)
2883 .
Case(
"x31", AArch64::XZR)
2884 .
Case(
"w31", AArch64::WZR)
2886 return Kind == RegKind::Scalar ? RegNum : 0;
2891 auto Entry = RegisterReqs.
find(
Name.lower());
2892 if (Entry == RegisterReqs.
end())
2896 if (Kind == Entry->getValue().first)
2897 RegNum = Entry->getValue().second;
2902unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
2904 case RegKind::Scalar:
2905 case RegKind::NeonVector:
2906 case RegKind::SVEDataVector:
2908 case RegKind::Matrix:
2909 case RegKind::SVEPredicateVector:
2910 case RegKind::SVEPredicateAsCounter:
2912 case RegKind::LookupTable:
2922AArch64AsmParser::tryParseScalarRegister(
MCRegister &RegNum) {
2928 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
2943 Error(S,
"Expected cN operand where 0 <= N <= 15");
2948 if (Tok[0] !=
'c' && Tok[0] !=
'C') {
2949 Error(S,
"Expected cN operand where 0 <= N <= 15");
2955 if (BadNum || CRNum > 15) {
2956 Error(S,
"Expected cN operand where 0 <= N <= 15");
2962 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2972 unsigned MaxVal = 63;
2978 if (getParser().parseExpression(ImmVal))
2983 TokError(
"immediate value expected for prefetch operand");
2987 if (prfop > MaxVal) {
2988 TokError(
"prefetch operand out of range, [0," + utostr(MaxVal) +
2993 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->
getValue());
2994 Operands.push_back(AArch64Operand::CreatePrefetch(
2995 prfop, RPRFM ? RPRFM->Name :
"", S, getContext()));
3000 TokError(
"prefetch hint expected");
3004 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.
getString());
3006 TokError(
"prefetch hint expected");
3010 Operands.push_back(AArch64Operand::CreatePrefetch(
3011 RPRFM->Encoding, Tok.
getString(), S, getContext()));
3017template <
bool IsSVEPrefetch>
3024 if (IsSVEPrefetch) {
3025 if (
auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(
N))
3026 return std::optional<unsigned>(Res->Encoding);
3027 }
else if (
auto Res = AArch64PRFM::lookupPRFMByName(
N))
3028 return std::optional<unsigned>(Res->Encoding);
3029 return std::optional<unsigned>();
3032 auto LookupByEncoding = [](
unsigned E) {
3033 if (IsSVEPrefetch) {
3034 if (
auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(
E))
3035 return std::optional<StringRef>(Res->Name);
3036 }
else if (
auto Res = AArch64PRFM::lookupPRFMByEncoding(
E))
3037 return std::optional<StringRef>(Res->Name);
3038 return std::optional<StringRef>();
3040 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3047 if (getParser().parseExpression(ImmVal))
3052 TokError(
"immediate value expected for prefetch operand");
3056 if (prfop > MaxVal) {
3057 TokError(
"prefetch operand out of range, [0," + utostr(MaxVal) +
3062 auto PRFM = LookupByEncoding(MCE->
getValue());
3063 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(
""),
3069 TokError(
"prefetch hint expected");
3073 auto PRFM = LookupByName(Tok.
getString());
3075 TokError(
"prefetch hint expected");
3079 Operands.push_back(AArch64Operand::CreatePrefetch(
3080 *PRFM, Tok.
getString(), S, getContext()));
3091 TokError(
"invalid operand for instruction");
3095 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.
getString());
3097 TokError(
"invalid operand for instruction");
3101 Operands.push_back(AArch64Operand::CreatePSBHint(
3102 PSB->Encoding, Tok.
getString(), S, getContext()));
3109 SMLoc StartLoc = getLoc();
3115 auto RegTok = getTok();
3119 if (RegNum != AArch64::XZR) {
3120 getLexer().UnLex(RegTok);
3128 TokError(
"expected register operand");
3132 if (RegNum != AArch64::XZR) {
3133 TokError(
"xzr must be followed by xzr");
3139 Operands.push_back(AArch64Operand::CreateReg(
3140 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3151 TokError(
"invalid operand for instruction");
3155 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.
getString());
3157 TokError(
"invalid operand for instruction");
3161 Operands.push_back(AArch64Operand::CreateBTIHint(
3162 BTI->Encoding, Tok.
getString(), S, getContext()));
3172 const MCExpr *Expr =
nullptr;
3178 if (parseSymbolicImmVal(Expr))
3184 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3194 Error(S,
"gotpage label reference not allowed an addend");
3205 Error(S,
"page or gotpage label reference expected");
3214 Operands.push_back(AArch64Operand::CreateImm(Expr, S,
E, getContext()));
3224 const MCExpr *Expr =
nullptr;
3233 if (parseSymbolicImmVal(Expr))
3239 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3246 Error(S,
"unexpected adr label");
3252 Operands.push_back(AArch64Operand::CreateImm(Expr, S,
E, getContext()));
3257template<
bool AddFPZeroAsLiteral>
3271 TokError(
"invalid floating point immediate");
3277 if (Tok.
getIntVal() > 255 || isNegative) {
3278 TokError(
"encoded floating point value out of range");
3284 AArch64Operand::CreateFPImm(
F,
true, S, getContext()));
3287 APFloat RealVal(APFloat::IEEEdouble());
3289 RealVal.convertFromString(Tok.
getString(), APFloat::rmTowardZero);
3291 TokError(
"invalid floating point representation");
3296 RealVal.changeSign();
3298 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3299 Operands.push_back(AArch64Operand::CreateToken(
"#0", S, getContext()));
3300 Operands.push_back(AArch64Operand::CreateToken(
".0", S, getContext()));
3302 Operands.push_back(AArch64Operand::CreateFPImm(
3303 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3328 if (parseSymbolicImmVal(Imm))
3332 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3339 if (!parseOptionalVGOperand(
Operands, VecGroup)) {
3341 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3343 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3349 !getTok().getIdentifier().equals_insensitive(
"lsl")) {
3350 Error(getLoc(),
"only 'lsl #+N' valid after immediate");
3360 Error(getLoc(),
"only 'lsl #+N' valid after immediate");
3364 int64_t ShiftAmount = getTok().getIntVal();
3366 if (ShiftAmount < 0) {
3367 Error(getLoc(),
"positive shift amount required");
3373 if (ShiftAmount == 0 && Imm !=
nullptr) {
3375 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3379 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3380 getLoc(), getContext()));
3387AArch64AsmParser::parseCondCodeString(
StringRef Cond, std::string &Suggestion) {
3424 Suggestion =
"nfrst";
3431 bool invertCondCode) {
3437 std::string Suggestion;
3440 std::string
Msg =
"invalid condition code";
3441 if (!Suggestion.empty())
3442 Msg +=
", did you mean " + Suggestion +
"?";
3443 return TokError(Msg);
3447 if (invertCondCode) {
3449 return TokError(
"condition codes AL and NV are invalid for this instruction");
3454 AArch64Operand::CreateCondCode(
CC, S, getLoc(), getContext()));
3464 TokError(
"invalid operand for instruction");
3468 unsigned PStateImm = -1;
3469 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.
getString());
3472 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3473 PStateImm = SVCR->Encoding;
3476 AArch64Operand::CreateSVCR(PStateImm, Tok.
getString(), S, getContext()));
3488 if (
Name.equals_insensitive(
"za") ||
Name.starts_with_insensitive(
"za.")) {
3490 unsigned ElementWidth = 0;
3491 auto DotPosition =
Name.find(
'.');
3493 const auto &KindRes =
3497 "Expected the register to be followed by element width suffix");
3500 ElementWidth = KindRes->second;
3502 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3503 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3508 if (parseOperand(
Operands,
false,
false))
3515 unsigned Reg = matchRegisterNameAlias(
Name, RegKind::Matrix);
3519 size_t DotPosition =
Name.find(
'.');
3527 .
Case(
"h", MatrixKind::Row)
3528 .
Case(
"v", MatrixKind::Col)
3534 TokError(
"Expected the register to be followed by element width suffix");
3537 unsigned ElementWidth = KindRes->second;
3541 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3542 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3547 if (parseOperand(
Operands,
false,
false))
3589 TokError(
"expected #imm after shift specifier");
3596 AArch64Operand::CreateShiftExtend(ShOp, 0,
false, S,
E, getContext()));
3605 Error(
E,
"expected integer shift amount");
3610 if (getParser().parseExpression(ImmVal))
3615 Error(
E,
"expected constant '#imm' after shift specifier");
3620 Operands.push_back(AArch64Operand::CreateShiftExtend(
3621 ShOp, MCE->
getValue(),
true, S,
E, getContext()));
3629 {
"crc", {AArch64::FeatureCRC}},
3630 {
"sm4", {AArch64::FeatureSM4}},
3631 {
"sha3", {AArch64::FeatureSHA3}},
3632 {
"sha2", {AArch64::FeatureSHA2}},
3633 {
"aes", {AArch64::FeatureAES}},
3634 {
"crypto", {AArch64::FeatureCrypto}},
3635 {
"fp", {AArch64::FeatureFPARMv8}},
3636 {
"simd", {AArch64::FeatureNEON}},
3637 {
"ras", {AArch64::FeatureRAS}},
3638 {
"rasv2", {AArch64::FeatureRASv2}},
3639 {
"lse", {AArch64::FeatureLSE}},
3640 {
"predres", {AArch64::FeaturePredRes}},
3641 {
"predres2", {AArch64::FeatureSPECRES2}},
3642 {
"ccdp", {AArch64::FeatureCacheDeepPersist}},
3643 {
"mte", {AArch64::FeatureMTE}},
3644 {
"memtag", {AArch64::FeatureMTE}},
3645 {
"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3646 {
"pan", {AArch64::FeaturePAN}},
3647 {
"pan-rwv", {AArch64::FeaturePAN_RWV}},
3648 {
"ccpp", {AArch64::FeatureCCPP}},
3649 {
"rcpc", {AArch64::FeatureRCPC}},
3650 {
"rng", {AArch64::FeatureRandGen}},
3651 {
"sve", {AArch64::FeatureSVE}},
3652 {
"sve2", {AArch64::FeatureSVE2}},
3653 {
"sve2-aes", {AArch64::FeatureSVE2AES}},
3654 {
"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3655 {
"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3656 {
"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3657 {
"sve2p1", {AArch64::FeatureSVE2p1}},
3658 {
"b16b16", {AArch64::FeatureB16B16}},
3659 {
"ls64", {AArch64::FeatureLS64}},
3660 {
"xs", {AArch64::FeatureXS}},
3661 {
"pauth", {AArch64::FeaturePAuth}},
3662 {
"flagm", {AArch64::FeatureFlagM}},
3663 {
"rme", {AArch64::FeatureRME}},
3664 {
"sme", {AArch64::FeatureSME}},
3665 {
"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3666 {
"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3667 {
"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3668 {
"sme2", {AArch64::FeatureSME2}},
3669 {
"sme2p1", {AArch64::FeatureSME2p1}},
3670 {
"hbc", {AArch64::FeatureHBC}},
3671 {
"mops", {AArch64::FeatureMOPS}},
3672 {
"mec", {AArch64::FeatureMEC}},
3673 {
"the", {AArch64::FeatureTHE}},
3674 {
"d128", {AArch64::FeatureD128}},
3675 {
"lse128", {AArch64::FeatureLSE128}},
3676 {
"ite", {AArch64::FeatureITE}},
3677 {
"cssc", {AArch64::FeatureCSSC}},
3678 {
"rcpc3", {AArch64::FeatureRCPC3}},
3679 {
"gcs", {AArch64::FeatureGCS}},
3680 {
"bf16", {AArch64::FeatureBF16}},
3681 {
"compnum", {AArch64::FeatureComplxNum}},
3682 {
"dotprod", {AArch64::FeatureDotProd}},
3683 {
"f32mm", {AArch64::FeatureMatMulFP32}},
3684 {
"f64mm", {AArch64::FeatureMatMulFP64}},
3685 {
"fp16", {AArch64::FeatureFullFP16}},
3686 {
"fp16fml", {AArch64::FeatureFP16FML}},
3687 {
"i8mm", {AArch64::FeatureMatMulInt8}},
3688 {
"lor", {AArch64::FeatureLOR}},
3689 {
"profile", {AArch64::FeatureSPE}},
3693 {
"rdm", {AArch64::FeatureRDM}},
3694 {
"rdma", {AArch64::FeatureRDM}},
3695 {
"sb", {AArch64::FeatureSB}},
3696 {
"ssbs", {AArch64::FeatureSSBS}},
3697 {
"tme", {AArch64::FeatureTME}},
3701 if (FBS[AArch64::HasV8_0aOps])
3703 if (FBS[AArch64::HasV8_1aOps])
3705 else if (FBS[AArch64::HasV8_2aOps])
3707 else if (FBS[AArch64::HasV8_3aOps])
3709 else if (FBS[AArch64::HasV8_4aOps])
3711 else if (FBS[AArch64::HasV8_5aOps])
3713 else if (FBS[AArch64::HasV8_6aOps])
3715 else if (FBS[AArch64::HasV8_7aOps])
3717 else if (FBS[AArch64::HasV8_8aOps])
3719 else if (FBS[AArch64::HasV8_9aOps])
3721 else if (FBS[AArch64::HasV9_0aOps])
3723 else if (FBS[AArch64::HasV9_1aOps])
3725 else if (FBS[AArch64::HasV9_2aOps])
3727 else if (FBS[AArch64::HasV9_3aOps])
3729 else if (FBS[AArch64::HasV9_4aOps])
3731 else if (FBS[AArch64::HasV8_0rOps])
3740 Str += !ExtMatches.
empty() ? llvm::join(ExtMatches,
", ") :
"(unknown)";
3747 const uint16_t Cm = (Encoding & 0x78) >> 3;
3748 const uint16_t Cn = (Encoding & 0x780) >> 7;
3749 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3754 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3756 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3758 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3761 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3768 if (
Name.contains(
'.'))
3769 return TokError(
"invalid operand");
3772 Operands.push_back(AArch64Operand::CreateToken(
"sys", NameLoc, getContext()));
3778 if (Mnemonic ==
"ic") {
3781 return TokError(
"invalid operand for IC instruction");
3782 else if (!IC->
haveFeatures(getSTI().getFeatureBits())) {
3783 std::string Str(
"IC " + std::string(IC->
Name) +
" requires: ");
3785 return TokError(Str);
3788 }
else if (Mnemonic ==
"dc") {
3791 return TokError(
"invalid operand for DC instruction");
3792 else if (!DC->
haveFeatures(getSTI().getFeatureBits())) {
3793 std::string Str(
"DC " + std::string(DC->
Name) +
" requires: ");
3795 return TokError(Str);
3798 }
else if (Mnemonic ==
"at") {
3801 return TokError(
"invalid operand for AT instruction");
3802 else if (!AT->
haveFeatures(getSTI().getFeatureBits())) {
3803 std::string Str(
"AT " + std::string(AT->
Name) +
" requires: ");
3805 return TokError(Str);
3808 }
else if (Mnemonic ==
"tlbi") {
3811 return TokError(
"invalid operand for TLBI instruction");
3812 else if (!TLBI->
haveFeatures(getSTI().getFeatureBits())) {
3813 std::string Str(
"TLBI " + std::string(TLBI->
Name) +
" requires: ");
3815 return TokError(Str);
3818 }
else if (Mnemonic ==
"cfp" || Mnemonic ==
"dvp" || Mnemonic ==
"cpp" || Mnemonic ==
"cosp") {
3820 if (
Op.lower() !=
"rctx")
3821 return TokError(
"invalid operand for prediction restriction instruction");
3823 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
3824 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
3825 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
3827 if (Mnemonic ==
"cosp" && !hasSpecres2)
3828 return TokError(
"COSP requires: predres2");
3830 return TokError(Mnemonic.
upper() +
"RCTX requires: predres");
3832 uint16_t PRCTX_Op2 = Mnemonic ==
"cfp" ? 0b100
3833 : Mnemonic ==
"dvp" ? 0b101
3834 : Mnemonic ==
"cosp" ? 0b110
3835 : Mnemonic ==
"cpp" ? 0b111
3838 "Invalid mnemonic for prediction restriction instruction");
3839 const auto SYS_3_7_3 = 0b01101110011;
3840 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
3842 createSysAlias(Encoding,
Operands, S);
3848 bool HasRegister =
false;
3853 return TokError(
"expected register operand");
3857 if (ExpectRegister && !HasRegister)
3858 return TokError(
"specified " + Mnemonic +
" op requires a register");
3859 else if (!ExpectRegister && HasRegister)
3860 return TokError(
"specified " + Mnemonic +
" op does not use a register");
3872 if (
Name.contains(
'.'))
3873 return TokError(
"invalid operand");
3877 AArch64Operand::CreateToken(
"sysp", NameLoc, getContext()));
3883 if (Mnemonic ==
"tlbip") {
3884 bool HasnXSQualifier =
Op.ends_with_insensitive(
"nXS");
3885 if (HasnXSQualifier) {
3886 Op =
Op.drop_back(3);
3890 return TokError(
"invalid operand for TLBIP instruction");
3892 TLBIorig->
Name, TLBIorig->
Encoding | (HasnXSQualifier ? (1 << 7) : 0),
3899 std::string(TLBI.
Name) + (HasnXSQualifier ?
"nXS" :
"");
3900 std::string Str(
"TLBIP " +
Name +
" requires: ");
3902 return TokError(Str);
3913 return TokError(
"expected register identifier");
3918 return TokError(
"specified " + Mnemonic +
3919 " op requires a pair of registers");
3933 TokError(
"'csync' operand expected");
3938 SMLoc ExprLoc = getLoc();
3940 if (getParser().parseExpression(ImmVal))
3944 Error(ExprLoc,
"immediate value expected for barrier operand");
3948 if (Mnemonic ==
"dsb" &&
Value > 15) {
3955 if (Value < 0 || Value > 15) {
3956 Error(ExprLoc,
"barrier operand out of range");
3959 auto DB = AArch64DB::lookupDBByEncoding(
Value);
3960 Operands.push_back(AArch64Operand::CreateBarrier(
Value, DB ?
DB->Name :
"",
3961 ExprLoc, getContext(),
3967 TokError(
"invalid operand for instruction");
3972 auto TSB = AArch64TSB::lookupTSBByName(Operand);
3973 auto DB = AArch64DB::lookupDBByName(Operand);
3975 if (Mnemonic ==
"isb" && (!DB ||
DB->Encoding != AArch64DB::sy)) {
3976 TokError(
"'sy' or #imm operand expected");
3979 }
else if (Mnemonic ==
"tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
3980 TokError(
"'csync' operand expected");
3982 }
else if (!DB && !TSB) {
3983 if (Mnemonic ==
"dsb") {
3988 TokError(
"invalid barrier option name");
3992 Operands.push_back(AArch64Operand::CreateBarrier(
3993 DB ?
DB->Encoding : TSB->Encoding, Tok.
getString(), getLoc(),
3994 getContext(),
false ));
4004 assert(Mnemonic ==
"dsb" &&
"Instruction does not accept nXS operands");
4005 if (Mnemonic !=
"dsb")
4011 SMLoc ExprLoc = getLoc();
4012 if (getParser().parseExpression(ImmVal))
4016 Error(ExprLoc,
"immediate value expected for barrier operand");
4023 Error(ExprLoc,
"barrier operand out of range");
4026 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(
Value);
4027 Operands.push_back(AArch64Operand::CreateBarrier(
DB->Encoding,
DB->Name,
4028 ExprLoc, getContext(),
4034 TokError(
"invalid operand for instruction");
4039 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4042 TokError(
"invalid barrier option name");
4047 AArch64Operand::CreateBarrier(
DB->Encoding, Tok.
getString(), getLoc(),
4048 getContext(),
true ));
4061 if (AArch64SVCR::lookupSVCRByName(Tok.
getString()))
4066 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4067 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4068 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4072 unsigned PStateImm = -1;
4073 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.
getString());
4074 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4075 PStateImm = PState15->Encoding;
4077 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.
getString());
4078 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4079 PStateImm = PState1->Encoding;
4083 AArch64Operand::CreateSysReg(Tok.
getString(), getLoc(), MRSReg, MSRReg,
4084 PStateImm, getContext()));
4100 tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4108 unsigned ElementWidth = KindRes->second;
4110 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4111 S, getLoc(), getContext()));
4116 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4123 SMLoc SIdx = getLoc();
4126 if (getParser().parseExpression(ImmVal))
4130 TokError(
"immediate value expected for vector index");
4139 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->
getValue(), SIdx,
4152 RegKind MatchKind) {
4161 size_t Start = 0, Next =
Name.find(
'.');
4163 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4169 TokError(
"invalid vector kind qualifier");
4186 const SMLoc S = getLoc();
4189 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4197 unsigned ElementWidth = KindRes->second;
4198 Operands.push_back(AArch64Operand::CreateVectorReg(
4199 RegNum, RK, ElementWidth, S,
4200 getLoc(), getContext()));
4203 if (RK == RegKind::SVEPredicateAsCounter) {
4210 if (parseOperand(
Operands,
false,
false))
4220 if (!
Kind.empty()) {
4221 Error(S,
"not expecting size suffix");
4226 Operands.push_back(AArch64Operand::CreateToken(
"/", getLoc(), getContext()));
4231 auto Pred = getTok().getString().lower();
4232 if (RK == RegKind::SVEPredicateAsCounter && Pred !=
"z") {
4233 Error(getLoc(),
"expecting 'z' predication");
4237 if (RK == RegKind::SVEPredicateVector && Pred !=
"z" && Pred !=
"m") {
4238 Error(getLoc(),
"expecting 'm' or 'z' predication");
4243 const char *ZM = Pred ==
"z" ?
"z" :
"m";
4244 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4253 if (!tryParseNeonVectorRegister(
Operands))
4266bool AArch64AsmParser::parseSymbolicImmVal(
const MCExpr *&ImmVal) {
4267 bool HasELFModifier =
false;
4271 HasELFModifier =
true;
4274 return TokError(
"expect relocation specifier in operand after ':'");
4276 std::string LowerCase = getTok().getIdentifier().lower();
4327 return TokError(
"expect relocation specifier in operand after ':'");
4331 if (parseToken(
AsmToken::Colon,
"expect ':' after relocation specifier"))
4335 if (getParser().parseExpression(ImmVal))
4349 auto ParseMatrixTile = [
this](
unsigned &
Reg,
unsigned &ElementWidth) {
4351 size_t DotPosition =
Name.find(
'.');
4360 const std::optional<std::pair<int, int>> &KindRes =
4363 TokError(
"Expected the register to be followed by element width suffix");
4366 ElementWidth = KindRes->second;
4373 auto LCurly = getTok();
4378 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4379 0, S, getLoc(), getContext()));
4384 if (getTok().getString().equals_insensitive(
"za")) {
4390 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4391 0xFF, S, getLoc(), getContext()));
4395 SMLoc TileLoc = getLoc();
4397 unsigned FirstReg, ElementWidth;
4398 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4400 getLexer().UnLex(LCurly);
4406 unsigned PrevReg = FirstReg;
4409 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4412 SeenRegs.
insert(FirstReg);
4416 unsigned Reg, NextElementWidth;
4417 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4422 if (ElementWidth != NextElementWidth) {
4423 Error(TileLoc,
"mismatched register size suffix");
4428 Warning(TileLoc,
"tile list not in ascending order");
4431 Warning(TileLoc,
"duplicate tile in list");
4434 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4443 unsigned RegMask = 0;
4444 for (
auto Reg : DRegs)
4448 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4453template <RegKind VectorKind>
4463 bool NoMatchIsError) {
4464 auto RegTok = getTok();
4465 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4473 RegTok.getString().equals_insensitive(
"zt0"))
4479 !RegTok.getString().starts_with_insensitive(
"za"))) {
4480 Error(Loc,
"vector register expected");
4487 int NumRegs = getNumRegsForRegKind(VectorKind);
4489 auto LCurly = getTok();
4494 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4504 int64_t PrevReg = FirstReg;
4509 SMLoc Loc = getLoc();
4513 ParseRes = ParseVector(Reg, NextKind, getLoc(),
true);
4518 if (Kind != NextKind) {
4519 Error(Loc,
"mismatched register size suffix");
4524 (PrevReg <
Reg) ? (Reg - PrevReg) : (
Reg + NumRegs - PrevReg);
4526 if (Space == 0 || Space > 3) {
4527 Error(Loc,
"invalid number of vectors");
4534 bool HasCalculatedStride =
false;
4536 SMLoc Loc = getLoc();
4539 ParseRes = ParseVector(Reg, NextKind, getLoc(),
true);
4544 if (Kind != NextKind) {
4545 Error(Loc,
"mismatched register size suffix");
4549 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4550 unsigned PrevRegVal =
4551 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4552 if (!HasCalculatedStride) {
4553 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4554 : (RegVal + NumRegs - PrevRegVal);
4555 HasCalculatedStride =
true;
4559 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs)) {
4560 Error(Loc,
"registers must have the same sequential stride");
4573 Error(S,
"invalid number of vectors");
4577 unsigned NumElements = 0;
4578 unsigned ElementWidth = 0;
4579 if (!
Kind.empty()) {
4581 std::tie(NumElements, ElementWidth) = *VK;
4584 Operands.push_back(AArch64Operand::CreateVectorList(
4585 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4586 getLoc(), getContext()));
4593 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(
Operands,
true);
4602 SMLoc StartLoc = getLoc();
4610 Operands.push_back(AArch64Operand::CreateReg(
4611 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4618 Error(getLoc(),
"index must be absent or #0");
4623 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4624 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
4625 Error(getLoc(),
"index must be absent or #0");
4629 Operands.push_back(AArch64Operand::CreateReg(
4630 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4636 SMLoc StartLoc = getLoc();
4640 unsigned RegNum = matchRegisterNameAlias(
Name, RegKind::LookupTable);
4645 Operands.push_back(AArch64Operand::CreateReg(
4646 RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
4652 if (getParser().parseExpression(ImmVal))
4656 TokError(
"immediate value expected for vector index");
4662 Operands.push_back(AArch64Operand::CreateImm(
4664 getLoc(), getContext()));
4670template <
bool ParseShiftExtend, RegConstra
intEqualityTy EqTy>
4673 SMLoc StartLoc = getLoc();
4682 Operands.push_back(AArch64Operand::CreateReg(
4683 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4692 Res = tryParseOptionalShiftExtend(ExtOpnd);
4696 auto Ext =
static_cast<AArch64Operand*
>(ExtOpnd.
back().get());
4697 Operands.push_back(AArch64Operand::CreateReg(
4698 RegNum, RegKind::Scalar, StartLoc,
Ext->getEndLoc(), getContext(), EqTy,
4699 Ext->getShiftExtendType(),
Ext->getShiftExtendAmount(),
4700 Ext->hasShiftExtendAmount()));
4714 if (!getTok().getString().equals_insensitive(
"mul") ||
4715 !(NextIsVL || NextIsHash))
4719 AArch64Operand::CreateToken(
"mul", getLoc(), getContext()));
4724 AArch64Operand::CreateToken(
"vl", getLoc(), getContext()));
4736 if (
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4737 Operands.push_back(AArch64Operand::CreateImm(
4744 return Error(getLoc(),
"expected 'vl' or '#<imm>'");
4750 auto Tok = Parser.
getTok();
4755 .
Case(
"vgx2",
"vgx2")
4756 .
Case(
"vgx4",
"vgx4")
4768 auto Tok = getTok();
4778 AArch64Operand::CreateToken(Keyword, Tok.
getLoc(), getContext()));
4787 bool invertCondCode) {
4791 MatchOperandParserImpl(
Operands, Mnemonic,
true);
4805 switch (getLexer().getKind()) {
4809 if (parseSymbolicImmVal(Expr))
4810 return Error(S,
"invalid operand");
4813 Operands.push_back(AArch64Operand::CreateImm(Expr, S,
E, getContext()));
4818 AArch64Operand::CreateToken(
"[", getLoc(), getContext()));
4823 return parseOperand(
Operands,
false,
false);
4826 if (!parseNeonVectorList(
Operands))
4830 AArch64Operand::CreateToken(
"{", getLoc(), getContext()));
4835 return parseOperand(
Operands,
false,
false);
4840 if (!parseOptionalVGOperand(
Operands, VecGroup)) {
4842 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
4847 return parseCondCode(
Operands, invertCondCode);
4855 if (!parseOptionalMulOperand(
Operands))
4866 if (Mnemonic ==
"brb" || Mnemonic ==
"smstart" || Mnemonic ==
"smstop" ||
4868 return parseKeywordOperand(
Operands);
4874 if (getParser().parseExpression(IdVal))
4877 Operands.push_back(AArch64Operand::CreateImm(IdVal, S,
E, getContext()));