71 SVEPredicateAsCounter,
77enum class MatrixKind {
Array, Tile, Row, Col };
79enum RegConstraintEqualityTy {
94 static PrefixInfo CreateFromInst(
const MCInst &Inst,
uint64_t TSFlags) {
97 case AArch64::MOVPRFX_ZZ:
101 case AArch64::MOVPRFX_ZPmZ_B:
102 case AArch64::MOVPRFX_ZPmZ_H:
103 case AArch64::MOVPRFX_ZPmZ_S:
104 case AArch64::MOVPRFX_ZPmZ_D:
109 "No destructive element size set for movprfx");
113 case AArch64::MOVPRFX_ZPzZ_B:
114 case AArch64::MOVPRFX_ZPzZ_H:
115 case AArch64::MOVPRFX_ZPzZ_S:
116 case AArch64::MOVPRFX_ZPzZ_D:
121 "No destructive element size set for movprfx");
132 PrefixInfo() =
default;
133 bool isActive()
const {
return Active; }
135 unsigned getElementSize()
const {
139 unsigned getDstReg()
const {
return Dst; }
140 unsigned getPgReg()
const {
148 unsigned ElementSize;
164 std::string &Suggestion);
166 unsigned matchRegisterNameAlias(
StringRef Name, RegKind Kind);
168 bool parseSymbolicImmVal(
const MCExpr *&ImmVal);
174 bool invertCondCode);
175 bool parseImmExpr(int64_t &Out);
177 bool parseRegisterInRange(
unsigned &Out,
unsigned Base,
unsigned First,
183 bool parseAuthExpr(
const MCExpr *&Res,
SMLoc &EndLoc);
185 bool parseDirectiveArch(
SMLoc L);
186 bool parseDirectiveArchExtension(
SMLoc L);
187 bool parseDirectiveCPU(
SMLoc L);
188 bool parseDirectiveInst(
SMLoc L);
190 bool parseDirectiveTLSDescCall(
SMLoc L);
193 bool parseDirectiveLtorg(
SMLoc L);
196 bool parseDirectiveUnreq(
SMLoc L);
197 bool parseDirectiveCFINegateRAState();
198 bool parseDirectiveCFIBKeyFrame();
199 bool parseDirectiveCFIMTETaggedFrame();
201 bool parseDirectiveVariantPCS(
SMLoc L);
203 bool parseDirectiveSEHAllocStack(
SMLoc L);
204 bool parseDirectiveSEHPrologEnd(
SMLoc L);
205 bool parseDirectiveSEHSaveR19R20X(
SMLoc L);
206 bool parseDirectiveSEHSaveFPLR(
SMLoc L);
207 bool parseDirectiveSEHSaveFPLRX(
SMLoc L);
208 bool parseDirectiveSEHSaveReg(
SMLoc L);
209 bool parseDirectiveSEHSaveRegX(
SMLoc L);
210 bool parseDirectiveSEHSaveRegP(
SMLoc L);
211 bool parseDirectiveSEHSaveRegPX(
SMLoc L);
212 bool parseDirectiveSEHSaveLRPair(
SMLoc L);
213 bool parseDirectiveSEHSaveFReg(
SMLoc L);
214 bool parseDirectiveSEHSaveFRegX(
SMLoc L);
215 bool parseDirectiveSEHSaveFRegP(
SMLoc L);
216 bool parseDirectiveSEHSaveFRegPX(
SMLoc L);
217 bool parseDirectiveSEHSetFP(
SMLoc L);
218 bool parseDirectiveSEHAddFP(
SMLoc L);
219 bool parseDirectiveSEHNop(
SMLoc L);
220 bool parseDirectiveSEHSaveNext(
SMLoc L);
221 bool parseDirectiveSEHEpilogStart(
SMLoc L);
222 bool parseDirectiveSEHEpilogEnd(
SMLoc L);
223 bool parseDirectiveSEHTrapFrame(
SMLoc L);
224 bool parseDirectiveSEHMachineFrame(
SMLoc L);
225 bool parseDirectiveSEHContext(
SMLoc L);
226 bool parseDirectiveSEHECContext(
SMLoc L);
227 bool parseDirectiveSEHClearUnwoundToCall(
SMLoc L);
228 bool parseDirectiveSEHPACSignLR(
SMLoc L);
229 bool parseDirectiveSEHSaveAnyReg(
SMLoc L,
bool Paired,
bool Writeback);
231 bool validateInstruction(
MCInst &Inst,
SMLoc &IDLoc,
233 unsigned getNumRegsForRegKind(RegKind K);
237 bool MatchingInlineAsm)
override;
241#define GET_ASSEMBLER_HEADER
242#include "AArch64GenAsmMatcher.inc"
256 template <
bool IsSVEPrefetch = false>
263 template <
bool AddFPZeroAsLiteral>
271 template <
bool ParseShiftExtend,
272 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
275 template <
bool ParseShiftExtend,
bool ParseSuffix>
277 template <RegKind RK>
281 template <RegKind VectorKind>
283 bool ExpectMatch =
false);
291 enum AArch64MatchResultTy {
293#define GET_OPERAND_DIAGNOSTIC_TYPES
294#include "AArch64GenAsmMatcher.inc"
297 bool IsWindowsArm64EC;
328 SMLoc &EndLoc)
override;
331 unsigned Kind)
override;
335 static bool classifySymbolRef(
const MCExpr *Expr,
367 SMLoc StartLoc, EndLoc;
376 struct ShiftExtendOp {
379 bool HasExplicitAmount;
389 RegConstraintEqualityTy EqualityTy;
405 ShiftExtendOp ShiftExtend;
410 unsigned ElementWidth;
414 struct MatrixTileListOp {
415 unsigned RegMask = 0;
418 struct VectorListOp {
422 unsigned NumElements;
423 unsigned ElementWidth;
424 RegKind RegisterKind;
427 struct VectorIndexOp {
435 struct ShiftedImmOp {
437 unsigned ShiftAmount;
494 unsigned PStateField;
500 struct MatrixRegOp MatrixReg;
501 struct MatrixTileListOp MatrixTileList;
502 struct VectorListOp VectorList;
503 struct VectorIndexOp VectorIndex;
505 struct ShiftedImmOp ShiftedImm;
506 struct ImmRangeOp ImmRange;
508 struct FPImmOp FPImm;
510 struct SysRegOp SysReg;
511 struct SysCRImmOp SysCRImm;
513 struct PSBHintOp PSBHint;
514 struct BTIHintOp BTIHint;
515 struct ShiftExtendOp ShiftExtend;
528 StartLoc =
o.StartLoc;
538 ShiftedImm =
o.ShiftedImm;
541 ImmRange =
o.ImmRange;
555 case k_MatrixRegister:
556 MatrixReg =
o.MatrixReg;
558 case k_MatrixTileList:
559 MatrixTileList =
o.MatrixTileList;
562 VectorList =
o.VectorList;
565 VectorIndex =
o.VectorIndex;
571 SysCRImm =
o.SysCRImm;
583 ShiftExtend =
o.ShiftExtend;
592 SMLoc getStartLoc()
const override {
return StartLoc; }
594 SMLoc getEndLoc()
const override {
return EndLoc; }
597 assert(Kind == k_Token &&
"Invalid access!");
601 bool isTokenSuffix()
const {
602 assert(Kind == k_Token &&
"Invalid access!");
606 const MCExpr *getImm()
const {
607 assert(Kind == k_Immediate &&
"Invalid access!");
611 const MCExpr *getShiftedImmVal()
const {
612 assert(Kind == k_ShiftedImm &&
"Invalid access!");
613 return ShiftedImm.Val;
616 unsigned getShiftedImmShift()
const {
617 assert(Kind == k_ShiftedImm &&
"Invalid access!");
618 return ShiftedImm.ShiftAmount;
621 unsigned getFirstImmVal()
const {
622 assert(Kind == k_ImmRange &&
"Invalid access!");
623 return ImmRange.First;
626 unsigned getLastImmVal()
const {
627 assert(Kind == k_ImmRange &&
"Invalid access!");
628 return ImmRange.Last;
632 assert(Kind == k_CondCode &&
"Invalid access!");
637 assert (Kind == k_FPImm &&
"Invalid access!");
638 return APFloat(APFloat::IEEEdouble(),
APInt(64, FPImm.Val,
true));
641 bool getFPImmIsExact()
const {
642 assert (Kind == k_FPImm &&
"Invalid access!");
643 return FPImm.IsExact;
646 unsigned getBarrier()
const {
647 assert(Kind == k_Barrier &&
"Invalid access!");
652 assert(Kind == k_Barrier &&
"Invalid access!");
656 bool getBarriernXSModifier()
const {
657 assert(Kind == k_Barrier &&
"Invalid access!");
662 assert(Kind == k_Register &&
"Invalid access!");
666 unsigned getMatrixReg()
const {
667 assert(Kind == k_MatrixRegister &&
"Invalid access!");
668 return MatrixReg.RegNum;
671 unsigned getMatrixElementWidth()
const {
672 assert(Kind == k_MatrixRegister &&
"Invalid access!");
673 return MatrixReg.ElementWidth;
676 MatrixKind getMatrixKind()
const {
677 assert(Kind == k_MatrixRegister &&
"Invalid access!");
678 return MatrixReg.Kind;
681 unsigned getMatrixTileListRegMask()
const {
682 assert(isMatrixTileList() &&
"Invalid access!");
683 return MatrixTileList.RegMask;
686 RegConstraintEqualityTy getRegEqualityTy()
const {
687 assert(Kind == k_Register &&
"Invalid access!");
688 return Reg.EqualityTy;
691 unsigned getVectorListStart()
const {
692 assert(Kind == k_VectorList &&
"Invalid access!");
693 return VectorList.RegNum;
696 unsigned getVectorListCount()
const {
697 assert(Kind == k_VectorList &&
"Invalid access!");
698 return VectorList.Count;
701 unsigned getVectorListStride()
const {
702 assert(Kind == k_VectorList &&
"Invalid access!");
703 return VectorList.Stride;
706 int getVectorIndex()
const {
707 assert(Kind == k_VectorIndex &&
"Invalid access!");
708 return VectorIndex.Val;
712 assert(Kind == k_SysReg &&
"Invalid access!");
713 return StringRef(SysReg.Data, SysReg.Length);
716 unsigned getSysCR()
const {
717 assert(Kind == k_SysCR &&
"Invalid access!");
721 unsigned getPrefetch()
const {
722 assert(Kind == k_Prefetch &&
"Invalid access!");
726 unsigned getPSBHint()
const {
727 assert(Kind == k_PSBHint &&
"Invalid access!");
732 assert(Kind == k_PSBHint &&
"Invalid access!");
733 return StringRef(PSBHint.Data, PSBHint.Length);
736 unsigned getBTIHint()
const {
737 assert(Kind == k_BTIHint &&
"Invalid access!");
742 assert(Kind == k_BTIHint &&
"Invalid access!");
743 return StringRef(BTIHint.Data, BTIHint.Length);
747 assert(Kind == k_SVCR &&
"Invalid access!");
748 return StringRef(SVCR.Data, SVCR.Length);
752 assert(Kind == k_Prefetch &&
"Invalid access!");
757 if (Kind == k_ShiftExtend)
758 return ShiftExtend.Type;
759 if (Kind == k_Register)
760 return Reg.ShiftExtend.Type;
764 unsigned getShiftExtendAmount()
const {
765 if (Kind == k_ShiftExtend)
766 return ShiftExtend.Amount;
767 if (Kind == k_Register)
768 return Reg.ShiftExtend.Amount;
772 bool hasShiftExtendAmount()
const {
773 if (Kind == k_ShiftExtend)
774 return ShiftExtend.HasExplicitAmount;
775 if (Kind == k_Register)
776 return Reg.ShiftExtend.HasExplicitAmount;
780 bool isImm()
const override {
return Kind == k_Immediate; }
781 bool isMem()
const override {
return false; }
783 bool isUImm6()
const {
790 return (Val >= 0 && Val < 64);
793 template <
int W
idth>
bool isSImm()
const {
return isSImmScaled<Width, 1>(); }
796 return isImmScaled<Bits, Scale>(
true);
799 template <
int Bits,
int Scale,
int Offset = 0,
bool IsRange = false>
801 if (IsRange && isImmRange() &&
802 (getLastImmVal() != getFirstImmVal() +
Offset))
803 return DiagnosticPredicateTy::NoMatch;
805 return isImmScaled<Bits, Scale, IsRange>(
false);
808 template <
int Bits,
int Scale,
bool IsRange = false>
810 if ((!
isImm() && !isImmRange()) || (
isImm() && IsRange) ||
811 (isImmRange() && !IsRange))
812 return DiagnosticPredicateTy::NoMatch;
816 Val = getFirstImmVal();
820 return DiagnosticPredicateTy::NoMatch;
824 int64_t MinVal, MaxVal;
826 int64_t Shift =
Bits - 1;
827 MinVal = (int64_t(1) << Shift) * -Scale;
828 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
831 MaxVal = ((int64_t(1) <<
Bits) - 1) * Scale;
834 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
835 return DiagnosticPredicateTy::Match;
837 return DiagnosticPredicateTy::NearMatch;
842 return DiagnosticPredicateTy::NoMatch;
843 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
845 return DiagnosticPredicateTy::NoMatch;
847 if (Val >= 0 && Val < 32)
848 return DiagnosticPredicateTy::Match;
849 return DiagnosticPredicateTy::NearMatch;
854 return DiagnosticPredicateTy::NoMatch;
855 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
857 return DiagnosticPredicateTy::NoMatch;
859 if (Val >= 0 && Val <= 1)
860 return DiagnosticPredicateTy::Match;
861 return DiagnosticPredicateTy::NearMatch;
864 bool isSymbolicUImm12Offset(
const MCExpr *Expr)
const {
868 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
900 template <
int Scale>
bool isUImm12Offset()
const {
906 return isSymbolicUImm12Offset(getImm());
909 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
912 template <
int N,
int M>
913 bool isImmInRange()
const {
920 return (Val >=
N && Val <= M);
925 template <
typename T>
926 bool isLogicalImm()
const {
943 bool isShiftedImm()
const {
return Kind == k_ShiftedImm; }
945 bool isImmRange()
const {
return Kind == k_ImmRange; }
950 template <
unsigned W
idth>
951 std::optional<std::pair<int64_t, unsigned>> getShiftedVal()
const {
952 if (isShiftedImm() && Width == getShiftedImmShift())
953 if (
auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
954 return std::make_pair(
CE->getValue(), Width);
957 if (
auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
958 int64_t Val =
CE->getValue();
960 return std::make_pair(Val >> Width, Width);
962 return std::make_pair(Val, 0u);
968 bool isAddSubImm()
const {
969 if (!isShiftedImm() && !
isImm())
975 if (isShiftedImm()) {
976 unsigned Shift = ShiftedImm.ShiftAmount;
977 Expr = ShiftedImm.Val;
978 if (Shift != 0 && Shift != 12)
987 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
988 DarwinRefKind, Addend)) {
1005 if (
auto ShiftedVal = getShiftedVal<12>())
1006 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
1013 bool isAddSubImmNeg()
const {
1014 if (!isShiftedImm() && !
isImm())
1018 if (
auto ShiftedVal = getShiftedVal<12>())
1019 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1029 template <
typename T>
1031 if (!isShiftedImm() && (!
isImm() || !isa<MCConstantExpr>(getImm())))
1032 return DiagnosticPredicateTy::NoMatch;
1034 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>
::value ||
1035 std::is_same<int8_t, T>::value;
1036 if (
auto ShiftedImm = getShiftedVal<8>())
1037 if (!(IsByte && ShiftedImm->second) &&
1038 AArch64_AM::isSVECpyImm<T>(
uint64_t(ShiftedImm->first)
1039 << ShiftedImm->second))
1040 return DiagnosticPredicateTy::Match;
1042 return DiagnosticPredicateTy::NearMatch;
1049 if (!isShiftedImm() && (!
isImm() || !isa<MCConstantExpr>(getImm())))
1050 return DiagnosticPredicateTy::NoMatch;
1052 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>
::value ||
1053 std::is_same<int8_t, T>::value;
1054 if (
auto ShiftedImm = getShiftedVal<8>())
1055 if (!(IsByte && ShiftedImm->second) &&
1056 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
1057 << ShiftedImm->second))
1058 return DiagnosticPredicateTy::Match;
1060 return DiagnosticPredicateTy::NearMatch;
1064 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1065 return DiagnosticPredicateTy::Match;
1066 return DiagnosticPredicateTy::NoMatch;
1069 bool isCondCode()
const {
return Kind == k_CondCode; }
1071 bool isSIMDImmType10()
const {
1081 bool isBranchTarget()
const {
1090 assert(
N > 0 &&
"Branch target immediate cannot be 0 bits!");
1091 return (Val >= -((1<<(
N-1)) << 2) && Val <= (((1<<(
N-1))-1) << 2));
1102 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
1103 DarwinRefKind, Addend)) {
1112 bool isMovWSymbolG3()
const {
1116 bool isMovWSymbolG2()
const {
1117 return isMovWSymbol(
1124 bool isMovWSymbolG1()
const {
1125 return isMovWSymbol(
1133 bool isMovWSymbolG0()
const {
1134 return isMovWSymbol(
1142 template<
int RegW
idth,
int Shift>
1143 bool isMOVZMovAlias()
const {
1144 if (!
isImm())
return false;
1157 template<
int RegW
idth,
int Shift>
1158 bool isMOVNMovAlias()
const {
1159 if (!
isImm())
return false;
1162 if (!CE)
return false;
1168 bool isFPImm()
const {
1169 return Kind == k_FPImm &&
1173 bool isBarrier()
const {
1174 return Kind == k_Barrier && !getBarriernXSModifier();
1176 bool isBarriernXS()
const {
1177 return Kind == k_Barrier && getBarriernXSModifier();
1179 bool isSysReg()
const {
return Kind == k_SysReg; }
1181 bool isMRSSystemRegister()
const {
1182 if (!isSysReg())
return false;
1184 return SysReg.MRSReg != -1U;
1187 bool isMSRSystemRegister()
const {
1188 if (!isSysReg())
return false;
1189 return SysReg.MSRReg != -1U;
1192 bool isSystemPStateFieldWithImm0_1()
const {
1193 if (!isSysReg())
return false;
1194 return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
1197 bool isSystemPStateFieldWithImm0_15()
const {
1200 return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
1203 bool isSVCR()
const {
1206 return SVCR.PStateField != -1U;
1209 bool isReg()
const override {
1210 return Kind == k_Register;
1213 bool isVectorList()
const {
return Kind == k_VectorList; }
1215 bool isScalarReg()
const {
1216 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar;
1219 bool isNeonVectorReg()
const {
1220 return Kind == k_Register &&
Reg.Kind == RegKind::NeonVector;
1223 bool isNeonVectorRegLo()
const {
1224 return Kind == k_Register &&
Reg.Kind == RegKind::NeonVector &&
1225 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1227 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1231 bool isNeonVectorReg0to7()
const {
1232 return Kind == k_Register &&
Reg.Kind == RegKind::NeonVector &&
1233 (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
1237 bool isMatrix()
const {
return Kind == k_MatrixRegister; }
1238 bool isMatrixTileList()
const {
return Kind == k_MatrixTileList; }
1240 template <
unsigned Class>
bool isSVEPredicateAsCounterReg()
const {
1243 case AArch64::PPRRegClassID:
1244 case AArch64::PPR_3bRegClassID:
1245 case AArch64::PPR_p8to15RegClassID:
1246 case AArch64::PNRRegClassID:
1247 case AArch64::PNR_p8to15RegClassID:
1248 case AArch64::PPRorPNRRegClassID:
1249 RK = RegKind::SVEPredicateAsCounter;
1255 return (Kind == k_Register &&
Reg.Kind == RK) &&
1256 AArch64MCRegisterClasses[
Class].contains(
getReg());
1259 template <
unsigned Class>
bool isSVEVectorReg()
const {
1262 case AArch64::ZPRRegClassID:
1263 case AArch64::ZPR_3bRegClassID:
1264 case AArch64::ZPR_4bRegClassID:
1265 RK = RegKind::SVEDataVector;
1267 case AArch64::PPRRegClassID:
1268 case AArch64::PPR_3bRegClassID:
1269 case AArch64::PPR_p8to15RegClassID:
1270 case AArch64::PNRRegClassID:
1271 case AArch64::PNR_p8to15RegClassID:
1272 case AArch64::PPRorPNRRegClassID:
1273 RK = RegKind::SVEPredicateVector;
1279 return (Kind == k_Register &&
Reg.Kind == RK) &&
1280 AArch64MCRegisterClasses[
Class].contains(
getReg());
1283 template <
unsigned Class>
bool isFPRasZPR()
const {
1284 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1285 AArch64MCRegisterClasses[
Class].contains(
getReg());
1288 template <
int ElementW
idth,
unsigned Class>
1290 if (Kind != k_Register ||
Reg.Kind != RegKind::SVEPredicateVector)
1291 return DiagnosticPredicateTy::NoMatch;
1293 if (isSVEVectorReg<Class>() && (
Reg.ElementWidth == ElementWidth))
1294 return DiagnosticPredicateTy::Match;
1296 return DiagnosticPredicateTy::NearMatch;
1299 template <
int ElementW
idth,
unsigned Class>
1301 if (Kind != k_Register || (
Reg.Kind != RegKind::SVEPredicateAsCounter &&
1302 Reg.Kind != RegKind::SVEPredicateVector))
1303 return DiagnosticPredicateTy::NoMatch;
1305 if ((isSVEPredicateAsCounterReg<Class>() ||
1306 isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
1307 Reg.ElementWidth == ElementWidth)
1308 return DiagnosticPredicateTy::Match;
1310 return DiagnosticPredicateTy::NearMatch;
1313 template <
int ElementW
idth,
unsigned Class>
1315 if (Kind != k_Register ||
Reg.Kind != RegKind::SVEPredicateAsCounter)
1316 return DiagnosticPredicateTy::NoMatch;
1318 if (isSVEPredicateAsCounterReg<Class>() && (
Reg.ElementWidth == ElementWidth))
1319 return DiagnosticPredicateTy::Match;
1321 return DiagnosticPredicateTy::NearMatch;
1324 template <
int ElementW
idth,
unsigned Class>
1326 if (Kind != k_Register ||
Reg.Kind != RegKind::SVEDataVector)
1327 return DiagnosticPredicateTy::NoMatch;
1329 if (isSVEVectorReg<Class>() &&
Reg.ElementWidth == ElementWidth)
1330 return DiagnosticPredicateTy::Match;
1332 return DiagnosticPredicateTy::NearMatch;
1335 template <
int ElementWidth,
unsigned Class,
1337 bool ShiftWidthAlwaysSame>
1339 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1340 if (!VectorMatch.isMatch())
1341 return DiagnosticPredicateTy::NoMatch;
1346 bool MatchShift = getShiftExtendAmount() ==
Log2_32(ShiftWidth / 8);
1349 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1350 return DiagnosticPredicateTy::NoMatch;
1352 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1353 return DiagnosticPredicateTy::Match;
1355 return DiagnosticPredicateTy::NearMatch;
1358 bool isGPR32as64()
const {
1359 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1360 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(
Reg.RegNum);
1363 bool isGPR64as32()
const {
1364 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1365 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(
Reg.RegNum);
1368 bool isGPR64x8()
const {
1369 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1370 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1374 bool isWSeqPair()
const {
1375 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1376 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1380 bool isXSeqPair()
const {
1381 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1382 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1386 bool isSyspXzrPair()
const {
1387 return isGPR64<AArch64::GPR64RegClassID>() &&
Reg.RegNum == AArch64::XZR;
1390 template<
int64_t Angle,
int64_t Remainder>
1392 if (!
isImm())
return DiagnosticPredicateTy::NoMatch;
1395 if (!CE)
return DiagnosticPredicateTy::NoMatch;
1398 if (
Value % Angle == Remainder &&
Value <= 270)
1399 return DiagnosticPredicateTy::Match;
1400 return DiagnosticPredicateTy::NearMatch;
1403 template <
unsigned RegClassID>
bool isGPR64()
const {
1404 return Kind == k_Register &&
Reg.Kind == RegKind::Scalar &&
1405 AArch64MCRegisterClasses[RegClassID].contains(
getReg());
1408 template <
unsigned RegClassID,
int ExtW
idth>
1410 if (Kind != k_Register ||
Reg.Kind != RegKind::Scalar)
1411 return DiagnosticPredicateTy::NoMatch;
1413 if (isGPR64<RegClassID>() && getShiftExtendType() ==
AArch64_AM::LSL &&
1414 getShiftExtendAmount() ==
Log2_32(ExtWidth / 8))
1415 return DiagnosticPredicateTy::Match;
1416 return DiagnosticPredicateTy::NearMatch;
1421 template <RegKind VectorKind,
unsigned NumRegs>
1422 bool isImplicitlyTypedVectorList()
const {
1423 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1424 VectorList.NumElements == 0 &&
1425 VectorList.RegisterKind == VectorKind;
1428 template <RegKind VectorKind,
unsigned NumRegs,
unsigned NumElements,
1429 unsigned ElementWidth,
unsigned Stride = 1>
1430 bool isTypedVectorList()
const {
1431 if (Kind != k_VectorList)
1433 if (VectorList.Count != NumRegs)
1435 if (VectorList.RegisterKind != VectorKind)
1437 if (VectorList.ElementWidth != ElementWidth)
1439 if (VectorList.Stride != Stride)
1441 return VectorList.NumElements == NumElements;
1444 template <RegKind VectorKind,
unsigned NumRegs,
unsigned NumElements,
1445 unsigned ElementWidth>
1448 isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
1450 return DiagnosticPredicateTy::NoMatch;
1451 if (((VectorList.RegNum - AArch64::Z0) % NumRegs) != 0)
1452 return DiagnosticPredicateTy::NearMatch;
1453 return DiagnosticPredicateTy::Match;
1456 template <RegKind VectorKind,
unsigned NumRegs,
unsigned Stride,
1457 unsigned ElementWidth>
1459 bool Res = isTypedVectorList<VectorKind, NumRegs, 0,
1460 ElementWidth, Stride>();
1462 return DiagnosticPredicateTy::NoMatch;
1463 if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
1464 ((VectorList.RegNum >= AArch64::Z16) &&
1465 (VectorList.RegNum < (AArch64::Z16 + Stride))))
1466 return DiagnosticPredicateTy::Match;
1467 return DiagnosticPredicateTy::NoMatch;
1470 template <
int Min,
int Max>
1472 if (Kind != k_VectorIndex)
1473 return DiagnosticPredicateTy::NoMatch;
1474 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1475 return DiagnosticPredicateTy::Match;
1476 return DiagnosticPredicateTy::NearMatch;
1479 bool isToken()
const override {
return Kind == k_Token; }
1481 bool isTokenEqual(
StringRef Str)
const {
1482 return Kind == k_Token && getToken() == Str;
1484 bool isSysCR()
const {
return Kind == k_SysCR; }
1485 bool isPrefetch()
const {
return Kind == k_Prefetch; }
1486 bool isPSBHint()
const {
return Kind == k_PSBHint; }
1487 bool isBTIHint()
const {
return Kind == k_BTIHint; }
1488 bool isShiftExtend()
const {
return Kind == k_ShiftExtend; }
1489 bool isShifter()
const {
1490 if (!isShiftExtend())
1500 if (Kind != k_FPImm)
1501 return DiagnosticPredicateTy::NoMatch;
1503 if (getFPImmIsExact()) {
1505 auto *
Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1509 APFloat RealVal(APFloat::IEEEdouble());
1511 RealVal.convertFromString(
Desc->Repr, APFloat::rmTowardZero);
1512 if (
errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1515 if (
getFPImm().bitwiseIsEqual(RealVal))
1516 return DiagnosticPredicateTy::Match;
1519 return DiagnosticPredicateTy::NearMatch;
1522 template <
unsigned ImmA,
unsigned ImmB>
1525 if ((Res = isExactFPImm<ImmA>()))
1526 return DiagnosticPredicateTy::Match;
1527 if ((Res = isExactFPImm<ImmB>()))
1528 return DiagnosticPredicateTy::Match;
1532 bool isExtend()
const {
1533 if (!isShiftExtend())
1542 getShiftExtendAmount() <= 4;
1545 bool isExtend64()
const {
1555 bool isExtendLSL64()
const {
1561 getShiftExtendAmount() <= 4;
1564 bool isLSLImm3Shift()
const {
1565 if (!isShiftExtend())
1571 template<
int W
idth>
bool isMemXExtend()
const {
1576 (getShiftExtendAmount() ==
Log2_32(Width / 8) ||
1577 getShiftExtendAmount() == 0);
1580 template<
int W
idth>
bool isMemWExtend()
const {
1585 (getShiftExtendAmount() ==
Log2_32(Width / 8) ||
1586 getShiftExtendAmount() == 0);
1589 template <
unsigned w
idth>
1590 bool isArithmeticShifter()
const {
1600 template <
unsigned w
idth>
1601 bool isLogicalShifter()
const {
1609 getShiftExtendAmount() < width;
1612 bool isMovImm32Shifter()
const {
1620 uint64_t Val = getShiftExtendAmount();
1621 return (Val == 0 || Val == 16);
1624 bool isMovImm64Shifter()
const {
1632 uint64_t Val = getShiftExtendAmount();
1633 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1636 bool isLogicalVecShifter()
const {
1641 unsigned Shift = getShiftExtendAmount();
1643 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1646 bool isLogicalVecHalfWordShifter()
const {
1647 if (!isLogicalVecShifter())
1651 unsigned Shift = getShiftExtendAmount();
1653 (Shift == 0 || Shift == 8);
1656 bool isMoveVecShifter()
const {
1657 if (!isShiftExtend())
1661 unsigned Shift = getShiftExtendAmount();
1663 (Shift == 8 || Shift == 16);
1672 bool isSImm9OffsetFB()
const {
1673 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1676 bool isAdrpLabel()
const {
1683 int64_t Val =
CE->getValue();
1684 int64_t Min = - (4096 * (1LL << (21 - 1)));
1685 int64_t
Max = 4096 * ((1LL << (21 - 1)) - 1);
1686 return (Val % 4096) == 0 && Val >= Min && Val <=
Max;
1692 bool isAdrLabel()
const {
1699 int64_t Val =
CE->getValue();
1700 int64_t Min = - (1LL << (21 - 1));
1701 int64_t
Max = ((1LL << (21 - 1)) - 1);
1702 return Val >= Min && Val <=
Max;
1708 template <MatrixKind Kind,
unsigned EltSize,
unsigned RegClass>
1711 return DiagnosticPredicateTy::NoMatch;
1712 if (getMatrixKind() != Kind ||
1713 !AArch64MCRegisterClasses[RegClass].
contains(getMatrixReg()) ||
1714 EltSize != getMatrixElementWidth())
1715 return DiagnosticPredicateTy::NearMatch;
1716 return DiagnosticPredicateTy::Match;
1719 bool isPAuthPCRelLabel16Operand()
const {
1731 return (Val <= 0) && (Val > -(1 << 18));
1738 else if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1744 void addRegOperands(
MCInst &Inst,
unsigned N)
const {
1745 assert(
N == 1 &&
"Invalid number of operands!");
1749 void addMatrixOperands(
MCInst &Inst,
unsigned N)
const {
1750 assert(
N == 1 &&
"Invalid number of operands!");
1754 void addGPR32as64Operands(
MCInst &Inst,
unsigned N)
const {
1755 assert(
N == 1 &&
"Invalid number of operands!");
1757 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].
contains(
getReg()));
1766 void addGPR64as32Operands(
MCInst &Inst,
unsigned N)
const {
1767 assert(
N == 1 &&
"Invalid number of operands!");
1769 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].
contains(
getReg()));
1778 template <
int W
idth>
1779 void addFPRasZPRRegOperands(
MCInst &Inst,
unsigned N)
const {
1782 case 8:
Base = AArch64::B0;
break;
1783 case 16:
Base = AArch64::H0;
break;
1784 case 32:
Base = AArch64::S0;
break;
1785 case 64:
Base = AArch64::D0;
break;
1786 case 128:
Base = AArch64::Q0;
break;
1793 void addPPRorPNRRegOperands(
MCInst &Inst,
unsigned N)
const {
1794 assert(
N == 1 &&
"Invalid number of operands!");
1797 if (
Reg >= AArch64::PN0 &&
Reg <= AArch64::PN15)
1798 Reg =
Reg - AArch64::PN0 + AArch64::P0;
1802 void addPNRasPPRRegOperands(
MCInst &Inst,
unsigned N)
const {
1803 assert(
N == 1 &&
"Invalid number of operands!");
1808 void addVectorReg64Operands(
MCInst &Inst,
unsigned N)
const {
1809 assert(
N == 1 &&
"Invalid number of operands!");
1811 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].
contains(
getReg()));
1815 void addVectorReg128Operands(
MCInst &Inst,
unsigned N)
const {
1816 assert(
N == 1 &&
"Invalid number of operands!");
1818 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].
contains(
getReg()));
1822 void addVectorRegLoOperands(
MCInst &Inst,
unsigned N)
const {
1823 assert(
N == 1 &&
"Invalid number of operands!");
1827 void addVectorReg0to7Operands(
MCInst &Inst,
unsigned N)
const {
1828 assert(
N == 1 &&
"Invalid number of operands!");
1832 enum VecListIndexType {
1833 VecListIdx_DReg = 0,
1834 VecListIdx_QReg = 1,
1835 VecListIdx_ZReg = 2,
1836 VecListIdx_PReg = 3,
1839 template <VecListIndexType RegTy,
unsigned NumRegs>
1840 void addVectorListOperands(
MCInst &Inst,
unsigned N)
const {
1841 assert(
N == 1 &&
"Invalid number of operands!");
1842 static const unsigned FirstRegs[][5] = {
1844 AArch64::D0, AArch64::D0_D1,
1845 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1847 AArch64::Q0, AArch64::Q0_Q1,
1848 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1850 AArch64::Z0, AArch64::Z0_Z1,
1851 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1853 AArch64::P0, AArch64::P0_P1 }
1856 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1857 " NumRegs must be <= 4 for ZRegs");
1859 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1860 " NumRegs must be <= 2 for PRegs");
1862 unsigned FirstReg = FirstRegs[(
unsigned)RegTy][NumRegs];
1864 FirstRegs[(
unsigned)RegTy][0]));
1867 template <
unsigned NumRegs>
1868 void addStridedVectorListOperands(
MCInst &Inst,
unsigned N)
const {
1869 assert(
N == 1 &&
"Invalid number of operands!");
1870 assert((NumRegs == 2 || NumRegs == 4) &&
" NumRegs must be 2 or 4");
1874 if (getVectorListStart() < AArch64::Z16) {
1875 assert((getVectorListStart() < AArch64::Z8) &&
1876 (getVectorListStart() >= AArch64::Z0) &&
"Invalid Register");
1878 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1880 assert((getVectorListStart() < AArch64::Z24) &&
1881 (getVectorListStart() >= AArch64::Z16) &&
"Invalid Register");
1883 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1887 if (getVectorListStart() < AArch64::Z16) {
1888 assert((getVectorListStart() < AArch64::Z4) &&
1889 (getVectorListStart() >= AArch64::Z0) &&
"Invalid Register");
1891 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1893 assert((getVectorListStart() < AArch64::Z20) &&
1894 (getVectorListStart() >= AArch64::Z16) &&
"Invalid Register");
1896 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
// --- AArch64Operand::add*Operands emitters (immediates / labels), fragment ---
// NOTE(review): this region is a lossy extraction: statements are re-wrapped,
// the original file's line numbers (e.g. "1904") are fused into the text, and
// the MCOperand-appending statements plus closing braces of most methods are
// missing (original line numbers jump, e.g. 1907 -> 1911). Code is left
// byte-identical; only comments were added. TODO(review): restore from the
// authoritative source before compiling.

// Emits the ZA matrix tile-list mask; asserts a single operand and that the
// mask fits in 8 bits.
1904 void addMatrixTileListOperands(
MCInst &Inst,
unsigned N)
const {
1905 assert(
N == 1 &&
"Invalid number of operands!");
1906 unsigned RegMask = getMatrixTileListRegMask();
1907 assert(RegMask <= 0xFF &&
"Invalid mask!");
// Emits a vector lane index operand.
1911 void addVectorIndexOperands(
MCInst &Inst,
unsigned N)
const {
1912 assert(
N == 1 &&
"Invalid number of operands!");
// Emits an exact FP immediate; template parameters select the two legal
// encoded values checked by isExactFPImm<>.
1916 template <
unsigned ImmIs0,
unsigned ImmIs1>
1917 void addExactFPImmOperands(
MCInst &Inst,
unsigned N)
const {
1918 assert(
N == 1 &&
"Invalid number of operands!");
1919 assert(
bool(isExactFPImm<ImmIs0, ImmIs1>()) &&
"Invalid operand");
// Emits a plain immediate via addExpr (handles both constant and symbolic
// expressions).
1923 void addImmOperands(
MCInst &Inst,
unsigned N)
const {
1924 assert(
N == 1 &&
"Invalid number of operands!");
1928 addExpr(Inst, getImm());
// Emits an immediate plus its optional shift as two operands (N == 2);
// prefers the pre-shifted value when getShiftedVal<Shift>() yields one.
1931 template <
int Shift>
1932 void addImmWithOptionalShiftOperands(
MCInst &Inst,
unsigned N)
const {
1933 assert(
N == 2 &&
"Invalid number of operands!");
1934 if (
auto ShiftedVal = getShiftedVal<Shift>()) {
1937 }
else if (isShiftedImm()) {
1938 addExpr(Inst, getShiftedImmVal());
1941 addExpr(Inst, getImm());
// As above but for the negated-immediate aliases (e.g. SUB as ADD of -imm).
1946 template <
int Shift>
1947 void addImmNegWithOptionalShiftOperands(
MCInst &Inst,
unsigned N)
const {
1948 assert(
N == 2 &&
"Invalid number of operands!");
1949 if (
auto ShiftedVal = getShiftedVal<Shift>()) {
// Emits a condition-code operand.
1956 void addCondCodeOperands(
MCInst &Inst,
unsigned N)
const {
1957 assert(
N == 1 &&
"Invalid number of operands!");
// Emits an ADRP page-label operand.
1961 void addAdrpLabelOperands(
MCInst &Inst,
unsigned N)
const {
1962 assert(
N == 1 &&
"Invalid number of operands!");
1965 addExpr(Inst, getImm());
// ADR labels are emitted exactly like plain immediates.
1970 void addAdrLabelOperands(
MCInst &Inst,
unsigned N)
const {
1971 addImmOperands(Inst,
N);
// Emits a scaled unsigned 12-bit load/store offset.
1975 void addUImm12OffsetOperands(
MCInst &Inst,
unsigned N)
const {
1976 assert(
N == 1 &&
"Invalid number of operands!");
// Emits an unsigned 6-bit immediate.
1986 void addUImm6Operands(
MCInst &Inst,
unsigned N)
const {
1987 assert(
N == 1 &&
"Invalid number of operands!");
// Emits an immediate divided by the template Scale factor.
1992 template <
int Scale>
1993 void addImmScaledOperands(
MCInst &Inst,
unsigned N)
const {
1994 assert(
N == 1 &&
"Invalid number of operands!");
// Scaled variant for immediate ranges (SME/SVE multi-register forms).
1999 template <
int Scale>
2000 void addImmScaledRangeOperands(
MCInst &Inst,
unsigned N)
const {
2001 assert(
N == 1 &&
"Invalid number of operands!");
// Emits a logical immediate re-encoded for element type T; the value is
// read as the unsigned counterpart of T.
2005 template <
typename T>
2006 void addLogicalImmOperands(
MCInst &Inst,
unsigned N)
const {
2007 assert(
N == 1 &&
"Invalid number of operands!");
2009 std::make_unsigned_t<T> Val = MCE->
getValue();
// Same as addLogicalImmOperands but with the value bitwise-inverted first.
2014 template <
typename T>
2015 void addLogicalImmNotOperands(
MCInst &Inst,
unsigned N)
const {
2016 assert(
N == 1 &&
"Invalid number of operands!");
2018 std::make_unsigned_t<T> Val = ~MCE->getValue();
// Emits an advanced-SIMD modified-immediate (type 10) operand.
2023 void addSIMDImmType10Operands(
MCInst &Inst,
unsigned N)
const {
2024 assert(
N == 1 &&
"Invalid number of operands!");
// Emits a 26-bit branch target; non-constant expressions go through addExpr,
// constants are asserted below.
2030 void addBranchTarget26Operands(
MCInst &Inst,
unsigned N)
const {
2034 assert(
N == 1 &&
"Invalid number of operands!");
2037 addExpr(Inst, getImm());
2040 assert(MCE &&
"Invalid constant immediate operand!");
// Emits a 16-bit PC-relative label for pointer-authentication instructions.
2044 void addPAuthPCRelLabel16Operands(
MCInst &Inst,
unsigned N)
const {
2048 assert(
N == 1 &&
"Invalid number of operands!");
2051 addExpr(Inst, getImm());
// Emits a 19-bit PC-relative label (CBZ/LDR-literal class).
2057 void addPCRelLabel19Operands(
MCInst &Inst,
unsigned N)
const {
2061 assert(
N == 1 &&
"Invalid number of operands!");
2064 addExpr(Inst, getImm());
2067 assert(MCE &&
"Invalid constant immediate operand!");
// Emits a 14-bit branch target (TBZ/TBNZ class).
2071 void addBranchTarget14Operands(
MCInst &Inst,
unsigned N)
const {
2075 assert(
N == 1 &&
"Invalid number of operands!");
2078 addExpr(Inst, getImm());
2081 assert(MCE &&
"Invalid constant immediate operand!");
// --- AArch64Operand::add*Operands emitters (system / hint / extend), fragment ---
// NOTE(review): same extraction damage as above — operand-appending statements
// and closing braces are missing between the surviving assert lines. Code is
// byte-identical; comments only.

// Emits an 8-bit encoded floating-point immediate.
2085 void addFPImmOperands(
MCInst &Inst,
unsigned N)
const {
2086 assert(
N == 1 &&
"Invalid number of operands!");
// Emits a DSB/DMB/ISB barrier option operand.
2091 void addBarrierOperands(
MCInst &Inst,
unsigned N)
const {
2092 assert(
N == 1 &&
"Invalid number of operands!");
// Emits the nXS-qualified barrier variant.
2096 void addBarriernXSOperands(
MCInst &Inst,
unsigned N)
const {
2097 assert(
N == 1 &&
"Invalid number of operands!");
// Emits the system register encoding used by MRS (read).
2101 void addMRSSystemRegisterOperands(
MCInst &Inst,
unsigned N)
const {
2102 assert(
N == 1 &&
"Invalid number of operands!");
// Emits the system register encoding used by MSR (write).
2107 void addMSRSystemRegisterOperands(
MCInst &Inst,
unsigned N)
const {
2108 assert(
N == 1 &&
"Invalid number of operands!");
// Emits a PSTATE field taking a 0/1 immediate.
2113 void addSystemPStateFieldWithImm0_1Operands(
MCInst &Inst,
unsigned N)
const {
2114 assert(
N == 1 &&
"Invalid number of operands!");
// Emits an SME SVCR (streaming-mode control) operand.
2119 void addSVCROperands(
MCInst &Inst,
unsigned N)
const {
2120 assert(
N == 1 &&
"Invalid number of operands!");
// Emits a PSTATE field taking a 0..15 immediate.
2125 void addSystemPStateFieldWithImm0_15Operands(
MCInst &Inst,
unsigned N)
const {
2126 assert(
N == 1 &&
"Invalid number of operands!");
// Emits a SYS/SYSL CRn/CRm operand.
2131 void addSysCROperands(
MCInst &Inst,
unsigned N)
const {
2132 assert(
N == 1 &&
"Invalid number of operands!");
// Emits a PRFM prefetch-operation operand.
2136 void addPrefetchOperands(
MCInst &Inst,
unsigned N)
const {
2137 assert(
N == 1 &&
"Invalid number of operands!");
// Emits a PSB hint operand.
2141 void addPSBHintOperands(
MCInst &Inst,
unsigned N)
const {
2142 assert(
N == 1 &&
"Invalid number of operands!");
// Emits a BTI hint operand.
2146 void addBTIHintOperands(
MCInst &Inst,
unsigned N)
const {
2147 assert(
N == 1 &&
"Invalid number of operands!");
// Emits a shift/extend modifier operand.
2151 void addShifterOperands(
MCInst &Inst,
unsigned N)
const {
2152 assert(
N == 1 &&
"Invalid number of operands!");
// Emits an LSL #0..7 shifter; amount read below, encoding lines missing.
2158 void addLSLImm3ShifterOperands(
MCInst &Inst,
unsigned N)
const {
2159 assert(
N == 1 &&
"Invalid number of operands!");
2160 unsigned Imm = getShiftExtendAmount();
// Emits the XZR-pair operand for SYSP; the surviving branch suggests a
// non-XZR register is handled specially (rest of body missing).
2164 void addSyspXzrPairOperand(
MCInst &Inst,
unsigned N)
const {
2165 assert(
N == 1 &&
"Invalid number of operands!");
2173 if (
Reg != AArch64::XZR)
// Emits an extend modifier (UXTB/SXTW/...) operand.
2179 void addExtendOperands(
MCInst &Inst,
unsigned N)
const {
2180 assert(
N == 1 &&
"Invalid number of operands!");
// 64-bit variant of the extend modifier.
2187 void addExtend64Operands(
MCInst &Inst,
unsigned N)
const {
2188 assert(
N == 1 &&
"Invalid number of operands!");
// Emits the two operands of a memory extend (sign/zero-extend + shift flag).
2195 void addMemExtendOperands(
MCInst &Inst,
unsigned N)
const {
2196 assert(
N == 2 &&
"Invalid number of operands!");
// Byte-sized memory extend variant (also two operands).
2207 void addMemExtend8Operands(
MCInst &Inst,
unsigned N)
const {
2208 assert(
N == 2 &&
"Invalid number of operands!");
// Emits the immediate for the MOVZ mov-alias form.
2216 void addMOVZMovAliasOperands(
MCInst &Inst,
unsigned N)
const {
2217 assert(
N == 1 &&
"Invalid number of operands!");
2224 addExpr(Inst, getImm());
// Emits the immediate for the MOVN mov-alias form.
2229 void addMOVNMovAliasOperands(
MCInst &Inst,
unsigned N)
const {
2230 assert(
N == 1 &&
"Invalid number of operands!");
// Emits an even complex-number rotation (0/90/180/270 degrees).
2237 void addComplexRotationEvenOperands(
MCInst &Inst,
unsigned N)
const {
2238 assert(
N == 1 &&
"Invalid number of operands!");
// Emits an odd complex-number rotation (90/270 degrees).
2243 void addComplexRotationOddOperands(
MCInst &Inst,
unsigned N)
const {
2244 assert(
N == 1 &&
"Invalid number of operands!");
// --- Static AArch64Operand factory functions, fragment ---
// NOTE(review): signatures are partially lost in the extraction (parameter
// lists and the trailing "Op->StartLoc/EndLoc; return Op;" epilogues are
// missing). Code is byte-identical; comments only.

// Creates a bare token operand pointing into the caller's string storage.
2251 static std::unique_ptr<AArch64Operand>
2253 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2254 Op->Tok.Data = Str.data();
2255 Op->Tok.Length = Str.size();
2256 Op->Tok.IsSuffix = IsSuffix;
// Creates a scalar register operand; shift/extend defaults to none and the
// equality constraint defaults to EqualsReg.
2262 static std::unique_ptr<AArch64Operand>
2264 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2266 unsigned ShiftAmount = 0,
2267 unsigned HasExplicitAmount =
false) {
2268 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2269 Op->Reg.RegNum = RegNum;
2271 Op->Reg.ElementWidth = 0;
2272 Op->Reg.EqualityTy = EqTy;
2273 Op->Reg.ShiftExtend.Type = ExtTy;
2274 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2275 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
// Creates a vector register operand: delegates to CreateReg then records the
// element width. Only the four vector kinds asserted below are legal here.
2281 static std::unique_ptr<AArch64Operand>
2282 CreateVectorReg(
unsigned RegNum, RegKind Kind,
unsigned ElementWidth,
2285 unsigned ShiftAmount = 0,
2286 unsigned HasExplicitAmount =
false) {
2287 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2288 Kind == RegKind::SVEPredicateVector ||
2289 Kind == RegKind::SVEPredicateAsCounter) &&
2290 "Invalid vector kind");
2291 auto Op = CreateReg(RegNum, Kind, S,
E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2293 Op->Reg.ElementWidth = ElementWidth;
// Creates a vector-list operand (e.g. "{ v0.4s - v3.4s }"); Stride supports
// the SME strided multi-vector forms.
2297 static std::unique_ptr<AArch64Operand>
2298 CreateVectorList(
unsigned RegNum,
unsigned Count,
unsigned Stride,
2299 unsigned NumElements,
unsigned ElementWidth,
2301 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2302 Op->VectorList.RegNum = RegNum;
2303 Op->VectorList.Count = Count;
2304 Op->VectorList.Stride = Stride;
2305 Op->VectorList.NumElements = NumElements;
2306 Op->VectorList.ElementWidth = ElementWidth;
2307 Op->VectorList.RegisterKind = RegisterKind;
// Creates a vector lane-index operand.
2313 static std::unique_ptr<AArch64Operand>
2315 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2316 Op->VectorIndex.Val =
Idx;
// Creates a ZA matrix tile-list operand from an 8-bit mask.
2322 static std::unique_ptr<AArch64Operand>
2324 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2325 Op->MatrixTileList.RegMask = RegMask;
// Expands a (ElementWidth, tile) pair to the set of 64-bit ZA tiles it
// aliases, via a function-local lookup table. 64-bit tiles are handled by
// the early-out below before consulting the map.
2332 const unsigned ElementWidth) {
2333 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2335 {{0, AArch64::ZAB0},
2336 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2337 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2338 {{8, AArch64::ZAB0},
2339 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2340 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2341 {{16, AArch64::ZAH0},
2342 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2343 {{16, AArch64::ZAH1},
2344 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2345 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2346 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2347 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2348 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2351 if (ElementWidth == 64)
2354 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth,
Reg)];
2355 assert(!Regs.empty() &&
"Invalid tile or element width!");
2356 for (
auto OutReg : Regs)
// Creates a plain immediate-expression operand.
2361 static std::unique_ptr<AArch64Operand> CreateImm(
const MCExpr *Val,
SMLoc S,
2363 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
// Creates an immediate with an attached left-shift amount.
2370 static std::unique_ptr<AArch64Operand> CreateShiftedImm(
const MCExpr *Val,
2371 unsigned ShiftAmount,
2374 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2375 Op->ShiftedImm .Val = Val;
2376 Op->ShiftedImm.ShiftAmount = ShiftAmount;
// Creates an immediate-range operand (First..Last).
2382 static std::unique_ptr<AArch64Operand> CreateImmRange(
unsigned First,
2386 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2388 Op->ImmRange.Last =
Last;
// Creates a condition-code operand.
2393 static std::unique_ptr<AArch64Operand>
2395 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2396 Op->CondCode.Code =
Code;
// Creates a floating-point immediate; IsExact records whether the parsed
// literal was representable without rounding.
2402 static std::unique_ptr<AArch64Operand>
2404 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2406 Op->FPImm.IsExact = IsExact;
// Creates a barrier operand, keeping the spelled name for diagnostics and
// whether the nXS qualifier was present.
2412 static std::unique_ptr<AArch64Operand> CreateBarrier(
unsigned Val,
2416 bool HasnXSModifier) {
2417 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2418 Op->Barrier.Val = Val;
2419 Op->Barrier.Data = Str.data();
2420 Op->Barrier.Length = Str.size();
2421 Op->Barrier.HasnXSModifier = HasnXSModifier;
// Creates a system-register operand with its MRS/MSR encodings and PSTATE
// field.
2427 static std::unique_ptr<AArch64Operand> CreateSysReg(
StringRef Str,
SMLoc S,
2432 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2433 Op->SysReg.Data = Str.data();
2434 Op->SysReg.Length = Str.size();
2435 Op->SysReg.MRSReg = MRSReg;
2436 Op->SysReg.MSRReg = MSRReg;
2437 Op->SysReg.PStateField = PStateField;
// Creates a SYS CRn/CRm operand.
2443 static std::unique_ptr<AArch64Operand> CreateSysCR(
unsigned Val,
SMLoc S,
2445 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2446 Op->SysCRImm.Val = Val;
// Creates a prefetch-operation operand.
// NOTE(review): the name/length are stored through the Barrier member rather
// than Prefetch — presumably these union members share layout; verify against
// the operand union declaration before relying on it.
2452 static std::unique_ptr<AArch64Operand> CreatePrefetch(
unsigned Val,
2456 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2457 Op->Prefetch.Val = Val;
2458 Op->Barrier.Data = Str.data();
2459 Op->Barrier.Length = Str.size();
// Creates a PSB hint operand.
2465 static std::unique_ptr<AArch64Operand> CreatePSBHint(
unsigned Val,
2469 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2470 Op->PSBHint.Val = Val;
2471 Op->PSBHint.Data = Str.data();
2472 Op->PSBHint.Length = Str.size();
// Creates a BTI hint operand.
// NOTE(review): Val is OR'ed with 32 here — looks like the hint-immediate
// bias for BTI encodings; confirm against the BTI hint encoding table.
2478 static std::unique_ptr<AArch64Operand> CreateBTIHint(
unsigned Val,
2482 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2483 Op->BTIHint.Val = Val | 32;
2484 Op->BTIHint.Data = Str.data();
2485 Op->BTIHint.Length = Str.size();
// Creates an SME matrix (ZA tile/row/col) register operand.
2491 static std::unique_ptr<AArch64Operand>
2492 CreateMatrixRegister(
unsigned RegNum,
unsigned ElementWidth, MatrixKind Kind,
2494 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2495 Op->MatrixReg.RegNum = RegNum;
2496 Op->MatrixReg.ElementWidth = ElementWidth;
2497 Op->MatrixReg.Kind =
Kind;
// Creates an SVCR operand (SME streaming-mode control), keeping the spelled
// name for diagnostics.
2503 static std::unique_ptr<AArch64Operand>
2505 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2506 Op->SVCR.PStateField = PStateField;
2507 Op->SVCR.Data = Str.data();
2508 Op->SVCR.Length = Str.size();
// Creates a standalone shift/extend modifier operand.
2514 static std::unique_ptr<AArch64Operand>
2517 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2518 Op->ShiftExtend.Type = ShOp;
2519 Op->ShiftExtend.Amount = Val;
2520 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
// --- AArch64Operand::print(raw_ostream&) fragments ---
// NOTE(review): this is the interior of the debug-print switch over the
// operand kind; the enclosing function header, several case labels, and the
// break statements were lost in extraction. Code is byte-identical.

// FP immediate: printed as the raw bit pattern.
2532 OS <<
"<fpimm " <<
getFPImm().bitcastToAPInt().getZExtValue();
2533 if (!getFPImmIsExact())
// Barrier: known names print the mnemonic, otherwise the raw value.
2540 OS <<
"<barrier " <<
Name <<
">";
2542 OS <<
"<barrier invalid #" << getBarrier() <<
">";
2548 case k_ShiftedImm: {
2549 unsigned Shift = getShiftedImmShift();
2550 OS <<
"<shiftedimm ";
2551 OS << *getShiftedImmVal();
// Immediate range printed as first:last.
2557 OS << getFirstImmVal();
2558 OS <<
":" << getLastImmVal() <<
">";
2564 case k_VectorList: {
2565 OS <<
"<vectorlist ";
2566 unsigned Reg = getVectorListStart();
// Each register in the list is start + i*stride.
2567 for (
unsigned i = 0, e = getVectorListCount(); i !=
e; ++i)
2568 OS <<
Reg + i * getVectorListStride() <<
" ";
2573 OS <<
"<vectorindex " << getVectorIndex() <<
">";
2576 OS <<
"<sysreg: " << getSysReg() <<
'>';
2579 OS <<
"'" << getToken() <<
"'";
2582 OS <<
"c" << getSysCR();
// Prefetch: known ops print the name, otherwise the raw value.
2587 OS <<
"<prfop " <<
Name <<
">";
2589 OS <<
"<prfop invalid #" << getPrefetch() <<
">";
2593 OS << getPSBHintName();
2596 OS << getBTIHintName();
2598 case k_MatrixRegister:
2599 OS <<
"<matrix " << getMatrixReg() <<
">";
2601 case k_MatrixTileList: {
2602 OS <<
"<matrixlist ";
2603 unsigned RegMask = getMatrixTileListRegMask();
2604 unsigned MaxBits = 8;
// Prints the 8-bit tile mask MSB-first, one bit per character.
2605 for (
unsigned I = MaxBits;
I > 0; --
I)
2606 OS << ((RegMask & (1 << (
I - 1))) >> (
I - 1));
// Register: optionally followed by its shift/extend modifier.
2615 OS <<
"<register " <<
getReg() <<
">";
2616 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2621 << getShiftExtendAmount();
2622 if (!hasShiftExtendAmount())
// --- NEON vector register-name matcher, fragment ---
// NOTE(review): these are the StringSwitch cases mapping "v0".."v31" to the
// Q-register enumerators; the enclosing function signature, the StringSwitch
// subject expression, and the .Default(...) terminator were lost in
// extraction. Code is byte-identical.
2638 .
Case(
"v0", AArch64::Q0)
2639 .
Case(
"v1", AArch64::Q1)
2640 .
Case(
"v2", AArch64::Q2)
2641 .
Case(
"v3", AArch64::Q3)
2642 .
Case(
"v4", AArch64::Q4)
2643 .
Case(
"v5", AArch64::Q5)
2644 .
Case(
"v6", AArch64::Q6)
2645 .
Case(
"v7", AArch64::Q7)
2646 .
Case(
"v8", AArch64::Q8)
2647 .
Case(
"v9", AArch64::Q9)
2648 .
Case(
"v10", AArch64::Q10)
2649 .
Case(
"v11", AArch64::Q11)
2650 .
Case(
"v12", AArch64::Q12)
2651 .
Case(
"v13", AArch64::Q13)
2652 .
Case(
"v14", AArch64::Q14)
2653 .
Case(
"v15", AArch64::Q15)
2654 .
Case(
"v16", AArch64::Q16)
2655 .
Case(
"v17", AArch64::Q17)
2656 .
Case(
"v18", AArch64::Q18)
2657 .
Case(
"v19", AArch64::Q19)
2658 .
Case(
"v20", AArch64::Q20)
2659 .
Case(
"v21", AArch64::Q21)
2660 .
Case(
"v22", AArch64::Q22)
2661 .
Case(
"v23", AArch64::Q23)
2662 .
Case(
"v24", AArch64::Q24)
2663 .
Case(
"v25", AArch64::Q25)
2664 .
Case(
"v26", AArch64::Q26)
2665 .
Case(
"v27", AArch64::Q27)
2666 .
Case(
"v28", AArch64::Q28)
2667 .
Case(
"v29", AArch64::Q29)
2668 .
Case(
"v30", AArch64::Q30)
2669 .
Case(
"v31", AArch64::Q31)
// --- Vector-kind suffix classifier, fragment ---
// Maps a vector layout suffix (".4s", ".d", ...) to a {NumElements,
// ElementWidth} pair for the given register kind; {0, W} means an
// unsuffixed-count element width (SVE-style). Returns std::nullopt when the
// suffix matches nothing.
// NOTE(review): the function header and the StringSwitch subject/Default
// lines were lost in extraction; code below is byte-identical.
2678 RegKind VectorKind) {
2679 std::pair<int, int> Res = {-1, -1};
2681 switch (VectorKind) {
// NEON: full element-count suffixes.
2682 case RegKind::NeonVector:
2685 .Case(
".1d", {1, 64})
2686 .Case(
".1q", {1, 128})
2688 .Case(
".2h", {2, 16})
2689 .Case(
".2b", {2, 8})
2690 .Case(
".2s", {2, 32})
2691 .Case(
".2d", {2, 64})
2694 .Case(
".4b", {4, 8})
2695 .Case(
".4h", {4, 16})
2696 .Case(
".4s", {4, 32})
2697 .Case(
".8b", {8, 8})
2698 .Case(
".8h", {8, 16})
2699 .Case(
".16b", {16, 8})
2704 .Case(
".h", {0, 16})
2705 .Case(
".s", {0, 32})
2706 .Case(
".d", {0, 64})
// SVE / SME kinds: element-width-only suffixes.
2709 case RegKind::SVEPredicateAsCounter:
2710 case RegKind::SVEPredicateVector:
2711 case RegKind::SVEDataVector:
2712 case RegKind::Matrix:
2716 .Case(
".h", {0, 16})
2717 .Case(
".s", {0, 32})
2718 .Case(
".d", {0, 64})
2719 .Case(
".q", {0, 128})
// Sentinel {-1,-1} means no suffix matched.
2726 if (Res == std::make_pair(-1, -1))
2727 return std::nullopt;
2729 return std::optional<std::pair<int, int>>(Res);
// --- SVE / predicate / SME register-name matchers, fragment ---
// NOTE(review): four or more separate StringSwitch-based name matchers were
// fused together by the extraction: their function signatures, StringSwitch
// subjects and .Default(...) terminators are missing (original line numbers
// jump at each boundary, e.g. 2769 -> 2775). Code is byte-identical;
// comments mark the apparent section boundaries.

// SVE data registers z0..z31.
2738 .
Case(
"z0", AArch64::Z0)
2739 .
Case(
"z1", AArch64::Z1)
2740 .
Case(
"z2", AArch64::Z2)
2741 .
Case(
"z3", AArch64::Z3)
2742 .
Case(
"z4", AArch64::Z4)
2743 .
Case(
"z5", AArch64::Z5)
2744 .
Case(
"z6", AArch64::Z6)
2745 .
Case(
"z7", AArch64::Z7)
2746 .
Case(
"z8", AArch64::Z8)
2747 .
Case(
"z9", AArch64::Z9)
2748 .
Case(
"z10", AArch64::Z10)
2749 .
Case(
"z11", AArch64::Z11)
2750 .
Case(
"z12", AArch64::Z12)
2751 .
Case(
"z13", AArch64::Z13)
2752 .
Case(
"z14", AArch64::Z14)
2753 .
Case(
"z15", AArch64::Z15)
2754 .
Case(
"z16", AArch64::Z16)
2755 .
Case(
"z17", AArch64::Z17)
2756 .
Case(
"z18", AArch64::Z18)
2757 .
Case(
"z19", AArch64::Z19)
2758 .
Case(
"z20", AArch64::Z20)
2759 .
Case(
"z21", AArch64::Z21)
2760 .
Case(
"z22", AArch64::Z22)
2761 .
Case(
"z23", AArch64::Z23)
2762 .
Case(
"z24", AArch64::Z24)
2763 .
Case(
"z25", AArch64::Z25)
2764 .
Case(
"z26", AArch64::Z26)
2765 .
Case(
"z27", AArch64::Z27)
2766 .
Case(
"z28", AArch64::Z28)
2767 .
Case(
"z29", AArch64::Z29)
2768 .
Case(
"z30", AArch64::Z30)
2769 .
Case(
"z31", AArch64::Z31)
// SVE predicate registers p0..p15 (separate matcher; header missing).
2775 .
Case(
"p0", AArch64::P0)
2776 .
Case(
"p1", AArch64::P1)
2777 .
Case(
"p2", AArch64::P2)
2778 .
Case(
"p3", AArch64::P3)
2779 .
Case(
"p4", AArch64::P4)
2780 .
Case(
"p5", AArch64::P5)
2781 .
Case(
"p6", AArch64::P6)
2782 .
Case(
"p7", AArch64::P7)
2783 .
Case(
"p8", AArch64::P8)
2784 .
Case(
"p9", AArch64::P9)
2785 .
Case(
"p10", AArch64::P10)
2786 .
Case(
"p11", AArch64::P11)
2787 .
Case(
"p12", AArch64::P12)
2788 .
Case(
"p13", AArch64::P13)
2789 .
Case(
"p14", AArch64::P14)
2790 .
Case(
"p15", AArch64::P15)
// Predicate-as-counter registers pn0..pn15 (separate matcher).
2796 .
Case(
"pn0", AArch64::PN0)
2797 .
Case(
"pn1", AArch64::PN1)
2798 .
Case(
"pn2", AArch64::PN2)
2799 .
Case(
"pn3", AArch64::PN3)
2800 .
Case(
"pn4", AArch64::PN4)
2801 .
Case(
"pn5", AArch64::PN5)
2802 .
Case(
"pn6", AArch64::PN6)
2803 .
Case(
"pn7", AArch64::PN7)
2804 .
Case(
"pn8", AArch64::PN8)
2805 .
Case(
"pn9", AArch64::PN9)
2806 .
Case(
"pn10", AArch64::PN10)
2807 .
Case(
"pn11", AArch64::PN11)
2808 .
Case(
"pn12", AArch64::PN12)
2809 .
Case(
"pn13", AArch64::PN13)
2810 .
Case(
"pn14", AArch64::PN14)
2811 .
Case(
"pn15", AArch64::PN15)
// SME ZA tile names with element suffix (separate matcher).
2817 .
Case(
"za0.d", AArch64::ZAD0)
2818 .
Case(
"za1.d", AArch64::ZAD1)
2819 .
Case(
"za2.d", AArch64::ZAD2)
2820 .
Case(
"za3.d", AArch64::ZAD3)
2821 .
Case(
"za4.d", AArch64::ZAD4)
2822 .
Case(
"za5.d", AArch64::ZAD5)
2823 .
Case(
"za6.d", AArch64::ZAD6)
2824 .
Case(
"za7.d", AArch64::ZAD7)
2825 .
Case(
"za0.s", AArch64::ZAS0)
2826 .
Case(
"za1.s", AArch64::ZAS1)
2827 .
Case(
"za2.s", AArch64::ZAS2)
2828 .
Case(
"za3.s", AArch64::ZAS3)
2829 .
Case(
"za0.h", AArch64::ZAH0)
2830 .
Case(
"za1.h", AArch64::ZAH1)
2831 .
Case(
"za0.b", AArch64::ZAB0)
// ZA array / tile / slice names, including the h (horizontal) and
// v (vertical) slice spellings (separate matcher).
2837 .
Case(
"za", AArch64::ZA)
2838 .
Case(
"za0.q", AArch64::ZAQ0)
2839 .
Case(
"za1.q", AArch64::ZAQ1)
2840 .
Case(
"za2.q", AArch64::ZAQ2)
2841 .
Case(
"za3.q", AArch64::ZAQ3)
2842 .
Case(
"za4.q", AArch64::ZAQ4)
2843 .
Case(
"za5.q", AArch64::ZAQ5)
2844 .
Case(
"za6.q", AArch64::ZAQ6)
2845 .
Case(
"za7.q", AArch64::ZAQ7)
2846 .
Case(
"za8.q", AArch64::ZAQ8)
2847 .
Case(
"za9.q", AArch64::ZAQ9)
2848 .
Case(
"za10.q", AArch64::ZAQ10)
2849 .
Case(
"za11.q", AArch64::ZAQ11)
2850 .
Case(
"za12.q", AArch64::ZAQ12)
2851 .
Case(
"za13.q", AArch64::ZAQ13)
2852 .
Case(
"za14.q", AArch64::ZAQ14)
2853 .
Case(
"za15.q", AArch64::ZAQ15)
2854 .
Case(
"za0.d", AArch64::ZAD0)
2855 .
Case(
"za1.d", AArch64::ZAD1)
2856 .
Case(
"za2.d", AArch64::ZAD2)
2857 .
Case(
"za3.d", AArch64::ZAD3)
2858 .
Case(
"za4.d", AArch64::ZAD4)
2859 .
Case(
"za5.d", AArch64::ZAD5)
2860 .
Case(
"za6.d", AArch64::ZAD6)
2861 .
Case(
"za7.d", AArch64::ZAD7)
2862 .
Case(
"za0.s", AArch64::ZAS0)
2863 .
Case(
"za1.s", AArch64::ZAS1)
2864 .
Case(
"za2.s", AArch64::ZAS2)
2865 .
Case(
"za3.s", AArch64::ZAS3)
2866 .
Case(
"za0.h", AArch64::ZAH0)
2867 .
Case(
"za1.h", AArch64::ZAH1)
2868 .
Case(
"za0.b", AArch64::ZAB0)
2869 .
Case(
"za0h.q", AArch64::ZAQ0)
2870 .
Case(
"za1h.q", AArch64::ZAQ1)
2871 .
Case(
"za2h.q", AArch64::ZAQ2)
2872 .
Case(
"za3h.q", AArch64::ZAQ3)
2873 .
Case(
"za4h.q", AArch64::ZAQ4)
2874 .
Case(
"za5h.q", AArch64::ZAQ5)
2875 .
Case(
"za6h.q", AArch64::ZAQ6)
2876 .
Case(
"za7h.q", AArch64::ZAQ7)
2877 .
Case(
"za8h.q", AArch64::ZAQ8)
2878 .
Case(
"za9h.q", AArch64::ZAQ9)
2879 .
Case(
"za10h.q", AArch64::ZAQ10)
2880 .
Case(
"za11h.q", AArch64::ZAQ11)
2881 .
Case(
"za12h.q", AArch64::ZAQ12)
2882 .
Case(
"za13h.q", AArch64::ZAQ13)
2883 .
Case(
"za14h.q", AArch64::ZAQ14)
2884 .
Case(
"za15h.q", AArch64::ZAQ15)
2885 .
Case(
"za0h.d", AArch64::ZAD0)
2886 .
Case(
"za1h.d", AArch64::ZAD1)
2887 .
Case(
"za2h.d", AArch64::ZAD2)
2888 .
Case(
"za3h.d", AArch64::ZAD3)
2889 .
Case(
"za4h.d", AArch64::ZAD4)
2890 .
Case(
"za5h.d", AArch64::ZAD5)
2891 .
Case(
"za6h.d", AArch64::ZAD6)
2892 .
Case(
"za7h.d", AArch64::ZAD7)
2893 .
Case(
"za0h.s", AArch64::ZAS0)
2894 .
Case(
"za1h.s", AArch64::ZAS1)
2895 .
Case(
"za2h.s", AArch64::ZAS2)
2896 .
Case(
"za3h.s", AArch64::ZAS3)
2897 .
Case(
"za0h.h", AArch64::ZAH0)
2898 .
Case(
"za1h.h", AArch64::ZAH1)
2899 .
Case(
"za0h.b", AArch64::ZAB0)
2900 .
Case(
"za0v.q", AArch64::ZAQ0)
2901 .
Case(
"za1v.q", AArch64::ZAQ1)
2902 .
Case(
"za2v.q", AArch64::ZAQ2)
2903 .
Case(
"za3v.q", AArch64::ZAQ3)
2904 .
Case(
"za4v.q", AArch64::ZAQ4)
2905 .
Case(
"za5v.q", AArch64::ZAQ5)
2906 .
Case(
"za6v.q", AArch64::ZAQ6)
2907 .
Case(
"za7v.q", AArch64::ZAQ7)
2908 .
Case(
"za8v.q", AArch64::ZAQ8)
2909 .
Case(
"za9v.q", AArch64::ZAQ9)
2910 .
Case(
"za10v.q", AArch64::ZAQ10)
2911 .
Case(
"za11v.q", AArch64::ZAQ11)
2912 .
Case(
"za12v.q", AArch64::ZAQ12)
2913 .
Case(
"za13v.q", AArch64::ZAQ13)
2914 .
Case(
"za14v.q", AArch64::ZAQ14)
2915 .
Case(
"za15v.q", AArch64::ZAQ15)
2916 .
Case(
"za0v.d", AArch64::ZAD0)
2917 .
Case(
"za1v.d", AArch64::ZAD1)
2918 .
Case(
"za2v.d", AArch64::ZAD2)
2919 .
Case(
"za3v.d", AArch64::ZAD3)
2920 .
Case(
"za4v.d", AArch64::ZAD4)
2921 .
Case(
"za5v.d", AArch64::ZAD5)
2922 .
Case(
"za6v.d", AArch64::ZAD6)
2923 .
Case(
"za7v.d", AArch64::ZAD7)
2924 .
Case(
"za0v.s", AArch64::ZAS0)
2925 .
Case(
"za1v.s", AArch64::ZAS1)
2926 .
Case(
"za2v.s", AArch64::ZAS2)
2927 .
Case(
"za3v.s", AArch64::ZAS3)
2928 .
Case(
"za0v.h", AArch64::ZAH0)
2929 .
Case(
"za1v.h", AArch64::ZAH1)
2930 .
Case(
"za0v.b", AArch64::ZAB0)
// --- Register parsing helpers, fragment ---
// NOTE(review): extraction fragments; function bodies are incomplete (the
// name-matcher calls feeding RegNum and the switch return values are
// missing). Code is byte-identical.

// parseRegister delegates to tryParseRegister and inverts the success flag.
2936 return !tryParseRegister(
Reg, StartLoc, EndLoc).isSuccess();
2941 StartLoc = getLoc();
// Resolves a register name (including aliases and .req-defined names) to a
// register number, but only if it matches the requested RegKind; returns 0
// otherwise.
2948unsigned AArch64AsmParser::matchRegisterNameAlias(
StringRef Name,
2950 unsigned RegNum = 0;
2952 return Kind == RegKind::SVEDataVector ? RegNum : 0;
2955 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
2958 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
2961 return Kind == RegKind::NeonVector ? RegNum : 0;
2964 return Kind == RegKind::Matrix ? RegNum : 0;
// "zt0" (case-insensitive) is the SME2 lookup-table register.
2966 if (
Name.equals_insensitive(
"zt0"))
2967 return Kind == RegKind::LookupTable ? AArch64::ZT0 : 0;
2971 return (Kind == RegKind::Scalar) ? RegNum : 0;
// Scalar aliases: fp/lr and the x31/w31 zero-register spellings.
2976 .
Case(
"fp", AArch64::FP)
2977 .
Case(
"lr", AArch64::LR)
2978 .
Case(
"x31", AArch64::XZR)
2979 .
Case(
"w31", AArch64::WZR)
2981 return Kind == RegKind::Scalar ? RegNum : 0;
// Finally consult user-defined .req aliases, matching the recorded kind.
2987 if (Entry == RegisterReqs.
end())
2991 if (Kind ==
Entry->getValue().first)
2992 RegNum =
Entry->getValue().second;
// Returns how many architectural registers exist for a register kind
// (return values lost in extraction).
2997unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
2999 case RegKind::Scalar:
3000 case RegKind::NeonVector:
3001 case RegKind::SVEDataVector:
3003 case RegKind::Matrix:
3004 case RegKind::SVEPredicateVector:
3005 case RegKind::SVEPredicateAsCounter:
3007 case RegKind::LookupTable:
3022 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
3036 return Error(S,
"Expected cN operand where 0 <= N <= 15");
3039 if (Tok[0] !=
'c' && Tok[0] !=
'C')
3040 return Error(S,
"Expected cN operand where 0 <= N <= 15");
3044 if (BadNum || CRNum > 15)
3045 return Error(S,
"Expected cN operand where 0 <= N <= 15");
3049 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3058 unsigned MaxVal = 63;
3064 if (getParser().parseExpression(ImmVal))
3069 return TokError(
"immediate value expected for prefetch operand");
3072 return TokError(
"prefetch operand out of range, [0," + utostr(MaxVal) +
3075 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->
getValue());
3076 Operands.push_back(AArch64Operand::CreatePrefetch(
3077 prfop, RPRFM ? RPRFM->Name :
"", S, getContext()));
3082 return TokError(
"prefetch hint expected");
3084 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.
getString());
3086 return TokError(
"prefetch hint expected");
3088 Operands.push_back(AArch64Operand::CreatePrefetch(
3089 RPRFM->Encoding, Tok.
getString(), S, getContext()));
3095template <
bool IsSVEPrefetch>
3101 if (IsSVEPrefetch) {
3102 if (
auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(
N))
3103 return std::optional<unsigned>(Res->Encoding);
3104 }
else if (
auto Res = AArch64PRFM::lookupPRFMByName(
N))
3105 return std::optional<unsigned>(Res->Encoding);
3106 return std::optional<unsigned>();
3109 auto LookupByEncoding = [](
unsigned E) {
3110 if (IsSVEPrefetch) {
3111 if (
auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(
E))
3112 return std::optional<StringRef>(Res->Name);
3113 }
else if (
auto Res = AArch64PRFM::lookupPRFMByEncoding(
E))
3114 return std::optional<StringRef>(Res->Name);
3115 return std::optional<StringRef>();
3117 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3124 if (getParser().parseExpression(ImmVal))
3129 return TokError(
"immediate value expected for prefetch operand");
3132 return TokError(
"prefetch operand out of range, [0," + utostr(MaxVal) +
3135 auto PRFM = LookupByEncoding(MCE->
getValue());
3136 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(
""),
3142 return TokError(
"prefetch hint expected");
3144 auto PRFM = LookupByName(Tok.
getString());
3146 return TokError(
"prefetch hint expected");
3148 Operands.push_back(AArch64Operand::CreatePrefetch(
3149 *PRFM, Tok.
getString(), S, getContext()));
3159 return TokError(
"invalid operand for instruction");
3161 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.
getString());
3163 return TokError(
"invalid operand for instruction");
3165 Operands.push_back(AArch64Operand::CreatePSBHint(
3166 PSB->Encoding, Tok.
getString(), S, getContext()));
3172 SMLoc StartLoc = getLoc();
3178 auto RegTok = getTok();
3179 if (!tryParseScalarRegister(RegNum).isSuccess())
3182 if (RegNum != AArch64::XZR) {
3183 getLexer().UnLex(RegTok);
3190 if (!tryParseScalarRegister(RegNum).isSuccess())
3191 return TokError(
"expected register operand");
3193 if (RegNum != AArch64::XZR)
3194 return TokError(
"xzr must be followed by xzr");
3198 Operands.push_back(AArch64Operand::CreateReg(
3199 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3209 return TokError(
"invalid operand for instruction");
3211 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.
getString());
3213 return TokError(
"invalid operand for instruction");
3215 Operands.push_back(AArch64Operand::CreateBTIHint(
3216 BTI->Encoding, Tok.
getString(), S, getContext()));
3225 const MCExpr *Expr =
nullptr;
3231 if (parseSymbolicImmVal(Expr))
3237 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3247 return Error(S,
"gotpage label reference not allowed an addend");
3257 return Error(S,
"page or gotpage label reference expected");
3265 Operands.push_back(AArch64Operand::CreateImm(Expr, S,
E, getContext()));
3274 const MCExpr *Expr =
nullptr;
3283 if (parseSymbolicImmVal(Expr))
3289 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3296 return Error(S,
"unexpected adr label");
3301 Operands.push_back(AArch64Operand::CreateImm(Expr, S,
E, getContext()));
// NOTE(review): fragments of the floating-point-immediate parser (a template
// over AddFPZeroAsLiteral) and the immediate-with-optional-'lsl #N' parser.
// Interior lines are elided (numeric prefixes jump); tokens preserved
// byte-for-byte.
// Fragment: FP immediate — an integer token <= 255 is taken as an encoded
// FP8 value; otherwise the text is converted with APFloat (IEEEdouble,
// round-toward-zero). +0.0 may be emitted as the literal tokens "#0" ".0".
3306template <
bool AddFPZeroAsLiteral>
3319 return TokError(
"invalid floating point immediate");
3324 if (Tok.
getIntVal() > 255 || isNegative)
3325 return TokError(
"encoded floating point value out of range");
3329 AArch64Operand::CreateFPImm(
F,
true, S, getContext()));
3332 APFloat RealVal(APFloat::IEEEdouble());
3334 RealVal.convertFromString(Tok.
getString(), APFloat::rmTowardZero);
3336 return TokError(
"invalid floating point representation");
3339 RealVal.changeSign();
3341 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3342 Operands.push_back(AArch64Operand::CreateToken(
"#0", S, getContext()));
3343 Operands.push_back(AArch64Operand::CreateToken(
".0", S, getContext()));
3345 Operands.push_back(AArch64Operand::CreateFPImm(
3346 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
// Fragment: immediate with optional shift — accepts an optional trailing
// vector-group token, then only "lsl #N" with a non-negative N; a zero shift
// collapses to a plain immediate operand.
3371 if (parseSymbolicImmVal(Imm))
3375 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3382 if (!parseOptionalVGOperand(
Operands, VecGroup)) {
3384 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3386 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3392 !getTok().getIdentifier().equals_insensitive(
"lsl"))
3393 return Error(getLoc(),
"only 'lsl #+N' valid after immediate");
3401 return Error(getLoc(),
"only 'lsl #+N' valid after immediate");
3403 int64_t ShiftAmount = getTok().getIntVal();
3405 if (ShiftAmount < 0)
3406 return Error(getLoc(),
"positive shift amount required");
3410 if (ShiftAmount == 0 && Imm !=
nullptr) {
3412 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3416 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3417 getLoc(), getContext()));
// NOTE(review): fragments of parseCondCodeString/parseCondCode, the SVCR
// operand parser, the ZA matrix-register parser, and the shift/extend
// specifier parser. Interior lines are elided; tokens preserved
// byte-for-byte.
// Fragment: condition-code parsing — unknown codes produce "invalid
// condition code" with a did-you-mean suggestion (e.g. "nfrst"); AL/NV are
// rejected when the mnemonic requires an invertible condition.
3424AArch64AsmParser::parseCondCodeString(
StringRef Cond, std::string &Suggestion) {
3461 Suggestion =
"nfrst";
3468 bool invertCondCode) {
3474 std::string Suggestion;
3477 std::string Msg =
"invalid condition code";
3478 if (!Suggestion.empty())
3479 Msg +=
", did you mean " + Suggestion +
"?";
3480 return TokError(Msg);
3484 if (invertCondCode) {
3486 return TokError(
"condition codes AL and NV are invalid for this instruction");
3491 AArch64Operand::CreateCondCode(
CC, S, getLoc(), getContext()));
// Fragment: SVCR operand (SME streaming control) — name looked up with
// lookupSVCRByName; encoding kept only if the subtarget has the features.
3500 return TokError(
"invalid operand for instruction");
3502 unsigned PStateImm = -1;
3503 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.
getString());
3506 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3507 PStateImm = SVCR->Encoding;
3510 AArch64Operand::CreateSVCR(PStateImm, Tok.
getString(), S, getContext()));
// Fragment: matrix register — "za"/"za.<suffix>" becomes a MatrixKind::Array
// operand with the element width taken from the dot suffix; otherwise a
// named tile is matched, with "h"/"v" selecting Row/Col kinds.
3521 if (
Name.equals_insensitive(
"za") ||
Name.starts_with_insensitive(
"za.")) {
3523 unsigned ElementWidth = 0;
3524 auto DotPosition =
Name.find(
'.');
3526 const auto &KindRes =
3530 "Expected the register to be followed by element width suffix");
3531 ElementWidth = KindRes->second;
3533 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3534 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3539 if (parseOperand(
Operands,
false,
false))
3546 unsigned Reg = matchRegisterNameAlias(
Name, RegKind::Matrix);
3550 size_t DotPosition =
Name.find(
'.');
3558 .
Case(
"h", MatrixKind::Row)
3559 .
Case(
"v", MatrixKind::Col)
3566 "Expected the register to be followed by element width suffix");
3567 unsigned ElementWidth = KindRes->second;
3571 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3572 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3577 if (parseOperand(
Operands,
false,
false))
// Fragment: optional shift/extend — a bare extend gets implicit amount 0;
// an explicit amount must be a constant '#imm' expression (MCConstantExpr).
3619 return TokError(
"expected #imm after shift specifier");
3625 AArch64Operand::CreateShiftExtend(ShOp, 0,
false, S,
E, getContext()));
3634 return Error(
E,
"expected integer shift amount");
3637 if (getParser().parseExpression(ImmVal))
3642 return Error(
E,
"expected constant '#imm' after shift specifier");
3645 Operands.push_back(AArch64Operand::CreateShiftExtend(
3646 ShOp, MCE->
getValue(),
true, S,
E, getContext()));
// NOTE(review): fragment of the architecture-extension table mapping
// user-facing extension names (as used by .arch_extension / -march suffixes)
// to AArch64 subtarget feature bits. Entries "memtag"/"mte" and "rdm"/"rdma"
// are deliberate aliases of the same feature. The surrounding declaration
// and any elided entries are outside this view; tokens preserved
// byte-for-byte.
3654 {
"crc", {AArch64::FeatureCRC}},
3655 {
"sm4", {AArch64::FeatureSM4}},
3656 {
"sha3", {AArch64::FeatureSHA3}},
3657 {
"sha2", {AArch64::FeatureSHA2}},
3658 {
"aes", {AArch64::FeatureAES}},
3659 {
"crypto", {AArch64::FeatureCrypto}},
3660 {
"fp", {AArch64::FeatureFPARMv8}},
3661 {
"simd", {AArch64::FeatureNEON}},
3662 {
"ras", {AArch64::FeatureRAS}},
3663 {
"rasv2", {AArch64::FeatureRASv2}},
3664 {
"lse", {AArch64::FeatureLSE}},
3665 {
"predres", {AArch64::FeaturePredRes}},
3666 {
"predres2", {AArch64::FeatureSPECRES2}},
3667 {
"ccdp", {AArch64::FeatureCacheDeepPersist}},
3668 {
"mte", {AArch64::FeatureMTE}},
3669 {
"memtag", {AArch64::FeatureMTE}},
3670 {
"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3671 {
"pan", {AArch64::FeaturePAN}},
3672 {
"pan-rwv", {AArch64::FeaturePAN_RWV}},
3673 {
"ccpp", {AArch64::FeatureCCPP}},
3674 {
"rcpc", {AArch64::FeatureRCPC}},
3675 {
"rng", {AArch64::FeatureRandGen}},
3676 {
"sve", {AArch64::FeatureSVE}},
3677 {
"sve-b16b16", {AArch64::FeatureSVEB16B16}},
3678 {
"sve2", {AArch64::FeatureSVE2}},
3679 {
"sve2-aes", {AArch64::FeatureSVE2AES}},
3680 {
"sve2-sm4", {AArch64::FeatureSVE2SM4}},
3681 {
"sve2-sha3", {AArch64::FeatureSVE2SHA3}},
3682 {
"sve2-bitperm", {AArch64::FeatureSVE2BitPerm}},
3683 {
"sve2p1", {AArch64::FeatureSVE2p1}},
3684 {
"ls64", {AArch64::FeatureLS64}},
3685 {
"xs", {AArch64::FeatureXS}},
3686 {
"pauth", {AArch64::FeaturePAuth}},
3687 {
"flagm", {AArch64::FeatureFlagM}},
3688 {
"rme", {AArch64::FeatureRME}},
3689 {
"sme", {AArch64::FeatureSME}},
3690 {
"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3691 {
"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3692 {
"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3693 {
"sme2", {AArch64::FeatureSME2}},
3694 {
"sme2p1", {AArch64::FeatureSME2p1}},
3695 {
"sme-b16b16", {AArch64::FeatureSMEB16B16}},
3696 {
"hbc", {AArch64::FeatureHBC}},
3697 {
"mops", {AArch64::FeatureMOPS}},
3698 {
"mec", {AArch64::FeatureMEC}},
3699 {
"the", {AArch64::FeatureTHE}},
3700 {
"d128", {AArch64::FeatureD128}},
3701 {
"lse128", {AArch64::FeatureLSE128}},
3702 {
"ite", {AArch64::FeatureITE}},
3703 {
"cssc", {AArch64::FeatureCSSC}},
3704 {
"rcpc3", {AArch64::FeatureRCPC3}},
3705 {
"gcs", {AArch64::FeatureGCS}},
3706 {
"bf16", {AArch64::FeatureBF16}},
3707 {
"compnum", {AArch64::FeatureComplxNum}},
3708 {
"dotprod", {AArch64::FeatureDotProd}},
3709 {
"f32mm", {AArch64::FeatureMatMulFP32}},
3710 {
"f64mm", {AArch64::FeatureMatMulFP64}},
3711 {
"fp16", {AArch64::FeatureFullFP16}},
3712 {
"fp16fml", {AArch64::FeatureFP16FML}},
3713 {
"i8mm", {AArch64::FeatureMatMulInt8}},
3714 {
"lor", {AArch64::FeatureLOR}},
3715 {
"profile", {AArch64::FeatureSPE}},
// NOTE(review): numbering jumps 3715 -> 3719; entries elided here.
3719 {
"rdm", {AArch64::FeatureRDM}},
3720 {
"rdma", {AArch64::FeatureRDM}},
3721 {
"sb", {AArch64::FeatureSB}},
3722 {
"ssbs", {AArch64::FeatureSSBS}},
3723 {
"tme", {AArch64::FeatureTME}},
3724 {
"fp8", {AArch64::FeatureFP8}},
3725 {
"faminmax", {AArch64::FeatureFAMINMAX}},
3726 {
"fp8fma", {AArch64::FeatureFP8FMA}},
3727 {
"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3728 {
"fp8dot2", {AArch64::FeatureFP8DOT2}},
3729 {
"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3730 {
"fp8dot4", {AArch64::FeatureFP8DOT4}},
3731 {
"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3732 {
"lut", {AArch64::FeatureLUT}},
3733 {
"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3734 {
"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3735 {
"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3736 {
"sme-fa64", {AArch64::FeatureSMEFA64}},
3737 {
"cpa", {AArch64::FeatureCPA}},
3738 {
"tlbiw", {AArch64::FeatureTLBIW}},
// NOTE(review): fragment mapping required feature-bit sets to a
// human-readable architecture-version string (the string-append bodies of
// each branch are elided), followed by a fragment of the SYS-alias builder
// that decodes Op1/Cn/Cm fields from a packed encoding. Tokens preserved
// byte-for-byte.
3742 if (FBS[AArch64::HasV8_0aOps])
3744 if (FBS[AArch64::HasV8_1aOps])
3746 else if (FBS[AArch64::HasV8_2aOps])
3748 else if (FBS[AArch64::HasV8_3aOps])
3750 else if (FBS[AArch64::HasV8_4aOps])
3752 else if (FBS[AArch64::HasV8_5aOps])
3754 else if (FBS[AArch64::HasV8_6aOps])
3756 else if (FBS[AArch64::HasV8_7aOps])
3758 else if (FBS[AArch64::HasV8_8aOps])
3760 else if (FBS[AArch64::HasV8_9aOps])
3762 else if (FBS[AArch64::HasV9_0aOps])
3764 else if (FBS[AArch64::HasV9_1aOps])
3766 else if (FBS[AArch64::HasV9_2aOps])
3768 else if (FBS[AArch64::HasV9_3aOps])
3770 else if (FBS[AArch64::HasV9_4aOps])
3772 else if (FBS[AArch64::HasV9_5aOps])
3774 else if (FBS[AArch64::HasV8_0rOps])
3783 Str += !ExtMatches.
empty() ? llvm::join(ExtMatches,
", ") :
"(unknown)";
// Fragment: createSysAlias — unpack Cm (bits 3..6), Cn (bits 7..10) and
// Op1 (bits 11..13) from the encoding, then emit imm/CR/CR/imm operands.
3790 const uint16_t Cm = (Encoding & 0x78) >> 3;
3791 const uint16_t Cn = (Encoding & 0x780) >> 7;
3792 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3797 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3799 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3801 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3804 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
// NOTE(review): fragments of the SYS-alias mnemonic handlers ("sys" plus the
// IC/DC/AT/TLBI aliases, the prediction-restriction mnemonics cfp/dvp/cpp/
// cosp, and the "sysp"/"tlbip" pair-register forms). Interior lines elided;
// tokens preserved byte-for-byte.
// Fragment: reject dotted mnemonics, then re-insert the canonical "sys"
// token so generic matching proceeds.
3811 if (
Name.contains(
'.'))
3812 return TokError(
"invalid operand");
3815 Operands.push_back(AArch64Operand::CreateToken(
"sys", NameLoc, getContext()));
// Fragment: per-alias table lookup; each branch validates the operand name
// and that the subtarget has the features, reporting what is required.
3821 if (Mnemonic ==
"ic") {
3824 return TokError(
"invalid operand for IC instruction");
3825 else if (!IC->
haveFeatures(getSTI().getFeatureBits())) {
3826 std::string Str(
"IC " + std::string(IC->
Name) +
" requires: ");
3828 return TokError(Str);
3831 }
else if (Mnemonic ==
"dc") {
3834 return TokError(
"invalid operand for DC instruction");
3835 else if (!DC->
haveFeatures(getSTI().getFeatureBits())) {
3836 std::string Str(
"DC " + std::string(DC->
Name) +
" requires: ");
3838 return TokError(Str);
3841 }
else if (Mnemonic ==
"at") {
3844 return TokError(
"invalid operand for AT instruction");
3845 else if (!AT->
haveFeatures(getSTI().getFeatureBits())) {
3846 std::string Str(
"AT " + std::string(AT->
Name) +
" requires: ");
3848 return TokError(Str);
3851 }
else if (Mnemonic ==
"tlbi") {
3854 return TokError(
"invalid operand for TLBI instruction");
3855 else if (!TLBI->
haveFeatures(getSTI().getFeatureBits())) {
3856 std::string Str(
"TLBI " + std::string(TLBI->
Name) +
" requires: ");
3858 return TokError(Str);
3861 }
else if (Mnemonic ==
"cfp" || Mnemonic ==
"dvp" || Mnemonic ==
"cpp" || Mnemonic ==
"cosp") {
// Fragment: prediction-restriction mnemonics take only the "rctx" operand;
// cosp additionally needs SPECRES2, the rest need PredRes. Op2 selects the
// mnemonic within the fixed SYS #3, C7, C3 encoding (SYS_3_7_3).
3863 if (
Op.lower() !=
"rctx")
3864 return TokError(
"invalid operand for prediction restriction instruction");
3866 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
3867 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
3868 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
3870 if (Mnemonic ==
"cosp" && !hasSpecres2)
3871 return TokError(
"COSP requires: predres2");
3873 return TokError(Mnemonic.
upper() +
"RCTX requires: predres");
3875 uint16_t PRCTX_Op2 = Mnemonic ==
"cfp" ? 0b100
3876 : Mnemonic ==
"dvp" ? 0b101
3877 : Mnemonic ==
"cosp" ? 0b110
3878 : Mnemonic ==
"cpp" ? 0b111
3881 "Invalid mnemonic for prediction restriction instruction");
3882 const auto SYS_3_7_3 = 0b01101110011;
3883 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
3885 createSysAlias(Encoding,
Operands, S);
// Fragment: a trailing register operand is mandatory unless the op name
// contains "all", and forbidden when it does.
3890 bool ExpectRegister = !
Op.contains_insensitive(
"all");
3891 bool HasRegister =
false;
3896 return TokError(
"expected register operand");
3900 if (ExpectRegister && !HasRegister)
3901 return TokError(
"specified " + Mnemonic +
" op requires a register");
3902 else if (!ExpectRegister && HasRegister)
3903 return TokError(
"specified " + Mnemonic +
" op does not use a register");
// Fragment: "sysp" — same dotted-name rejection, canonical token insertion;
// "tlbip" strips an optional case-insensitive "nXS" qualifier and folds it
// into bit 7 of the TLBI encoding, then requires an X-register pair.
3915 if (
Name.contains(
'.'))
3916 return TokError(
"invalid operand");
3920 AArch64Operand::CreateToken(
"sysp", NameLoc, getContext()));
3926 if (Mnemonic ==
"tlbip") {
3927 bool HasnXSQualifier =
Op.ends_with_insensitive(
"nXS");
3928 if (HasnXSQualifier) {
3929 Op =
Op.drop_back(3);
3933 return TokError(
"invalid operand for TLBIP instruction");
3935 TLBIorig->
Name, TLBIorig->
Encoding | (HasnXSQualifier ? (1 << 7) : 0),
3942 std::string(TLBI.
Name) + (HasnXSQualifier ?
"nXS" :
"");
3943 std::string Str(
"TLBIP " +
Name +
" requires: ");
3945 return TokError(Str);
3956 return TokError(
"expected register identifier");
3961 return TokError(
"specified " + Mnemonic +
3962 " op requires a pair of registers");
3979 SMLoc ExprLoc = getLoc();
3981 if (getParser().parseExpression(ImmVal))
3985 return Error(ExprLoc,
"immediate value expected for barrier operand");
3987 if (Mnemonic ==
"dsb" &&
Value > 15) {
3994 if (Value < 0 || Value > 15)
3995 return Error(ExprLoc,
"barrier operand out of range");
3996 auto DB = AArch64DB::lookupDBByEncoding(
Value);
3997 Operands.push_back(AArch64Operand::CreateBarrier(
Value, DB ?
DB->Name :
"",
3998 ExprLoc, getContext(),
4004 return TokError(
"invalid operand for instruction");
4007 auto TSB = AArch64TSB::lookupTSBByName(Operand);
4008 auto DB = AArch64DB::lookupDBByName(Operand);
4010 if (Mnemonic ==
"isb" && (!DB ||
DB->Encoding != AArch64DB::sy))
4011 return TokError(
"'sy' or #imm operand expected");
4013 if (Mnemonic ==
"tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
4014 return TokError(
"'csync' operand expected");
4016 if (Mnemonic ==
"dsb") {
4021 return TokError(
"invalid barrier option name");
4024 Operands.push_back(AArch64Operand::CreateBarrier(
4025 DB ?
DB->Encoding : TSB->Encoding, Tok.
getString(), getLoc(),
4026 getContext(),
false ));
4036 assert(Mnemonic ==
"dsb" &&
"Instruction does not accept nXS operands");
4037 if (Mnemonic !=
"dsb")
4043 SMLoc ExprLoc = getLoc();
4044 if (getParser().parseExpression(ImmVal))
4048 return Error(ExprLoc,
"immediate value expected for barrier operand");
4053 return Error(ExprLoc,
"barrier operand out of range");
4054 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(
Value);
4055 Operands.push_back(AArch64Operand::CreateBarrier(
DB->Encoding,
DB->Name,
4056 ExprLoc, getContext(),
4062 return TokError(
"invalid operand for instruction");
4065 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4068 return TokError(
"invalid barrier option name");
4071 AArch64Operand::CreateBarrier(
DB->Encoding, Tok.
getString(), getLoc(),
4072 getContext(),
true ));
4084 if (AArch64SVCR::lookupSVCRByName(Tok.
getString()))
4089 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4090 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4091 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4095 unsigned PStateImm = -1;
4096 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.
getString());
4097 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4098 PStateImm = PState15->Encoding;
4100 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.
getString());
4101 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4102 PStateImm = PState1->Encoding;
4106 AArch64Operand::CreateSysReg(Tok.
getString(), getLoc(), MRSReg, MSRReg,
4107 PStateImm, getContext()));
// NOTE(review): fragments of the NEON vector-register parser, the vector
// index ("[imm]") parser, the generic vector-register-with-suffix parser,
// and the SVE predicate-vector parsers (predicate and predicate-as-counter).
// Interior lines elided; tokens preserved byte-for-byte.
// Fragment: NEON vector register — element width derived from the ".16b"
// style kind suffix; an optional trailing vector index is then parsed.
4122 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4130 unsigned ElementWidth = KindRes->second;
4132 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4133 S, getLoc(), getContext()));
4138 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4140 return tryParseVectorIndex(
Operands).isFailure();
// Fragment: vector index — requires a constant expression inside brackets.
4144 SMLoc SIdx = getLoc();
4147 if (getParser().parseExpression(ImmVal))
4151 return TokError(
"immediate value expected for vector index");
4158 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->
getValue(), SIdx,
// Fragment: vector register with optional ".kind" qualifier, matched through
// the register-alias table for the requested RegKind.
4171 RegKind MatchKind) {
4180 size_t Start = 0, Next =
Name.find(
'.');
4182 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4188 return TokError(
"invalid vector kind qualifier");
// Fragment: try predicate-as-counter first, then fall back to a plain SVE
// predicate vector.
4199ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4202 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(
Operands);
4204 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(
Operands);
// Fragment: SVE predicate vector (template over RegKind RK) — builds the
// register operand, then parses an optional "/z" or "/m" predication
// suffix; predicate-as-counter only allows "/z".
4209template <RegKind RK>
4213 const SMLoc S = getLoc();
4216 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4224 unsigned ElementWidth = KindRes->second;
4225 Operands.push_back(AArch64Operand::CreateVectorReg(
4226 RegNum, RK, ElementWidth, S,
4227 getLoc(), getContext()));
4230 if (RK == RegKind::SVEPredicateAsCounter) {
4237 if (parseOperand(
Operands,
false,
false))
4248 return Error(S,
"not expecting size suffix");
4251 Operands.push_back(AArch64Operand::CreateToken(
"/", getLoc(), getContext()));
4256 auto Pred = getTok().getString().lower();
4257 if (RK == RegKind::SVEPredicateAsCounter && Pred !=
"z")
4258 return Error(getLoc(),
"expecting 'z' predication");
4260 if (RK == RegKind::SVEPredicateVector && Pred !=
"z" && Pred !=
"m")
4261 return Error(getLoc(),
"expecting 'm' or 'z' predication");
4264 const char *ZM = Pred ==
"z" ?
"z" :
"m";
4265 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
// NOTE(review): fragments of the register-dispatch helper, the symbolic
// immediate (relocation-specifier) parser, and the SME matrix tile-list
// parser. Interior lines elided; tokens preserved byte-for-byte.
// Fragment: register dispatch — NEON vector, then ZT0, then plain GPR.
4274 if (!tryParseNeonVectorRegister(
Operands))
4277 if (tryParseZTOperand(
Operands).isSuccess())
4281 if (tryParseGPROperand<false>(
Operands).isSuccess())
// Fragment: symbolic immediate — an optional ":specifier:" relocation
// modifier (lower-cased for matching) followed by the expression proper.
4287bool AArch64AsmParser::parseSymbolicImmVal(
const MCExpr *&ImmVal) {
4288 bool HasELFModifier =
false;
4292 HasELFModifier =
true;
4295 return TokError(
"expect relocation specifier in operand after ':'");
4297 std::string LowerCase = getTok().getIdentifier().lower();
4348 return TokError(
"expect relocation specifier in operand after ':'");
4352 if (parseToken(
AsmToken::Colon,
"expect ':' after relocation specifier"))
4356 if (getParser().parseExpression(ImmVal))
// Fragment: matrix tile list — "{}" yields mask 0, "{za}" mask 0xFF;
// otherwise each named tile is expanded to its covering D-tiles
// (ComputeRegsForAlias) and accumulated into a register mask, warning on
// out-of-order or duplicate tiles and rejecting mixed element widths.
4369 auto ParseMatrixTile = [
this](
unsigned &
Reg,
4372 size_t DotPosition =
Name.find(
'.');
4381 const std::optional<std::pair<int, int>> &KindRes =
4385 "Expected the register to be followed by element width suffix");
4386 ElementWidth = KindRes->second;
4393 auto LCurly = getTok();
4398 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4399 0, S, getLoc(), getContext()));
4404 if (getTok().getString().equals_insensitive(
"za")) {
4410 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4411 0xFF, S, getLoc(), getContext()));
4415 SMLoc TileLoc = getLoc();
4417 unsigned FirstReg, ElementWidth;
4418 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4419 if (!ParseRes.isSuccess()) {
4420 getLexer().UnLex(LCurly);
4426 unsigned PrevReg = FirstReg;
4429 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4432 SeenRegs.
insert(FirstReg);
4436 unsigned Reg, NextElementWidth;
4437 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4438 if (!ParseRes.isSuccess())
4442 if (ElementWidth != NextElementWidth)
4443 return Error(TileLoc,
"mismatched register size suffix");
4446 Warning(TileLoc,
"tile list not in ascending order");
4449 Warning(TileLoc,
"duplicate tile in list");
4452 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4461 unsigned RegMask = 0;
4462 for (
auto Reg : DRegs)
4466 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
// NOTE(review): fragments of the templated vector-list parser ("{ v0.4s,
// v1.4s }" style, including strided SME2 lists), the NEON list wrapper, and
// the GPR64sp/#0 operand parser. Interior lines elided; tokens preserved
// byte-for-byte.
// Fragment: ParseVector lambda — a no-match is an error only when a vector
// was positively expected and the token is not "zt0"/"za*".
4471template <RegKind VectorKind>
4481 auto RegTok = getTok();
4482 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4483 if (ParseRes.isSuccess()) {
4490 RegTok.getString().equals_insensitive(
"zt0"))
4494 (ParseRes.isNoMatch() && NoMatchIsError &&
4495 !RegTok.getString().starts_with_insensitive(
"za")))
4496 return Error(Loc,
"vector register expected");
// Fragment: range form "vA - vB" — all elements must share the kind suffix;
// the wrap-around distance (modulo register count) must be 1..3.
4501 int NumRegs = getNumRegsForRegKind(VectorKind);
4503 auto LCurly = getTok();
4508 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4512 if (ParseRes.isNoMatch())
4515 if (!ParseRes.isSuccess())
4518 int64_t PrevReg = FirstReg;
4523 SMLoc Loc = getLoc();
4527 ParseRes = ParseVector(Reg, NextKind, getLoc(),
true);
4528 if (!ParseRes.isSuccess())
4532 if (Kind != NextKind)
4533 return Error(Loc,
"mismatched register size suffix");
4536 (PrevReg <
Reg) ? (Reg - PrevReg) : (
Reg + NumRegs - PrevReg);
4538 if (Space == 0 || Space > 3)
4539 return Error(Loc,
"invalid number of vectors");
// Fragment: comma-separated form — the stride between consecutive encoding
// values is fixed by the first pair and every later pair must repeat it
// (modulo register count), enabling SME2 strided lists.
4544 bool HasCalculatedStride =
false;
4546 SMLoc Loc = getLoc();
4549 ParseRes = ParseVector(Reg, NextKind, getLoc(),
true);
4550 if (!ParseRes.isSuccess())
4554 if (Kind != NextKind)
4555 return Error(Loc,
"mismatched register size suffix");
4557 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4558 unsigned PrevRegVal =
4559 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4560 if (!HasCalculatedStride) {
4561 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4562 : (RegVal + NumRegs - PrevRegVal);
4563 HasCalculatedStride =
true;
4567 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4568 return Error(Loc,
"registers must have the same sequential stride");
4579 return Error(S,
"invalid number of vectors");
4581 unsigned NumElements = 0;
4582 unsigned ElementWidth = 0;
4583 if (!
Kind.empty()) {
4585 std::tie(NumElements, ElementWidth) = *VK;
4588 Operands.push_back(AArch64Operand::CreateVectorList(
4589 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4590 getLoc(), getContext()));
// Fragment: NEON list wrapper, then an optional trailing vector index.
4597 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(
Operands,
true);
4598 if (!ParseRes.isSuccess())
4601 return tryParseVectorIndex(
Operands).isFailure();
// Fragment: GPR64sp operand allowing only an absent index or "[#0]".
4605 SMLoc StartLoc = getLoc();
4613 Operands.push_back(AArch64Operand::CreateReg(
4614 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4621 return Error(getLoc(),
"index must be absent or #0");
4624 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4625 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4626 return Error(getLoc(),
"index must be absent or #0");
4628 Operands.push_back(AArch64Operand::CreateReg(
4629 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
// NOTE(review): fragments of the ZT0 lookup-table operand parser, the
// GPR-with-optional-shift/extend parser (template over ParseShiftExtend and
// the register-equality constraint), the "mul vl"/"mul #imm" suffix parser,
// and the vector-group ("vgx2"/"vgx4") and keyword operand helpers.
// Interior lines elided; tokens preserved byte-for-byte.
// Fragment: ZT0 register, optionally followed by a bracketed constant index
// and an optional mul operand; bracket tokens are materialized explicitly.
4634 SMLoc StartLoc = getLoc();
4638 unsigned RegNum = matchRegisterNameAlias(
Name, RegKind::LookupTable);
4643 Operands.push_back(AArch64Operand::CreateReg(
4644 RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
4650 AArch64Operand::CreateToken(
"[", getLoc(), getContext()));
4652 if (getParser().parseExpression(ImmVal))
4656 return TokError(
"immediate value expected for vector index");
4657 Operands.push_back(AArch64Operand::CreateImm(
4659 getLoc(), getContext()));
4661 if (parseOptionalMulOperand(
Operands))
4666 AArch64Operand::CreateToken(
"]", getLoc(), getContext()));
// Fragment: scalar GPR — when a shift/extend suffix follows, the register
// operand is rebuilt carrying the extend type/amount from the parsed
// suffix operand.
4671template <
bool ParseShiftExtend, RegConstra
intEqualityTy EqTy>
4673 SMLoc StartLoc = getLoc();
4682 Operands.push_back(AArch64Operand::CreateReg(
4683 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4692 Res = tryParseOptionalShiftExtend(ExtOpnd);
4696 auto Ext =
static_cast<AArch64Operand*
>(ExtOpnd.
back().get());
4697 Operands.push_back(AArch64Operand::CreateReg(
4698 RegNum, RegKind::Scalar, StartLoc,
Ext->getEndLoc(), getContext(), EqTy,
4699 Ext->getShiftExtendType(),
Ext->getShiftExtendAmount(),
4700 Ext->hasShiftExtendAmount()));
// Fragment: optional "mul vl" / "mul #imm" suffix after an SVE operand.
4714 if (!getTok().getString().equals_insensitive(
"mul") ||
4715 !(NextIsVL || NextIsHash))
4719 AArch64Operand::CreateToken(
"mul", getLoc(), getContext()));
4724 AArch64Operand::CreateToken(
"vl", getLoc(), getContext()));
4736 if (
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4737 Operands.push_back(AArch64Operand::CreateImm(
4744 return Error(getLoc(),
"expected 'vl' or '#<imm>'");
// Fragment: optional SME2 vector-group token ("vgx2"/"vgx4") and the
// generic keyword-operand helper.
4750 auto Tok = Parser.
getTok();
4755 .
Case(
"vgx2",
"vgx2")
4756 .
Case(
"vgx4",
"vgx4")
4768 auto Tok = getTok();
4778 AArch64Operand::CreateToken(Keyword, Tok.
getLoc(), getContext()));
// NOTE(review): fragment of the top-level parseOperand dispatch. It first
// tries the TableGen-generated custom operand matchers, then switches on the
// lexer token kind: '#'-style symbolic immediates (with optional
// shift/extend), '['-bracketed memory-like operands, '{'-lists, identifiers
// (condition codes, mul, keyword operands for brb/smstart/smstop), and a
// plain-expression fallback. Interior lines elided; tokens preserved
// byte-for-byte; this definition continues past this view.
4787 bool invertCondCode) {
4791 MatchOperandParserImpl(
Operands, Mnemonic,
true);
4805 auto parseOptionalShiftExtend = [&](
AsmToken SavedTok) {
4810 getLexer().UnLex(SavedTok);
4814 switch (getLexer().getKind()) {
4818 if (parseSymbolicImmVal(Expr))
4819 return Error(S,
"invalid operand");
4822 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4823 return parseOptionalShiftExtend(getTok());
4827 AArch64Operand::CreateToken(
"[", getLoc(), getContext()));
4832 return parseOperand(
Operands,
false,
false);
4835 if (!parseNeonVectorList(
Operands))
4839 AArch64Operand::CreateToken(
"{", getLoc(), getContext()));
4844 return parseOperand(
Operands,
false,
false);
4849 if (!parseOptionalVGOperand(
Operands, VecGroup)) {
4851 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
4856 return parseCondCode(
Operands, invertCondCode);
4870 Res = tryParseOptionalShiftExtend(
Operands);
4873 getLexer().UnLex(SavedTok);
4880 if (!parseOptionalMulOperand(
Operands))
4885 if (Mnemonic ==
"brb" || Mnemonic ==
"smstart" || Mnemonic ==
"smstop" ||
4887 return parseKeywordOperand(
Operands);
4893 if (getParser().parseExpression(IdVal))
4896 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
4908 bool isNegative =
false;