  SVEPredicateAsCounter,
enum class MatrixKind { Array, Tile, Row, Col };
enum RegConstraintEqualityTy {
  StringMap<std::pair<RegKind, MCRegister>> RegisterReqs;
  static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
    case AArch64::MOVPRFX_ZZ:
    case AArch64::MOVPRFX_ZPmZ_B:
    case AArch64::MOVPRFX_ZPmZ_H:
    case AArch64::MOVPRFX_ZPmZ_S:
    case AArch64::MOVPRFX_ZPmZ_D:
             "No destructive element size set for movprfx");
    case AArch64::MOVPRFX_ZPzZ_B:
    case AArch64::MOVPRFX_ZPzZ_H:
    case AArch64::MOVPRFX_ZPzZ_S:
    case AArch64::MOVPRFX_ZPzZ_D:
             "No destructive element size set for movprfx");
  PrefixInfo() = default;
  bool isActive() const { return Active; }
  unsigned getElementSize() const {
  MCRegister getDstReg() const { return Dst; }
  MCRegister getPgReg() const {
  bool Predicated = false;
  unsigned ElementSize;
  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  SMLoc getLoc() const { return getParser().getTok().getLoc(); }

  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  bool parseSyslAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
                      std::string &Suggestion);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  MCRegister matchRegisterNameAlias(StringRef Name, RegKind Kind);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
                    bool invertCondCode);
  bool parseImmExpr(int64_t &Out);
  bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
  bool parseDataExpr(const MCExpr *&Res) override;
  bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);

  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);
  bool parseDirectiveTLSDescCall(SMLoc L);
  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);
  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFINegateRAStateWithPC();
  bool parseDirectiveCFIBKeyFrame();
  bool parseDirectiveCFIMTETaggedFrame();
  bool parseDirectiveVariantPCS(SMLoc L);
  bool parseDirectiveSEHAllocStack(SMLoc L);
  bool parseDirectiveSEHPrologEnd(SMLoc L);
  bool parseDirectiveSEHSaveR19R20X(SMLoc L);
  bool parseDirectiveSEHSaveFPLR(SMLoc L);
  bool parseDirectiveSEHSaveFPLRX(SMLoc L);
  bool parseDirectiveSEHSaveReg(SMLoc L);
  bool parseDirectiveSEHSaveRegX(SMLoc L);
  bool parseDirectiveSEHSaveRegP(SMLoc L);
  bool parseDirectiveSEHSaveRegPX(SMLoc L);
  bool parseDirectiveSEHSaveLRPair(SMLoc L);
  bool parseDirectiveSEHSaveFReg(SMLoc L);
  bool parseDirectiveSEHSaveFRegX(SMLoc L);
  bool parseDirectiveSEHSaveFRegP(SMLoc L);
  bool parseDirectiveSEHSaveFRegPX(SMLoc L);
  bool parseDirectiveSEHSetFP(SMLoc L);
  bool parseDirectiveSEHAddFP(SMLoc L);
  bool parseDirectiveSEHNop(SMLoc L);
  bool parseDirectiveSEHSaveNext(SMLoc L);
  bool parseDirectiveSEHEpilogStart(SMLoc L);
  bool parseDirectiveSEHEpilogEnd(SMLoc L);
  bool parseDirectiveSEHTrapFrame(SMLoc L);
  bool parseDirectiveSEHMachineFrame(SMLoc L);
  bool parseDirectiveSEHContext(SMLoc L);
  bool parseDirectiveSEHECContext(SMLoc L);
  bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
  bool parseDirectiveSEHPACSignLR(SMLoc L);
  bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
  bool parseDirectiveSEHAllocZ(SMLoc L);
  bool parseDirectiveSEHSaveZReg(SMLoc L);
  bool parseDirectiveSEHSavePReg(SMLoc L);
  bool parseDirectiveAeabiSubSectionHeader(SMLoc L);
  bool parseDirectiveAeabiAArch64Attr(SMLoc L);

  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
                           SmallVectorImpl<SMLoc> &Loc);
  unsigned getNumRegsForRegKind(RegKind K);
  bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               bool MatchingInlineAsm) override;

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"
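  // The GET_ASSEMBLER_HEADER include above pulls in the TableGen-generated
  // matcher declarations from AArch64GenAsmMatcher.inc; the hand-written
  // routines declared in this class supply the target-specific parsing that
  // TableGen cannot derive on its own.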
  template <bool IsSVEPrefetch = false>
  template <bool AddFPZeroAsLiteral>
  template <bool ParseShiftExtend,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
  template <bool ParseShiftExtend, bool ParseSuffix>
  template <RegKind RK>
  tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
  template <RegKind VectorKind>
                          bool ExpectMatch = false);

  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"

  bool IsWindowsArm64EC;

  AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                   const MCInstrInfo &MII, const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII) {
    MCStreamer &S = getParser().getStreamer();
      new AArch64TargetStreamer(S);
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
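    // The constructor wires the parser to its target streamer and seeds the
    // available-feature bits from the subtarget, so feature-gated instructions
    // (SVE, SME, and so on) can be accepted or rejected during matching.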
  bool areEqualRegs(const MCParsedAsmOperand &Op1,
                    const MCParsedAsmOperand &Op2) const override;
  bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
  bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
  ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
                               SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  SMLoc StartLoc, EndLoc;

  struct ShiftExtendOp {
    bool HasExplicitAmount;
    RegConstraintEqualityTy EqualityTy;
    ShiftExtendOp ShiftExtend;
    unsigned ElementWidth;

  struct MatrixTileListOp {
    unsigned RegMask = 0;

  struct VectorListOp {
    unsigned NumElements;
    unsigned ElementWidth;
    RegKind RegisterKind;

  struct VectorIndexOp {
  struct ShiftedImmOp {
    unsigned ShiftAmount;
    uint32_t PStateField;
  struct CMHPriorityHintOp {
  struct TIndexHintOp {
    unsigned PStateField;

    struct MatrixRegOp MatrixReg;
    struct MatrixTileListOp MatrixTileList;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ShiftedImmOp ShiftedImm;
    struct ImmRangeOp ImmRange;
    struct FPImmOp FPImm;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PSBHintOp PSBHint;
    struct PHintOp PHint;
    struct BTIHintOp BTIHint;
    struct CMHPriorityHintOp CMHPriorityHint;
    struct TIndexHintOp TIndexHint;
    struct ShiftExtendOp ShiftExtend;

  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
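  // AArch64Operand is a tagged union: Kind selects which of the *Op structs
  // above is live, and the accessors below assert on Kind before touching the
  // corresponding member.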
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    StartLoc = o.StartLoc;
      ShiftedImm = o.ShiftedImm;
      ImmRange = o.ImmRange;
    case k_MatrixRegister:
      MatrixReg = o.MatrixReg;
    case k_MatrixTileList:
      MatrixTileList = o.MatrixTileList;
      VectorList = o.VectorList;
      VectorIndex = o.VectorIndex;
      SysCRImm = o.SysCRImm;
    case k_CMHPriorityHint:
      CMHPriorityHint = o.CMHPriorityHint;
      TIndexHint = o.TIndexHint;
      ShiftExtend = o.ShiftExtend;

  SMLoc getStartLoc() const override { return StartLoc; }
  SMLoc getEndLoc() const override { return EndLoc; }

    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;

  unsigned getFirstImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.First;

  unsigned getLastImmVal() const {
    assert(Kind == k_ImmRange && "Invalid access!");
    return ImmRange.Last;

    assert(Kind == k_CondCode && "Invalid access!");

    assert(Kind == k_FPImm && "Invalid access!");
    return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));

  bool getFPImmIsExact() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.IsExact;

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");

  StringRef getBarrierName() const {
    assert(Kind == k_Barrier && "Invalid access!");

  bool getBarriernXSModifier() const {
    assert(Kind == k_Barrier && "Invalid access!");

  MCRegister getReg() const override {
    assert(Kind == k_Register && "Invalid access!");

  MCRegister getMatrixReg() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Reg;

  unsigned getMatrixElementWidth() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.ElementWidth;

  MatrixKind getMatrixKind() const {
    assert(Kind == k_MatrixRegister && "Invalid access!");
    return MatrixReg.Kind;

  unsigned getMatrixTileListRegMask() const {
    assert(isMatrixTileList() && "Invalid access!");
    return MatrixTileList.RegMask;

  RegConstraintEqualityTy getRegEqualityTy() const {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.EqualityTy;

  MCRegister getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Reg;

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;

  unsigned getVectorListStride() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Stride;

  int getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");

  unsigned getPSBHint() const {
    assert(Kind == k_PSBHint && "Invalid access!");

  unsigned getPHint() const {
    assert(Kind == k_PHint && "Invalid access!");

  StringRef getPSBHintName() const {
    assert(Kind == k_PSBHint && "Invalid access!");
    return StringRef(PSBHint.Data, PSBHint.Length);

  StringRef getPHintName() const {
    assert(Kind == k_PHint && "Invalid access!");
    return StringRef(PHint.Data, PHint.Length);

  unsigned getBTIHint() const {
    assert(Kind == k_BTIHint && "Invalid access!");

  StringRef getBTIHintName() const {
    assert(Kind == k_BTIHint && "Invalid access!");
    return StringRef(BTIHint.Data, BTIHint.Length);

  unsigned getCMHPriorityHint() const {
    assert(Kind == k_CMHPriorityHint && "Invalid access!");
    return CMHPriorityHint.Val;

  StringRef getCMHPriorityHintName() const {
    assert(Kind == k_CMHPriorityHint && "Invalid access!");
    return StringRef(CMHPriorityHint.Data, CMHPriorityHint.Length);

  unsigned getTIndexHint() const {
    assert(Kind == k_TIndexHint && "Invalid access!");
    return TIndexHint.Val;

  StringRef getTIndexHintName() const {
    assert(Kind == k_TIndexHint && "Invalid access!");
    return StringRef(TIndexHint.Data, TIndexHint.Length);

  StringRef getSVCR() const {
    assert(Kind == k_SVCR && "Invalid access!");
    return StringRef(SVCR.Data, SVCR.Length);

  StringRef getPrefetchName() const {
    assert(Kind == k_Prefetch && "Invalid access!");

    if (Kind == k_ShiftExtend)
      return ShiftExtend.Type;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Type;

  unsigned getShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.Amount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.Amount;

  bool hasShiftExtendAmount() const {
    if (Kind == k_ShiftExtend)
      return ShiftExtend.HasExplicitAmount;
    if (Kind == k_Register)
      return Reg.ShiftExtend.HasExplicitAmount;
  bool isImm() const override { return Kind == k_Immediate; }
  bool isMem() const override { return false; }

  bool isUImm6() const {
    return (Val >= 0 && Val < 64);

  template <int Width> bool isSImm() const {
    return bool(isSImmScaled<Width, 1>());

  template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
    return isImmScaled<Bits, Scale>(true);

  template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
  DiagnosticPredicate isUImmScaled() const {
    if (IsRange && isImmRange() &&
        (getLastImmVal() != getFirstImmVal() + Offset))
    return isImmScaled<Bits, Scale, IsRange>(false);

  template <int Bits, int Scale, bool IsRange = false>
  DiagnosticPredicate isImmScaled(bool Signed) const {
    if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
        (isImmRange() && !IsRange))
      Val = getFirstImmVal();
    int64_t MinVal, MaxVal;
      int64_t Shift = Bits - 1;
      MinVal = (int64_t(1) << Shift) * -Scale;
      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
    if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)

  DiagnosticPredicate isSVEPattern() const {
    if (Val >= 0 && Val < 32)

  DiagnosticPredicate isSVEVecLenSpecifier() const {
    if (Val >= 0 && Val <= 1)

  bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,

  template <int Scale> bool isUImm12Offset() const {
      return isSymbolicUImm12Offset(getImm());
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;

  template <int N, int M>
  bool isImmInRange() const {
    return (Val >= N && Val <= M);

  template <typename T>
  bool isLogicalImm() const {
    uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);

  bool isShiftedImm() const { return Kind == k_ShiftedImm; }

  bool isImmRange() const { return Kind == k_ImmRange; }

  template <unsigned Width>
  std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
    if (isShiftedImm() && Width == getShiftedImmShift())
      return std::make_pair(CE->getValue(), Width);
    int64_t Val = CE->getValue();
    if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
      return std::make_pair(Val >> Width, Width);
    return std::make_pair(Val, 0u);
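  // getShiftedVal<12>() underpins the add/sub immediate forms: for example,
  // "add x0, x1, #4096" comes back as (#1, lsl #12), while a plain "#15"
  // comes back as (15, shift 0).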
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;

  bool isAddSubImmNeg() const {
    if (!isShiftedImm() && !isImm())
    if (auto ShiftedVal = getShiftedVal<12>())
      return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;

  template <typename T>
  DiagnosticPredicate isSVECpyImm() const {
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
                                     << ShiftedImm->second))

  template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
    bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
                  std::is_same<int8_t, T>::value;
    if (auto ShiftedImm = getShiftedVal<8>())
      if (!(IsByte && ShiftedImm->second) &&
                                     << ShiftedImm->second))

  template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
    if (isLogicalImm<T>() && !isSVECpyImm<T>())

  bool isCondCode() const { return Kind == k_CondCode; }

  bool isSIMDImmType10() const {

  bool isBranchTarget() const {
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1 << (N - 1)) << 2) &&
            Val <= (((1 << (N - 1)) - 1) << 2));

    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFSpec, DarwinSpec,

  bool isMovWSymbolG3() const {
  bool isMovWSymbolG2() const {
  bool isMovWSymbolG1() const {
  bool isMovWSymbolG0() const {
  template <int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm())
      return false;
    uint64_t Value = CE->getValue();

  template <int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm())
      return false;
    if (!CE)
      return false;
    uint64_t Value = CE->getValue();

  bool isFPImm() const {
    return Kind == k_FPImm &&

  bool isBarrier() const {
    return Kind == k_Barrier && !getBarriernXSModifier();
  bool isBarriernXS() const {
    return Kind == k_Barrier && getBarriernXSModifier();
  bool isSysReg() const { return Kind == k_SysReg; }

  bool isMRSSystemRegister() const {
    if (!isSysReg())
      return false;
    return SysReg.MRSReg != -1U;

  bool isMSRSystemRegister() const {
    if (!isSysReg())
      return false;
    return SysReg.MSRReg != -1U;

  bool isSystemPStateFieldWithImm0_1() const {
    if (!isSysReg())
      return false;
    return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);

  bool isSystemPStateFieldWithImm0_15() const {
    return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);

  bool isSVCR() const {
    return SVCR.PStateField != -1U;

  bool isReg() const override {
    return Kind == k_Register;

  bool isVectorList() const { return Kind == k_VectorList; }

  bool isScalarReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar;

  bool isNeonVectorReg() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector;

  bool isNeonVectorRegLo() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
            AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(

  bool isNeonVectorReg0to7() const {
    return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
           (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(

  bool isMatrix() const { return Kind == k_MatrixRegister; }
  bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
  template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
    case AArch64::PPRorPNRRegClassID:
      RK = RegKind::SVEPredicateAsCounter;
    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());

  template <unsigned Class> bool isSVEVectorReg() const {
    case AArch64::ZPRRegClassID:
    case AArch64::ZPR_3bRegClassID:
    case AArch64::ZPR_4bRegClassID:
    case AArch64::ZPRMul2_LoRegClassID:
    case AArch64::ZPRMul2_HiRegClassID:
    case AArch64::ZPR_KRegClassID:
      RK = RegKind::SVEDataVector;
    case AArch64::PPRRegClassID:
    case AArch64::PPR_3bRegClassID:
    case AArch64::PPR_p8to15RegClassID:
    case AArch64::PNRRegClassID:
    case AArch64::PNR_p8to15RegClassID:
    case AArch64::PPRorPNRRegClassID:
      RK = RegKind::SVEPredicateVector;
    return (Kind == k_Register && Reg.Kind == RK) &&
           AArch64MCRegisterClasses[Class].contains(getReg());

  template <unsigned Class> bool isFPRasZPR() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[Class].contains(getReg());

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
    if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
                               Reg.Kind != RegKind::SVEPredicateVector))
    if ((isSVEPredicateAsCounterReg<Class>() ||
         isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
        Reg.ElementWidth == ElementWidth)

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
    if (isSVEPredicateAsCounterReg<Class>() &&
        (Reg.ElementWidth == ElementWidth))

  template <int ElementWidth, unsigned Class>
  DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
    if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
    if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)

  template <int ElementWidth, unsigned Class,
            bool ShiftWidthAlwaysSame>
  DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
    auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
    if (!VectorMatch.isMatch())
    bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
        !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
    if (MatchShift && ShiftExtendTy == getShiftExtendType())

  bool isGPR32as64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.Reg);

  bool isGPR64as32() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.Reg);

  bool isGPR64x8() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(

  bool isWSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(

  bool isXSeqPair() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(

  bool isSyspXzrPair() const {
    return isGPR64<AArch64::GPR64RegClassID>() && Reg.Reg == AArch64::XZR;
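  // isSyspXzrPair() matches the case where SYSP's X-register-pair payload is
  // written as xzr; the predicate only checks that the scalar register given
  // is XZR.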
  template <int64_t Angle, int64_t Remainder>
  DiagnosticPredicate isComplexRotation() const {
    uint64_t Value = CE->getValue();
    if (Value % Angle == Remainder && Value <= 270)

  template <unsigned RegClassID> bool isGPR64() const {
    return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
           AArch64MCRegisterClasses[RegClassID].contains(getReg());

  template <unsigned RegClassID, int ExtWidth>
  DiagnosticPredicate isGPR64WithShiftExtend() const {
    if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
    if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
        getShiftExtendAmount() == Log2_32(ExtWidth / 8))

  template <RegKind VectorKind, unsigned NumRegs, bool IsConsecutive = false>
  bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           VectorList.NumElements == 0 &&
           VectorList.RegisterKind == VectorKind &&
           (!IsConsecutive || (VectorList.Stride == 1));

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth, unsigned Stride = 1>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
    if (VectorList.Count != NumRegs)
    if (VectorList.RegisterKind != VectorKind)
    if (VectorList.ElementWidth != ElementWidth)
    if (VectorList.Stride != Stride)
    return VectorList.NumElements == NumElements;

  template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
            unsigned ElementWidth, unsigned RegClass>
  DiagnosticPredicate isTypedVectorListMultiple() const {
        isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
    if (!AArch64MCRegisterClasses[RegClass].contains(VectorList.Reg))

  template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
            unsigned ElementWidth>
  DiagnosticPredicate isTypedVectorListStrided() const {
    bool Res =
        isTypedVectorList<VectorKind, NumRegs, 0, ElementWidth, Stride>();
    if ((VectorList.Reg < (AArch64::Z0 + Stride)) ||
        ((VectorList.Reg >= AArch64::Z16) &&
         (VectorList.Reg < (AArch64::Z16 + Stride))))

  template <int Min, int Max>
  DiagnosticPredicate isVectorIndex() const {
    if (Kind != k_VectorIndex)
    if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
  bool isToken() const override { return Kind == k_Token; }

  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isPSBHint() const { return Kind == k_PSBHint; }
  bool isPHint() const { return Kind == k_PHint; }
  bool isBTIHint() const { return Kind == k_BTIHint; }
  bool isCMHPriorityHint() const { return Kind == k_CMHPriorityHint; }
  bool isTIndexHint() const { return Kind == k_TIndexHint; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  bool isShifter() const {
    if (!isShiftExtend())

  template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
    if (Kind != k_FPImm)
    if (getFPImmIsExact()) {
      auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
      APFloat RealVal(APFloat::IEEEdouble());
          RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
      if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
      if (getFPImm().bitwiseIsEqual(RealVal))

  template <unsigned ImmA, unsigned ImmB>
  DiagnosticPredicate isExactFPImm() const {
    if ((Res = isExactFPImm<ImmA>()))
    if ((Res = isExactFPImm<ImmB>()))

  bool isExtend() const {
    if (!isShiftExtend())
           getShiftExtendAmount() <= 4;

  bool isExtend64() const {

  bool isExtendLSL64() const {
           getShiftExtendAmount() <= 4;

  bool isLSLImm3Shift() const {
    if (!isShiftExtend())

  template <int Width> bool isMemXExtend() const {
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);

  template <int Width> bool isMemWExtend() const {
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);

  template <unsigned width>
  bool isArithmeticShifter() const {

  template <unsigned width>
  bool isLogicalShifter() const {
           getShiftExtendAmount() < width;

  bool isMovImm32Shifter() const {
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);

  bool isMovImm64Shifter() const {
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
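  // MOV-immediate shifter amounts select the 16-bit lane being written:
  // 32-bit forms allow "lsl #0" or "lsl #16", 64-bit forms allow
  // #0/#16/#32/#48, e.g. "movz x0, #0xbeef, lsl #32".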
  bool isLogicalVecShifter() const {
    unsigned Shift = getShiftExtendAmount();
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
    unsigned Shift = getShiftExtendAmount();
           (Shift == 0 || Shift == 8);

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
    unsigned Shift = getShiftExtendAmount();
           (Shift == 8 || Shift == 16);

  bool isSImm9OffsetFB() const {
    return isSImm<9>() && !isUImm12Offset<Width / 8>();

  bool isAdrpLabel() const {
    int64_t Val = CE->getValue();
    int64_t Min = -(4096 * (1LL << (21 - 1)));
    int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
    return (Val % 4096) == 0 && Val >= Min && Val <= Max;

  bool isAdrLabel() const {
    int64_t Val = CE->getValue();
    int64_t Min = -(1LL << (21 - 1));
    int64_t Max = ((1LL << (21 - 1)) - 1);
    return Val >= Min && Val <= Max;

  template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
  DiagnosticPredicate isMatrixRegOperand() const {
    if (getMatrixKind() != Kind ||
        !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
        EltSize != getMatrixElementWidth())

  bool isPAuthPCRelLabel16Operand() const {
    return (Val <= 0) && (Val > -(1 << 18));

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addMatrixOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
           AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
    const MCRegisterInfo *RI = Ctx.getRegisterInfo();

  void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
           AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
    const MCRegisterInfo *RI = Ctx.getRegisterInfo();

  template <int Width>
  void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
    case 8:   Base = AArch64::B0; break;
    case 16:  Base = AArch64::H0; break;
    case 32:  Base = AArch64::S0; break;
    case 64:  Base = AArch64::D0; break;
    case 128: Base = AArch64::Q0; break;

  void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
      Reg = Reg - AArch64::PN0 + AArch64::P0;

  void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
           AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
           AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
  enum VecListIndexType {
    VecListIdx_DReg = 0,
    VecListIdx_QReg = 1,
    VecListIdx_ZReg = 2,
    VecListIdx_PReg = 3,

  template <VecListIndexType RegTy, unsigned NumRegs,
            bool IsConsecutive = false>
  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert((!IsConsecutive || (getVectorListStride() == 1)) &&
           "Expected consecutive registers");
    static const unsigned FirstRegs[][5] = {
        AArch64::D0, AArch64::D0_D1,
        AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
        AArch64::Q0, AArch64::Q0_Q1,
        AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
        AArch64::Z0, AArch64::Z0_Z1,
        AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
        AArch64::P0, AArch64::P0_P1 }
    assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
           " NumRegs must be <= 4 for ZRegs");
    assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
           " NumRegs must be <= 2 for PRegs");
    unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
                                         FirstRegs[(unsigned)RegTy][0]));

  template <unsigned NumRegs>
  void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
      if (getVectorListStart() < AArch64::Z16) {
        assert((getVectorListStart() < AArch64::Z8) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
            AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
        assert((getVectorListStart() < AArch64::Z24) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
            AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
      if (getVectorListStart() < AArch64::Z16) {
        assert((getVectorListStart() < AArch64::Z4) &&
               (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
            AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
        assert((getVectorListStart() < AArch64::Z20) &&
               (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
            AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
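  // Strided Z-register lists (e.g. "{ z0.d, z8.d }") are mapped onto the
  // tuple registers Z0_Z8 ... and Z0_Z4_Z8_Z12 ... used by the SME2
  // multi-vector instructions; the start register selects the tuple.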
  void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned RegMask = getMatrixTileListRegMask();
    assert(RegMask <= 0xFF && "Invalid mask!");

  void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  template <unsigned ImmIs0, unsigned ImmIs1>
  void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  template <int Shift>
  void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {
    } else if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());

  template <int Shift>
  void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (auto ShiftedVal = getShiftedVal<Shift>()) {

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);

  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addUImm6Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  template <int Scale>
  void addImmScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  template <int Scale>
  void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  template <typename T>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    std::make_unsigned_t<T> Val = MCE->getValue();

  template <typename T>
  void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    std::make_unsigned_t<T> Val = ~MCE->getValue();

  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(MCE && "Invalid constant immediate operand!");

  void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(MCE && "Invalid constant immediate operand!");

  void addPCRelLabel9Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(MCE && "Invalid constant immediate operand!");

  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(MCE && "Invalid constant immediate operand!");
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addSVCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addPSBHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addPHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addBTIHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addCMHPriorityHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addTIndexHintOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm = getShiftExtendAmount();

  void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    if (Reg != AArch64::XZR)

  void addExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addExtend64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");

  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");

  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    uint64_t Value = CE->getValue();

  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    uint64_t Value = CE->getValue();

  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

  void print(raw_ostream &OS, const MCAsmInfo &MAI) const override;
  static std::unique_ptr<AArch64Operand>
  CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
    auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->Tok.IsSuffix = IsSuffix;

  static std::unique_ptr<AArch64Operand>
  CreateReg(MCRegister Reg, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
            RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
            unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
    auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
    Op->Reg.Kind = Kind;
    Op->Reg.ElementWidth = 0;
    Op->Reg.EqualityTy = EqTy;
    Op->Reg.ShiftExtend.Type = ExtTy;
    Op->Reg.ShiftExtend.Amount = ShiftAmount;
    Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;

  static std::unique_ptr<AArch64Operand> CreateVectorReg(
      MCRegister Reg, RegKind Kind, unsigned ElementWidth, SMLoc S, SMLoc E,
      unsigned ShiftAmount = 0, unsigned HasExplicitAmount = false) {
    assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
            Kind == RegKind::SVEPredicateVector ||
            Kind == RegKind::SVEPredicateAsCounter) &&
           "Invalid vector kind");
    auto Op = CreateReg(Reg, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
    Op->Reg.ElementWidth = ElementWidth;

  static std::unique_ptr<AArch64Operand>
  CreateVectorList(MCRegister Reg, unsigned Count, unsigned Stride,
                   unsigned NumElements, unsigned ElementWidth,
                   RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
    Op->VectorList.Reg = Reg;
    Op->VectorList.Stride = Stride;
    Op->VectorList.NumElements = NumElements;
    Op->VectorList.ElementWidth = ElementWidth;
    Op->VectorList.RegisterKind = RegisterKind;

  static std::unique_ptr<AArch64Operand>
  CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
    Op->VectorIndex.Val = Idx;

  static std::unique_ptr<AArch64Operand>
  CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
    Op->MatrixTileList.RegMask = RegMask;

  static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
                                  const unsigned ElementWidth) {
    static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
        {{0, AArch64::ZAB0},
         {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
          AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
        {{8, AArch64::ZAB0},
         {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
          AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
        {{16, AArch64::ZAH0},
         {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
        {{16, AArch64::ZAH1},
         {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
        {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
        {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
        {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
        {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
    if (ElementWidth == 64)
    std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
    assert(!Regs.empty() && "Invalid tile or element width!");
  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
                                                   SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);

  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
                                                          unsigned ShiftAmount,
    auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
    Op->ShiftedImm.Val = Val;
    Op->ShiftedImm.ShiftAmount = ShiftAmount;

  static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
                                                        unsigned Last, SMLoc S,
    auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
    Op->ImmRange.Last = Last;

  static std::unique_ptr<AArch64Operand>
    auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
    Op->CondCode.Code = Code;

  static std::unique_ptr<AArch64Operand>
  CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
    Op->FPImm.IsExact = IsExact;

  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
                                                       bool HasnXSModifier) {
    auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
    Op->Barrier.Val = Val;
    Op->Barrier.Data = Str.data();
    Op->Barrier.Length = Str.size();
    Op->Barrier.HasnXSModifier = HasnXSModifier;

  static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
                                                      uint32_t PStateField,
    auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    Op->SysReg.MRSReg = MRSReg;
    Op->SysReg.MSRReg = MSRReg;
    Op->SysReg.PStateField = PStateField;

  static std::unique_ptr<AArch64Operand>
  CreatePHintInst(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_PHint, Ctx);
    Op->PHint.Val = Val;
    Op->PHint.Data = Str.data();
    Op->PHint.Length = Str.size();

  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
                                                     SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
    Op->SysCRImm.Val = Val;

  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
    auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
    Op->Prefetch.Val = Val;
    Op->Barrier.Data = Str.data();
    Op->Barrier.Length = Str.size();

  static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
    auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
    Op->PSBHint.Val = Val;
    Op->PSBHint.Data = Str.data();
    Op->PSBHint.Length = Str.size();

  static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
    auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
    Op->BTIHint.Val = Val | 32;
    Op->BTIHint.Data = Str.data();
    Op->BTIHint.Length = Str.size();

  static std::unique_ptr<AArch64Operand>
  CreateCMHPriorityHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_CMHPriorityHint, Ctx);
    Op->CMHPriorityHint.Val = Val;
    Op->CMHPriorityHint.Data = Str.data();
    Op->CMHPriorityHint.Length = Str.size();

  static std::unique_ptr<AArch64Operand>
  CreateTIndexHint(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_TIndexHint, Ctx);
    Op->TIndexHint.Val = Val;
    Op->TIndexHint.Data = Str.data();
    Op->TIndexHint.Length = Str.size();

  static std::unique_ptr<AArch64Operand>
  CreateMatrixRegister(MCRegister Reg, unsigned ElementWidth, MatrixKind Kind,
                       SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
    Op->MatrixReg.Reg = Reg;
    Op->MatrixReg.ElementWidth = ElementWidth;
    Op->MatrixReg.Kind = Kind;

  static std::unique_ptr<AArch64Operand>
  CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
    Op->SVCR.PStateField = PStateField;
    Op->SVCR.Data = Str.data();
    Op->SVCR.Length = Str.size();

  static std::unique_ptr<AArch64Operand>
                    bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
    Op->ShiftExtend.Type = ShOp;
    Op->ShiftExtend.Amount = Val;
    Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
    if (!getFPImmIsExact())
    StringRef Name = getBarrierName();
      OS << "<barrier " << Name << ">";
      OS << "<barrier invalid #" << getBarrier() << ">";
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    OS << getFirstImmVal();
    OS << ":" << getLastImmVal() << ">";
  case k_VectorList: {
    OS << "<vectorlist ";
    MCRegister Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg.id() + i * getVectorListStride() << " ";
    OS << "<vectorindex " << getVectorIndex() << ">";
    OS << "<sysreg: " << getSysReg() << '>';
    OS << "c" << getSysCR();
    StringRef Name = getPrefetchName();
      OS << "<prfop " << Name << ">";
      OS << "<prfop invalid #" << getPrefetch() << ">";
    OS << getPSBHintName();
    OS << getPHintName();
    OS << getBTIHintName();
  case k_CMHPriorityHint:
    OS << getCMHPriorityHintName();
    OS << getTIndexHintName();
  case k_MatrixRegister:
    OS << "<matrix " << getMatrixReg().id() << ">";
  case k_MatrixTileList: {
    OS << "<matrixlist ";
    unsigned RegMask = getMatrixTileListRegMask();
    unsigned MaxBits = 8;
    for (unsigned I = MaxBits; I > 0; --I)
      OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
    OS << "<register " << getReg().id() << ">";
    if (!getShiftExtendAmount() && !hasShiftExtendAmount())
       << getShiftExtendAmount();
    if (!hasShiftExtendAmount())
      .Case("v0", AArch64::Q0)
      .Case("v1", AArch64::Q1)
      .Case("v2", AArch64::Q2)
      .Case("v3", AArch64::Q3)
      .Case("v4", AArch64::Q4)
      .Case("v5", AArch64::Q5)
      .Case("v6", AArch64::Q6)
      .Case("v7", AArch64::Q7)
      .Case("v8", AArch64::Q8)
      .Case("v9", AArch64::Q9)
      .Case("v10", AArch64::Q10)
      .Case("v11", AArch64::Q11)
      .Case("v12", AArch64::Q12)
      .Case("v13", AArch64::Q13)
      .Case("v14", AArch64::Q14)
      .Case("v15", AArch64::Q15)
      .Case("v16", AArch64::Q16)
      .Case("v17", AArch64::Q17)
      .Case("v18", AArch64::Q18)
      .Case("v19", AArch64::Q19)
      .Case("v20", AArch64::Q20)
      .Case("v21", AArch64::Q21)
      .Case("v22", AArch64::Q22)
      .Case("v23", AArch64::Q23)
      .Case("v24", AArch64::Q24)
      .Case("v25", AArch64::Q25)
      .Case("v26", AArch64::Q26)
      .Case("v27", AArch64::Q27)
      .Case("v28", AArch64::Q28)
      .Case("v29", AArch64::Q29)
      .Case("v30", AArch64::Q30)
      .Case("v31", AArch64::Q31)
                               RegKind VectorKind) {
  std::pair<int, int> Res = {-1, -1};

  switch (VectorKind) {
  case RegKind::NeonVector:
              .Case(".1d", {1, 64})
              .Case(".1q", {1, 128})
              .Case(".2h", {2, 16})
              .Case(".2b", {2, 8})
              .Case(".2s", {2, 32})
              .Case(".2d", {2, 64})
              .Case(".4b", {4, 8})
              .Case(".4h", {4, 16})
              .Case(".4s", {4, 32})
              .Case(".8b", {8, 8})
              .Case(".8h", {8, 16})
              .Case(".16b", {16, 8})
              .Case(".h", {0, 16})
              .Case(".s", {0, 32})
              .Case(".d", {0, 64})
  case RegKind::SVEPredicateAsCounter:
  case RegKind::SVEPredicateVector:
  case RegKind::SVEDataVector:
  case RegKind::Matrix:
              .Case(".h", {0, 16})
              .Case(".s", {0, 32})
              .Case(".d", {0, 64})
              .Case(".q", {0, 128})

  if (Res == std::make_pair(-1, -1))
    return std::nullopt;
  return std::optional<std::pair<int, int>>(Res);
      .Case("z0", AArch64::Z0)
      .Case("z1", AArch64::Z1)
      .Case("z2", AArch64::Z2)
      .Case("z3", AArch64::Z3)
      .Case("z4", AArch64::Z4)
      .Case("z5", AArch64::Z5)
      .Case("z6", AArch64::Z6)
      .Case("z7", AArch64::Z7)
      .Case("z8", AArch64::Z8)
      .Case("z9", AArch64::Z9)
      .Case("z10", AArch64::Z10)
      .Case("z11", AArch64::Z11)
      .Case("z12", AArch64::Z12)
      .Case("z13", AArch64::Z13)
      .Case("z14", AArch64::Z14)
      .Case("z15", AArch64::Z15)
      .Case("z16", AArch64::Z16)
      .Case("z17", AArch64::Z17)
      .Case("z18", AArch64::Z18)
      .Case("z19", AArch64::Z19)
      .Case("z20", AArch64::Z20)
      .Case("z21", AArch64::Z21)
      .Case("z22", AArch64::Z22)
      .Case("z23", AArch64::Z23)
      .Case("z24", AArch64::Z24)
      .Case("z25", AArch64::Z25)
      .Case("z26", AArch64::Z26)
      .Case("z27", AArch64::Z27)
      .Case("z28", AArch64::Z28)
      .Case("z29", AArch64::Z29)
      .Case("z30", AArch64::Z30)
      .Case("z31", AArch64::Z31)
      .Case("p0", AArch64::P0)
      .Case("p1", AArch64::P1)
      .Case("p2", AArch64::P2)
      .Case("p3", AArch64::P3)
      .Case("p4", AArch64::P4)
      .Case("p5", AArch64::P5)
      .Case("p6", AArch64::P6)
      .Case("p7", AArch64::P7)
      .Case("p8", AArch64::P8)
      .Case("p9", AArch64::P9)
      .Case("p10", AArch64::P10)
      .Case("p11", AArch64::P11)
      .Case("p12", AArch64::P12)
      .Case("p13", AArch64::P13)
      .Case("p14", AArch64::P14)
      .Case("p15", AArch64::P15)
      .Case("pn0", AArch64::PN0)
      .Case("pn1", AArch64::PN1)
      .Case("pn2", AArch64::PN2)
      .Case("pn3", AArch64::PN3)
      .Case("pn4", AArch64::PN4)
      .Case("pn5", AArch64::PN5)
      .Case("pn6", AArch64::PN6)
      .Case("pn7", AArch64::PN7)
      .Case("pn8", AArch64::PN8)
      .Case("pn9", AArch64::PN9)
      .Case("pn10", AArch64::PN10)
      .Case("pn11", AArch64::PN11)
      .Case("pn12", AArch64::PN12)
      .Case("pn13", AArch64::PN13)
      .Case("pn14", AArch64::PN14)
      .Case("pn15", AArch64::PN15)
      .Case("za0.d", AArch64::ZAD0)
      .Case("za1.d", AArch64::ZAD1)
      .Case("za2.d", AArch64::ZAD2)
      .Case("za3.d", AArch64::ZAD3)
      .Case("za4.d", AArch64::ZAD4)
      .Case("za5.d", AArch64::ZAD5)
      .Case("za6.d", AArch64::ZAD6)
      .Case("za7.d", AArch64::ZAD7)
      .Case("za0.s", AArch64::ZAS0)
      .Case("za1.s", AArch64::ZAS1)
      .Case("za2.s", AArch64::ZAS2)
      .Case("za3.s", AArch64::ZAS3)
      .Case("za0.h", AArch64::ZAH0)
      .Case("za1.h", AArch64::ZAH1)
      .Case("za0.b", AArch64::ZAB0)
      .Case("za", AArch64::ZA)
      .Case("za0.q", AArch64::ZAQ0)
      .Case("za1.q", AArch64::ZAQ1)
      .Case("za2.q", AArch64::ZAQ2)
      .Case("za3.q", AArch64::ZAQ3)
      .Case("za4.q", AArch64::ZAQ4)
      .Case("za5.q", AArch64::ZAQ5)
      .Case("za6.q", AArch64::ZAQ6)
      .Case("za7.q", AArch64::ZAQ7)
      .Case("za8.q", AArch64::ZAQ8)
      .Case("za9.q", AArch64::ZAQ9)
      .Case("za10.q", AArch64::ZAQ10)
      .Case("za11.q", AArch64::ZAQ11)
      .Case("za12.q", AArch64::ZAQ12)
      .Case("za13.q", AArch64::ZAQ13)
      .Case("za14.q", AArch64::ZAQ14)
      .Case("za15.q", AArch64::ZAQ15)
      .Case("za0.d", AArch64::ZAD0)
      .Case("za1.d", AArch64::ZAD1)
      .Case("za2.d", AArch64::ZAD2)
      .Case("za3.d", AArch64::ZAD3)
      .Case("za4.d", AArch64::ZAD4)
      .Case("za5.d", AArch64::ZAD5)
      .Case("za6.d", AArch64::ZAD6)
      .Case("za7.d", AArch64::ZAD7)
      .Case("za0.s", AArch64::ZAS0)
      .Case("za1.s", AArch64::ZAS1)
      .Case("za2.s", AArch64::ZAS2)
      .Case("za3.s", AArch64::ZAS3)
      .Case("za0.h", AArch64::ZAH0)
      .Case("za1.h", AArch64::ZAH1)
      .Case("za0.b", AArch64::ZAB0)
      .Case("za0h.q", AArch64::ZAQ0)
      .Case("za1h.q", AArch64::ZAQ1)
      .Case("za2h.q", AArch64::ZAQ2)
      .Case("za3h.q", AArch64::ZAQ3)
      .Case("za4h.q", AArch64::ZAQ4)
      .Case("za5h.q", AArch64::ZAQ5)
      .Case("za6h.q", AArch64::ZAQ6)
      .Case("za7h.q", AArch64::ZAQ7)
      .Case("za8h.q", AArch64::ZAQ8)
      .Case("za9h.q", AArch64::ZAQ9)
      .Case("za10h.q", AArch64::ZAQ10)
      .Case("za11h.q", AArch64::ZAQ11)
      .Case("za12h.q", AArch64::ZAQ12)
      .Case("za13h.q", AArch64::ZAQ13)
      .Case("za14h.q", AArch64::ZAQ14)
      .Case("za15h.q", AArch64::ZAQ15)
      .Case("za0h.d", AArch64::ZAD0)
      .Case("za1h.d", AArch64::ZAD1)
      .Case("za2h.d", AArch64::ZAD2)
      .Case("za3h.d", AArch64::ZAD3)
      .Case("za4h.d", AArch64::ZAD4)
      .Case("za5h.d", AArch64::ZAD5)
      .Case("za6h.d", AArch64::ZAD6)
      .Case("za7h.d", AArch64::ZAD7)
      .Case("za0h.s", AArch64::ZAS0)
      .Case("za1h.s", AArch64::ZAS1)
      .Case("za2h.s", AArch64::ZAS2)
      .Case("za3h.s", AArch64::ZAS3)
      .Case("za0h.h", AArch64::ZAH0)
      .Case("za1h.h", AArch64::ZAH1)
      .Case("za0h.b", AArch64::ZAB0)
      .Case("za0v.q", AArch64::ZAQ0)
      .Case("za1v.q", AArch64::ZAQ1)
      .Case("za2v.q", AArch64::ZAQ2)
      .Case("za3v.q", AArch64::ZAQ3)
      .Case("za4v.q", AArch64::ZAQ4)
      .Case("za5v.q", AArch64::ZAQ5)
      .Case("za6v.q", AArch64::ZAQ6)
      .Case("za7v.q", AArch64::ZAQ7)
      .Case("za8v.q", AArch64::ZAQ8)
      .Case("za9v.q", AArch64::ZAQ9)
      .Case("za10v.q", AArch64::ZAQ10)
      .Case("za11v.q", AArch64::ZAQ11)
      .Case("za12v.q", AArch64::ZAQ12)
      .Case("za13v.q", AArch64::ZAQ13)
      .Case("za14v.q", AArch64::ZAQ14)
      .Case("za15v.q", AArch64::ZAQ15)
      .Case("za0v.d", AArch64::ZAD0)
      .Case("za1v.d", AArch64::ZAD1)
      .Case("za2v.d", AArch64::ZAD2)
      .Case("za3v.d", AArch64::ZAD3)
      .Case("za4v.d", AArch64::ZAD4)
      .Case("za5v.d", AArch64::ZAD5)
      .Case("za6v.d", AArch64::ZAD6)
      .Case("za7v.d", AArch64::ZAD7)
      .Case("za0v.s", AArch64::ZAS0)
      .Case("za1v.s", AArch64::ZAS1)
      .Case("za2v.s", AArch64::ZAS2)
      .Case("za3v.s", AArch64::ZAS3)
      .Case("za0v.h", AArch64::ZAH0)
      .Case("za1v.h", AArch64::ZAH1)
      .Case("za0v.b", AArch64::ZAB0)
bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
  return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();

ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
  StartLoc = getLoc();
  ParseStatus Res = tryParseScalarRegister(Reg);

MCRegister AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
  MCRegister Reg = MCRegister();
    return Kind == RegKind::SVEDataVector ? Reg : MCRegister();
    return Kind == RegKind::SVEPredicateVector ? Reg : MCRegister();
    return Kind == RegKind::SVEPredicateAsCounter ? Reg : MCRegister();
    return Kind == RegKind::NeonVector ? Reg : MCRegister();
    return Kind == RegKind::Matrix ? Reg : MCRegister();
  if (Name.equals_insensitive("zt0"))
    return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;
    return (Kind == RegKind::Scalar) ? Reg : MCRegister();

  if (MCRegister Reg = StringSwitch<unsigned>(Name.lower())
                           .Case("fp", AArch64::FP)
                           .Case("lr", AArch64::LR)
                           .Case("x31", AArch64::XZR)
                           .Case("w31", AArch64::WZR)
    return Kind == RegKind::Scalar ? Reg : MCRegister();

  if (Entry == RegisterReqs.end())
    return MCRegister();
  if (Kind == Entry->getValue().first)

unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
  case RegKind::Scalar:
  case RegKind::NeonVector:
  case RegKind::SVEDataVector:
  case RegKind::Matrix:
  case RegKind::SVEPredicateVector:
  case RegKind::SVEPredicateAsCounter:
  case RegKind::LookupTable:
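
// Parses a scalar (general-purpose) register token, resolving any aliases
// through matchRegisterNameAlias with RegKind::Scalar.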
ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
  const AsmToken &Tok = getTok();
  MCRegister Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);

ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
    return Error(S, "Expected cN operand where 0 <= N <= 15");
  if (Tok[0] != 'c' && Tok[0] != 'C')
    return Error(S, "Expected cN operand where 0 <= N <= 15");
  if (BadNum || CRNum > 15)
    return Error(S, "Expected cN operand where 0 <= N <= 15");
      AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));

ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  unsigned MaxVal = 63;
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return TokError("immediate value expected for prefetch operand");
      return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
    auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(
        prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
    return TokError("prefetch hint expected");
  auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
    return TokError("prefetch hint expected");
  Operands.push_back(AArch64Operand::CreatePrefetch(
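
// tryParsePrefetch accepts either a named prefetch hint or an immediate; the
// valid immediate range differs between SVE prefetches (0-15) and base PRFM
// prefetches (0-31), and lookups go through the matching tablegen'd tables.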
template <bool IsSVEPrefetch>
ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  auto LookupByName = [](StringRef N) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
        return std::optional<unsigned>(Res->Encoding);
    } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
      return std::optional<unsigned>(Res->Encoding);
    return std::optional<unsigned>();
  auto LookupByEncoding = [](unsigned E) {
    if (IsSVEPrefetch) {
      if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
        return std::optional<StringRef>(Res->Name);
    } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
      return std::optional<StringRef>(Res->Name);
    return std::optional<StringRef>();
  unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return TokError("immediate value expected for prefetch operand");
      return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
    auto PRFM = LookupByEncoding(MCE->getValue());
    Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
    return TokError("prefetch hint expected");
  auto PRFM = LookupByName(Tok.getString());
    return TokError("prefetch hint expected");
  Operands.push_back(AArch64Operand::CreatePrefetch(
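
// The hint-operand parsers below (PSB, BTI, CMH priority, TIndex) share one
// pattern: look the identifier up by name in the corresponding table and
// reject anything unknown as an invalid operand.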
ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
    return TokError("invalid operand for instruction");
  auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
    return TokError("invalid operand for instruction");
  Operands.push_back(AArch64Operand::CreatePSBHint(

ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();
  auto RegTok = getTok();
  if (!tryParseScalarRegister(RegNum).isSuccess())
  if (RegNum != AArch64::XZR) {
    getLexer().UnLex(RegTok);
  if (!tryParseScalarRegister(RegNum).isSuccess())
    return TokError("expected register operand");
  if (RegNum != AArch64::XZR)
    return TokError("xzr must be followed by xzr");
  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));

ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
    return TokError("invalid operand for instruction");
  auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
    return TokError("invalid operand for instruction");
  Operands.push_back(AArch64Operand::CreateBTIHint(

ParseStatus AArch64AsmParser::tryParseCMHPriorityHint(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
    return TokError("invalid operand for instruction");
      AArch64CMHPriorityHint::lookupCMHPriorityHintByName(Tok.getString());
    return TokError("invalid operand for instruction");
  Operands.push_back(AArch64Operand::CreateCMHPriorityHint(

ParseStatus AArch64AsmParser::tryParseTIndexHint(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
    return TokError("invalid operand for instruction");
  auto TIndex = AArch64TIndexHint::lookupTIndexByName(Tok.getString());
    return TokError("invalid operand for instruction");
  Operands.push_back(AArch64Operand::CreateTIndexHint(
ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  const MCExpr *Expr = nullptr;
  if (parseSymbolicImmVal(Expr))
  if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
      return Error(S, "gotpage label reference not allowed an addend");
      return Error(S, "page or gotpage label reference expected");

ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
  const MCExpr *Expr = nullptr;
  if (parseSymbolicImmVal(Expr))
  if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
      return Error(S, "unexpected adr label");

template <bool AddFPZeroAsLiteral>
ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
    return TokError("invalid floating point immediate");
    if (Tok.getIntVal() > 255 || isNegative)
      return TokError("encoded floating point value out of range");
        AArch64Operand::CreateFPImm(F, true, S, getContext()));
    APFloat RealVal(APFloat::IEEEdouble());
        RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
      return TokError("invalid floating point representation");
      RealVal.changeSign();
    if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
    Operands.push_back(AArch64Operand::CreateFPImm(
        RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));

AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
    return tryParseImmRange(Operands);
  const MCExpr *Imm = nullptr;
  if (parseSymbolicImmVal(Imm))
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
    if (!parseOptionalVGOperand(Operands, VecGroup)) {
          AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
          AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
      !getTok().getIdentifier().equals_insensitive("lsl"))
    return Error(getLoc(), "only 'lsl #+N' valid after immediate");
    return Error(getLoc(), "only 'lsl #+N' valid after immediate");
  int64_t ShiftAmount = getTok().getIntVal();
  if (ShiftAmount < 0)
    return Error(getLoc(), "positive shift amount required");
  if (ShiftAmount == 0 && Imm != nullptr) {
        AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
    Suggestion = "nfrst";

bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
                                     bool invertCondCode) {
  const AsmToken &Tok = getTok();
  std::string Suggestion;
    std::string Msg = "invalid condition code";
    if (!Suggestion.empty())
      Msg += ", did you mean " + Suggestion + "?";
    return TokError(Msg);
  if (invertCondCode) {
      return TokError("condition codes AL and NV are invalid for this instruction");
      AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));

ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
    return TokError("invalid operand for instruction");
  unsigned PStateImm = -1;
  const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
  if (SVCR->haveFeatures(getSTI().getFeatureBits()))
    PStateImm = SVCR->Encoding;

ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
    unsigned ElementWidth = 0;
    auto DotPosition = Name.find('.');
      const auto &KindRes =
            "Expected the register to be followed by element width suffix");
      ElementWidth = KindRes->second;
    Operands.push_back(AArch64Operand::CreateMatrixRegister(
        AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
      if (parseOperand(Operands, false, false))

  MCRegister Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
  size_t DotPosition = Name.find('.');
  StringRef Head = Name.take_front(DotPosition);
  StringRef Tail = Name.drop_front(DotPosition);
  StringRef RowOrColumn = Head.take_back();
  MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
                        .Case("h", MatrixKind::Row)
                        .Case("v", MatrixKind::Col)
                        .Default(MatrixKind::Tile);
        "Expected the register to be followed by element width suffix");
  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateMatrixRegister(
    if (parseOperand(Operands, false, false))
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
      return TokError("expected #imm after shift specifier");
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return Error(E, "expected integer shift amount");
  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return Error(E, "expected constant '#imm' after shift specifier");
  Operands.push_back(AArch64Operand::CreateShiftExtend(
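
// Extension name to subtarget-feature mapping: each entry lists the features
// an architecture-extension string enables (likely consumed by the
// .arch / .arch_extension directive handling).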
    {"crc", {AArch64::FeatureCRC}},         {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},       {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},         {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},      {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},         {"rasv2", {AArch64::FeatureRASv2}},
    {"lse", {AArch64::FeatureLSE}},         {"predres", {AArch64::FeaturePredRes}},
    {"predres2", {AArch64::FeatureSPECRES2}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},         {"memtag", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}}, {"pan", {AArch64::FeaturePAN}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}}, {"ccpp", {AArch64::FeatureCCPP}},
    {"rcpc", {AArch64::FeatureRCPC}},       {"rng", {AArch64::FeatureRandGen}},
    {"sve", {AArch64::FeatureSVE}},         {"sve-b16b16", {AArch64::FeatureSVEB16B16}},
    {"sve2", {AArch64::FeatureSVE2}},       {"sve-aes", {AArch64::FeatureSVEAES}},
    {"sve2-aes", {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
    {"sve-sm4", {AArch64::FeatureSVESM4}},
    {"sve2-sm4", {AArch64::FeatureAliasSVE2SM4, AArch64::FeatureSVESM4}},
    {"sve-sha3", {AArch64::FeatureSVESHA3}},
    {"sve2-sha3", {AArch64::FeatureAliasSVE2SHA3, AArch64::FeatureSVESHA3}},
    {"sve-bitperm", {AArch64::FeatureSVEBitPerm}},
        {AArch64::FeatureAliasSVE2BitPerm, AArch64::FeatureSVEBitPerm,
         AArch64::FeatureSVE2}},
    {"sve2p1", {AArch64::FeatureSVE2p1}},   {"ls64", {AArch64::FeatureLS64}},
    {"xs", {AArch64::FeatureXS}},           {"pauth", {AArch64::FeaturePAuth}},
    {"flagm", {AArch64::FeatureFlagM}},     {"rme", {AArch64::FeatureRME}},
    {"sme", {AArch64::FeatureSME}},         {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
    {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
    {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
    {"sme2", {AArch64::FeatureSME2}},       {"sme2p1", {AArch64::FeatureSME2p1}},
    {"sme-b16b16", {AArch64::FeatureSMEB16B16}},
    {"hbc", {AArch64::FeatureHBC}},         {"mops", {AArch64::FeatureMOPS}},
    {"mec", {AArch64::FeatureMEC}},         {"the", {AArch64::FeatureTHE}},
    {"d128", {AArch64::FeatureD128}},       {"lse128", {AArch64::FeatureLSE128}},
    {"ite", {AArch64::FeatureITE}},         {"cssc", {AArch64::FeatureCSSC}},
    {"rcpc3", {AArch64::FeatureRCPC3}},     {"gcs", {AArch64::FeatureGCS}},
    {"bf16", {AArch64::FeatureBF16}},       {"compnum", {AArch64::FeatureComplxNum}},
    {"dotprod", {AArch64::FeatureDotProd}}, {"f32mm", {AArch64::FeatureMatMulFP32}},
    {"f64mm", {AArch64::FeatureMatMulFP64}},{"fp16", {AArch64::FeatureFullFP16}},
    {"fp16fml", {AArch64::FeatureFP16FML}}, {"i8mm", {AArch64::FeatureMatMulInt8}},
    {"lor", {AArch64::FeatureLOR}},         {"profile", {AArch64::FeatureSPE}},
    {"rdm", {AArch64::FeatureRDM}},         {"rdma", {AArch64::FeatureRDM}},
    {"sb", {AArch64::FeatureSB}},           {"ssbs", {AArch64::FeatureSSBS}},
    {"fp8", {AArch64::FeatureFP8}},         {"faminmax", {AArch64::FeatureFAMINMAX}},
    {"fp8fma", {AArch64::FeatureFP8FMA}},   {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
    {"fp8dot2", {AArch64::FeatureFP8DOT2}}, {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
    {"fp8dot4", {AArch64::FeatureFP8DOT4}}, {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
    {"lut", {AArch64::FeatureLUT}},         {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
    {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
    {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
    {"sme-fa64", {AArch64::FeatureSMEFA64}},{"cpa", {AArch64::FeatureCPA}},
    {"tlbiw", {AArch64::FeatureTLBIW}},     {"pops", {AArch64::FeaturePoPS}},
    {"cmpbr", {AArch64::FeatureCMPBR}},     {"f8f32mm", {AArch64::FeatureF8F32MM}},
    {"f8f16mm", {AArch64::FeatureF8F16MM}}, {"fprcvt", {AArch64::FeatureFPRCVT}},
    {"lsfe", {AArch64::FeatureLSFE}},       {"sme2p2", {AArch64::FeatureSME2p2}},
    {"ssve-aes", {AArch64::FeatureSSVE_AES}},
    {"sve2p2", {AArch64::FeatureSVE2p2}},   {"sve-aes2", {AArch64::FeatureSVEAES2}},
    {"sve-bfscale", {AArch64::FeatureSVEBFSCALE}},
    {"sve-f16f32mm", {AArch64::FeatureSVE_F16F32MM}},
    {"lsui", {AArch64::FeatureLSUI}},       {"occmo", {AArch64::FeatureOCCMO}},
    {"pcdphint", {AArch64::FeaturePCDPHINT}},
    {"ssve-bitperm", {AArch64::FeatureSSVE_BitPerm}},
    {"sme-mop4", {AArch64::FeatureSME_MOP4}},
    {"sme-tmop", {AArch64::FeatureSME_TMOP}},
    {"cmh", {AArch64::FeatureCMH}},         {"lscp", {AArch64::FeatureLSCP}},
    {"tlbid", {AArch64::FeatureTLBID}},     {"mpamv2", {AArch64::FeatureMPAMv2}},
    {"mtetc", {AArch64::FeatureMTETC}},     {"gcie", {AArch64::FeatureGCIE}},
    {"sme2p3", {AArch64::FeatureSME2p3}},   {"sve2p3", {AArch64::FeatureSVE2p3}},
    {"sve-b16mm", {AArch64::FeatureSVE_B16MM}},
    {"f16mm", {AArch64::FeatureF16MM}},     {"f16f32dot", {AArch64::FeatureF16F32DOT}},
    {"f16f32mm", {AArch64::FeatureF16F32MM}},
    {"mops-go", {AArch64::FeatureMOPS_GO}}, {"poe2", {AArch64::FeatureS1POE2}},
    {"tev", {AArch64::FeatureTEV}},         {"btie", {AArch64::FeatureBTIE}},

  if (FBS[AArch64::HasV8_0aOps])
  if (FBS[AArch64::HasV8_1aOps])
  else if (FBS[AArch64::HasV8_2aOps])
  else if (FBS[AArch64::HasV8_3aOps])
  else if (FBS[AArch64::HasV8_4aOps])
  else if (FBS[AArch64::HasV8_5aOps])
  else if (FBS[AArch64::HasV8_6aOps])
  else if (FBS[AArch64::HasV8_7aOps])
  else if (FBS[AArch64::HasV8_8aOps])
  else if (FBS[AArch64::HasV8_9aOps])
  else if (FBS[AArch64::HasV9_0aOps])
  else if (FBS[AArch64::HasV9_1aOps])
  else if (FBS[AArch64::HasV9_2aOps])
  else if (FBS[AArch64::HasV9_3aOps])
  else if (FBS[AArch64::HasV9_4aOps])
  else if (FBS[AArch64::HasV9_5aOps])
  else if (FBS[AArch64::HasV9_6aOps])
  else if (FBS[AArch64::HasV9_7aOps])
  else if (FBS[AArch64::HasV8_0rOps])

  Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
  const uint16_t Op2 = Encoding & 7;
  const uint16_t Cm = (Encoding & 0x78) >> 3;
  const uint16_t Cn = (Encoding & 0x780) >> 7;
  const uint16_t Op1 = (Encoding & 0x3800) >> 11;
      AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
      AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
      AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
      AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));

bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
  if (Name.contains('.'))
    return TokError("invalid operand");
  const AsmToken &Tok = getTok();
  bool ExpectRegister = true;
  bool OptionalRegister = false;
  bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
  bool hasTLBID = getSTI().hasFeature(AArch64::FeatureTLBID);
  if (Mnemonic == "ic") {
    const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
      return TokError("invalid operand for IC instruction");
    else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("IC " + std::string(IC->Name) + " requires: ");
      return TokError(Str);
    createSysAlias(IC->Encoding, Operands, S);
  } else if (Mnemonic == "dc") {
    const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
      return TokError("invalid operand for DC instruction");
    else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("DC " + std::string(DC->Name) + " requires: ");
      return TokError(Str);
    createSysAlias(DC->Encoding, Operands, S);
  } else if (Mnemonic == "at") {
    const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
      return TokError("invalid operand for AT instruction");
    else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("AT " + std::string(AT->Name) + " requires: ");
      return TokError(Str);
    createSysAlias(AT->Encoding, Operands, S);
  } else if (Mnemonic == "tlbi") {
    const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
      return TokError("invalid operand for TLBI instruction");
    else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
      return TokError(Str);
    bool hasTLBID = getSTI().hasFeature(AArch64::FeatureTLBID);
    if (hasAll || hasTLBID) {
    createSysAlias(TLBI->Encoding, Operands, S);
  } else if (Mnemonic == "mlbi") {
    const AArch64MLBI::MLBI *MLBI = AArch64MLBI::lookupMLBIByName(Op);
      return TokError("invalid operand for MLBI instruction");
    else if (!MLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("MLBI " + std::string(MLBI->Name) + " requires: ");
      return TokError(Str);
    createSysAlias(MLBI->Encoding, Operands, S);
  } else if (Mnemonic == "gic") {
    const AArch64GIC::GIC *GIC = AArch64GIC::lookupGICByName(Op);
      return TokError("invalid operand for GIC instruction");
    else if (!GIC->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("GIC " + std::string(GIC->Name) + " requires: ");
      return TokError(Str);
    createSysAlias(GIC->Encoding, Operands, S);
  } else if (Mnemonic == "gsb") {
    const AArch64GSB::GSB *GSB = AArch64GSB::lookupGSBByName(Op);
      return TokError("invalid operand for GSB instruction");
    else if (!GSB->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("GSB " + std::string(GSB->Name) + " requires: ");
      return TokError(Str);
    ExpectRegister = false;
    createSysAlias(GSB->Encoding, Operands, S);
  } else if (Mnemonic == "plbi") {
    const AArch64PLBI::PLBI *PLBI = AArch64PLBI::lookupPLBIByName(Op);
      return TokError("invalid operand for PLBI instruction");
    else if (!PLBI->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("PLBI " + std::string(PLBI->Name) + " requires: ");
      return TokError(Str);
    if (hasAll || hasTLBID) {
    createSysAlias(PLBI->Encoding, Operands, S);
  } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" ||
             Mnemonic == "cosp") {
    if (Op.lower() != "rctx")
      return TokError("invalid operand for prediction restriction instruction");
    bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
    bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
    if (Mnemonic == "cosp" && !hasSpecres2)
      return TokError("COSP requires: predres2");
      return TokError(Mnemonic.upper() + "RCTX requires: predres");
    uint16_t PRCTX_Op2 = Mnemonic == "cfp"    ? 0b100
                         : Mnemonic == "dvp"  ? 0b101
                         : Mnemonic == "cosp" ? 0b110
                         : Mnemonic == "cpp"  ? 0b111
               "Invalid mnemonic for prediction restriction instruction");
    const auto SYS_3_7_3 = 0b01101110011;
    const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
    createSysAlias(Encoding, Operands, S);

  bool HasRegister = false;
      return TokError("expected register operand");
  if (!OptionalRegister) {
    if (ExpectRegister && !HasRegister)
      return TokError("specified " + Mnemonic + " op requires a register");
    else if (!ExpectRegister && HasRegister)
      return TokError("specified " + Mnemonic + " op does not use a register");
bool AArch64AsmParser::parseSyslAlias(StringRef Name, SMLoc NameLoc,
      AArch64Operand::CreateToken("sysl", NameLoc, getContext()));
  SMLoc startLoc = getLoc();
  const AsmToken &regTok = getTok();
  MCRegister Reg = matchRegisterNameAlias(reg.lower(), RegKind::Scalar);
    return TokError("expected register operand");
  Operands.push_back(AArch64Operand::CreateReg(
      Reg, RegKind::Scalar, startLoc, getLoc(), getContext(), EqualsReg));
  const AsmToken &operandTok = getTok();
  SMLoc S2 = operandTok.getLoc();
  if (Mnemonic == "gicr") {
    const AArch64GICR::GICR *GICR = AArch64GICR::lookupGICRByName(Op);
      return Error(S2, "invalid operand for GICR instruction");
    else if (!GICR->haveFeatures(getSTI().getFeatureBits())) {
      std::string Str("GICR " + std::string(GICR->Name) + " requires: ");
      return Error(S2, Str);
    createSysAlias(GICR->Encoding, Operands, S2);

bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
  if (Name.contains('.'))
    return TokError("invalid operand");
      AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
  const AsmToken &Tok = getTok();
  if (Mnemonic == "tlbip") {
    bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
    if (HasnXSQualifier) {
      Op = Op.drop_back(3);
    const AArch64TLBIP::TLBIP *TLBIPorig = AArch64TLBIP::lookupTLBIPByName(Op);
      return TokError("invalid operand for TLBIP instruction");
    const AArch64TLBIP::TLBIP TLBIP(
        TLBIPorig->Name, TLBIPorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
    if (!TLBIP.haveFeatures(getSTI().getFeatureBits())) {
          std::string(TLBIP.Name) + (HasnXSQualifier ? "nXS" : "");
      std::string Str("TLBIP " + Name + " requires: ");
      return TokError(Str);
    createSysAlias(TLBIP.Encoding, Operands, S);

    return TokError("expected register identifier");
  auto Result = tryParseSyspXzrPair(Operands);
    Result = tryParseGPRSeqPair(Operands);
    return TokError("specified " + Mnemonic +
                    " op requires a pair of registers");
ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = getTok();
      return TokError("'csync' operand expected");
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    AsmToken IntTok = Tok;
    if (getParser().parseExpression(ImmVal))
      return Error(ExprLoc, "immediate value expected for barrier operand");
    if (Mnemonic == "dsb" && Value > 15) {
      return Error(ExprLoc, "barrier operand out of range");
    auto DB = AArch64DB::lookupDBByEncoding(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
    return TokError("invalid operand for instruction");
  auto TSB = AArch64TSB::lookupTSBByName(Operand);
  auto DB = AArch64DB::lookupDBByName(Operand);
  if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
    return TokError("'sy' or #imm operand expected");
  if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
    return TokError("'csync' operand expected");
  if (Mnemonic == "dsb") {
    return TokError("invalid barrier option name");
  Operands.push_back(AArch64Operand::CreateBarrier(
      DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),

AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
  if (Mnemonic != "dsb")
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return Error(ExprLoc, "immediate value expected for barrier operand");
      return Error(ExprLoc, "barrier operand out of range");
    auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
    Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
    return TokError("invalid operand for instruction");
  auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
    return TokError("invalid barrier option name");
      AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
  if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
  auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
  if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
    MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
    MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
  unsigned PStateImm = -1;
  auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
  if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
    PStateImm = PState15->Encoding;
  auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
  if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
    PStateImm = PState1->Encoding;
      AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,

AArch64AsmParser::tryParsePHintInstOperand(OperandVector &Operands) {
  const AsmToken &Tok = getTok();
    return TokError("invalid operand for instruction");
    return TokError("invalid operand for instruction");
  Operands.push_back(AArch64Operand::CreatePHintInst(

bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
  ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
  unsigned ElementWidth = KindRes->second;
      AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
  return tryParseVectorIndex(Operands).isFailure();
ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
  SMLoc SIdx = getLoc();
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return TokError("immediate value expected for vector index");

ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
                                                     RegKind MatchKind) {
  const AsmToken &Tok = getTok();
    StringRef Head = Name.slice(Start, Next);
    MCRegister RegNum = matchRegisterNameAlias(Head, MatchKind);
        return TokError("invalid vector kind qualifier");

ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
  ParseStatus Status =
      tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
    Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);

template <RegKind RK>
AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
  const SMLoc S = getLoc();
  auto Res = tryParseVectorRegister(RegNum, Kind, RK);
  unsigned ElementWidth = KindRes->second;
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RK, ElementWidth, S,
  if (RK == RegKind::SVEPredicateAsCounter) {
    ParseStatus ResIndex = tryParseVectorIndex(Operands);
    if (parseOperand(Operands, false, false))
    return Error(S, "not expecting size suffix");
  auto Pred = getTok().getString().lower();
  if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
    return Error(getLoc(), "expecting 'z' predication");
  if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
    return Error(getLoc(), "expecting 'm' or 'z' predication");
  const char *ZM = Pred == "z" ? "z" : "m";
bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
  if (!tryParseNeonVectorRegister(Operands))
  if (tryParseZTOperand(Operands).isSuccess())
  if (tryParseGPROperand<false>(Operands).isSuccess())

bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
    SMLoc Loc = getLexer().getLoc();
    HasELFModifier = true;
      return TokError("expect relocation specifier in operand after ':'");
    std::string LowerCase = getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64::Specifier>(LowerCase)
      return TokError("expect relocation specifier in operand after ':'");
    if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
  if (getParser().parseExpression(ImmVal))
  if (getContext().getAsmInfo()->hasSubsectionsViaSymbols()) {
    if (getParser().parseAtSpecifier(ImmVal, EndLoc))
      if (getParser().parsePrimaryExpr(Term, EndLoc))

ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
  auto ParseMatrixTile = [this](unsigned &Reg,
                                unsigned &ElementWidth) -> ParseStatus {
    StringRef Name = getTok().getString();
    size_t DotPosition = Name.find('.');
    StringRef Tail = Name.drop_front(DotPosition);
    const std::optional<std::pair<int, int>> &KindRes =
          "Expected the register to be followed by element width suffix");
    ElementWidth = KindRes->second;
  auto LCurly = getTok();
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
  if (getTok().getString().equals_insensitive("za")) {
    Operands.push_back(AArch64Operand::CreateMatrixTileList(
  SMLoc TileLoc = getLoc();
  unsigned FirstReg, ElementWidth;
  auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
  if (!ParseRes.isSuccess()) {
    getLexer().UnLex(LCurly);
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned PrevReg = FirstReg;
  SmallSet<unsigned, 8> DRegs;
  AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
  SmallSet<unsigned, 8> SeenRegs;
  SeenRegs.insert(FirstReg);
    unsigned Reg, NextElementWidth;
    ParseRes = ParseMatrixTile(Reg, NextElementWidth);
    if (!ParseRes.isSuccess())
    if (ElementWidth != NextElementWidth)
      return Error(TileLoc, "mismatched register size suffix");
      Warning(TileLoc, "tile list not in ascending order");
      Warning(TileLoc, "duplicate tile in list");
    AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
  unsigned RegMask = 0;
  for (auto Reg : DRegs)
      AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
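
// Vector lists must use a single element-size suffix and a consistent
// sequential register stride; violations are rejected with the errors below.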
template <RegKind VectorKind>
ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
  MCAsmParser &Parser = getParser();
  auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
                            bool NoMatchIsError) -> ParseStatus {
    auto RegTok = getTok();
    auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
    if (ParseRes.isSuccess()) {
        RegTok.getString().equals_insensitive("zt0"))
        (ParseRes.isNoMatch() && NoMatchIsError &&
         !RegTok.getString().starts_with_insensitive("za")))
      return Error(Loc, "vector register expected");
  unsigned NumRegs = getNumRegsForRegKind(VectorKind);
  auto LCurly = getTok();
  MCRegister FirstReg;
  auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
  if (ParseRes.isNoMatch())
  if (!ParseRes.isSuccess())
  MCRegister PrevReg = FirstReg;
  unsigned Stride = 1;
    SMLoc Loc = getLoc();
    ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
    if (!ParseRes.isSuccess())
    if (Kind != NextKind)
      return Error(Loc, "mismatched register size suffix");
        (PrevReg < Reg) ? (Reg - PrevReg) : (NumRegs - (PrevReg - Reg));
    if (Space == 0 || Space > 3)
      return Error(Loc, "invalid number of vectors");
    bool HasCalculatedStride = false;
      SMLoc Loc = getLoc();
      ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
      if (!ParseRes.isSuccess())
      if (Kind != NextKind)
        return Error(Loc, "mismatched register size suffix");
      unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
      unsigned PrevRegVal =
          getContext().getRegisterInfo()->getEncodingValue(PrevReg);
      if (!HasCalculatedStride) {
        Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
                                       : (NumRegs - (PrevRegVal - RegVal));
        HasCalculatedStride = true;
      if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
        return Error(Loc, "registers must have the same sequential stride");
    return Error(S, "invalid number of vectors");
  unsigned NumElements = 0;
  unsigned ElementWidth = 0;
  if (!Kind.empty()) {
    std::tie(NumElements, ElementWidth) = *VK;
  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
    ParseStatus Res = tryParseVectorIndex(Operands);

bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
  auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
  if (!ParseRes.isSuccess())
  return tryParseVectorIndex(Operands).isFailure();
ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();
  ParseStatus Res = tryParseScalarRegister(RegNum);
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
    return Error(getLoc(), "index must be absent or #0");
  const MCExpr *ImmVal;
    return Error(getLoc(), "index must be absent or #0");
  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));

ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();
  const AsmToken &Tok = getTok();
  MCRegister Reg = matchRegisterNameAlias(Name, RegKind::LookupTable);
  Operands.push_back(AArch64Operand::CreateReg(
      Reg, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return TokError("immediate value expected for vector index");
    Operands.push_back(AArch64Operand::CreateImm(
    if (parseOptionalMulOperand(Operands))
        AArch64Operand::CreateToken("]", getLoc(), getContext()));

template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
  SMLoc StartLoc = getLoc();
  ParseStatus Res = tryParseScalarRegister(RegNum);
    Operands.push_back(AArch64Operand::CreateReg(
        RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateReg(
      RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
      Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (!getTok().getString().equals_insensitive("mul") ||
      !(NextIsVL || NextIsHash))
      AArch64Operand::CreateToken("mul", getLoc(), getContext()));
        AArch64Operand::CreateToken("vl", getLoc(), getContext()));
    const MCExpr *ImmVal;
      Operands.push_back(AArch64Operand::CreateImm(
  return Error(getLoc(), "expected 'vl' or '#<imm>'");

bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
                                              StringRef &VecGroup) {
  MCAsmParser &Parser = getParser();
  auto Tok = Parser.getTok();
          .Case("vgx2", "vgx2")
          .Case("vgx4", "vgx4")

bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
  auto Tok = getTok();
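
// parseOperand dispatches on the leading token of an operand: custom operand
// parsers, registers and vector lists, condition codes, immediates, and the
// ldr '=<constant>' form that is rewritten into movz or a constant-pool load.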
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                    bool invertCondCode) {
  MCAsmParser &Parser = getParser();
      MatchOperandParserImpl(Operands, Mnemonic, true);
  auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
    ParseStatus Res = tryParseOptionalShiftExtend(Operands);
    getLexer().UnLex(SavedTok);
  switch (getLexer().getKind()) {
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");
    return parseOptionalShiftExtend(getTok());
        AArch64Operand::CreateToken("[", getLoc(), getContext()));
    return parseOperand(Operands, false, false);
    if (!parseNeonVectorList(Operands))
        AArch64Operand::CreateToken("{", getLoc(), getContext()));
    return parseOperand(Operands, false, false);
    if (!parseOptionalVGOperand(Operands, VecGroup)) {
          AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
      return parseCondCode(Operands, invertCondCode);
    if (!parseRegister(Operands)) {
      AsmToken SavedTok = getTok();
      ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
        Res = tryParseOptionalShiftExtend(Operands);
        getLexer().UnLex(SavedTok);
    if (!parseOptionalMulOperand(Operands))
    if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
      return parseKeywordOperand(Operands);
    const MCExpr *IdVal, *Term;
    if (getParser().parseExpression(IdVal))
      if (getParser().parseAtSpecifier(IdVal, E))
      std::optional<MCBinaryExpr::Opcode> Opcode;
        if (getParser().parsePrimaryExpr(Term, E))
    return parseOptionalShiftExtend(getTok());
    bool isNegative = false;
    const AsmToken &Tok = getTok();
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt" && Mnemonic != "fcmne")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
    return parseOptionalShiftExtend(Tok);
    SMLoc Loc = getLoc();
    if (Mnemonic != "ldr")
      return TokError("unexpected token in operand");
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
    if (Operands.size() < 2 ||
        !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
      return Error(Loc, "Only valid when first operand is register");
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
    uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
        Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
        Operands.push_back(AArch64Operand::CreateImm(
            ShiftAmt, true, S, E, Ctx));
      APInt Simm = APInt(64, Imm << ShiftAmt);
        return Error(Loc, "Immediate too large for register");
    const MCExpr *CPLoc =
        getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
  const MCExpr *Expr = nullptr;
  if (check(getParser().parseExpression(Expr), L, "expected expression"))
  if (check(!Value, L, "expected constant expression"))
  Out = Value->getValue();

bool AArch64AsmParser::parseComma() {

bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
  if (check(parseRegister(Reg, Start, End), getLoc(), "expected register"))
  unsigned RangeEnd = Last;
  if (Base == AArch64::X0) {
    if (Last == AArch64::FP) {
      RangeEnd = AArch64::X28;
      if (Reg == AArch64::FP) {
    if (Last == AArch64::LR) {
      RangeEnd = AArch64::X28;
      if (Reg == AArch64::FP) {
      } else if (Reg == AArch64::LR) {
                   Twine("expected register in range ") +

bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
                                    const MCParsedAsmOperand &Op2) const {
  auto &AOp1 = static_cast<const AArch64Operand &>(Op1);
  auto &AOp2 = static_cast<const AArch64Operand &>(Op2);
  if (AOp1.isVectorList() && AOp2.isVectorList())
    return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
           AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
           AOp1.getVectorListStride() == AOp2.getVectorListStride();
  if (!AOp1.isReg() || !AOp2.isReg())
  if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
      AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
  assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
         "Testing equality of non-scalar registers not supported");
  if (AOp1.getRegEqualityTy() == EqualsSuperReg)
  if (AOp1.getRegEqualityTy() == EqualsSubReg)
  if (AOp2.getRegEqualityTy() == EqualsSuperReg)
  if (AOp2.getRegEqualityTy() == EqualsSubReg)
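
// parseInstruction canonicalises b<cond> shorthands to b.<cond>, recognises
// .req, routes SYS-alias mnemonics to the parse*Alias helpers, and then
// parses the operand list, tracking which operands are condition codes.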
bool AArch64AsmParser::parseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq").Case("bne", "b.ne")
             .Case("bhs", "b.hs").Case("bcs", "b.cs")
             .Case("blo", "b.lo").Case("bcc", "b.cc")
             .Case("bmi", "b.mi").Case("bpl", "b.pl")
             .Case("bvs", "b.vs").Case("bvc", "b.vc")
             .Case("bhi", "b.hi").Case("bls", "b.ls")
             .Case("bge", "b.ge").Case("blt", "b.lt")
             .Case("bgt", "b.gt").Case("ble", "b.le")
             .Case("bal", "b.al").Case("bnv", "b.nv")
      getTok().getIdentifier().lower() == ".req") {
    parseDirectiveReq(Name, NameLoc);
  StringRef Head = Name.slice(Start, Next);
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
      Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp" ||
      Head == "mlbi" || Head == "plbi" || Head == "gic" || Head == "gsb")
    return parseSysAlias(Head, NameLoc, Operands);
    return parseSyslAlias(Head, NameLoc, Operands);
  if (Head == "tlbip")
    return parseSyspAlias(Head, NameLoc, Operands);
    Head = Name.slice(Start + 1, Next);
    std::string Suggestion;
      std::string Msg = "invalid condition code";
      if (!Suggestion.empty())
        Msg += ", did you mean " + Suggestion + "?";
      return Error(SuffixLoc, Msg);
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  Operands.push_back(AArch64Operand::CreateToken(
    bool condCodeFourthOperand =
        (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
         Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
         Head == "csinc" || Head == "csinv" || Head == "csneg");
    bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
    bool condCodeThirdOperand =
        (Head == "cinc" || Head == "cinv" || Head == "cneg");
    if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                   (N == 3 && condCodeThirdOperand) ||
                                   (N == 2 && condCodeSecondOperand),
                     condCodeSecondOperand || condCodeThirdOperand)) {
          AArch64Operand::CreateToken("]", getLoc(), getContext()));
          AArch64Operand::CreateToken("!", getLoc(), getContext()));
          AArch64Operand::CreateToken("}", getLoc(), getContext()));

  assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
  return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
         (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
         (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
         (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
         (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
         (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
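
// validateInstruction enforces MOVPRFX pairing rules, ARM64EC register
// restrictions, and rejects unpredictable or invalid forms of LDP/STP,
// writeback LDR/STR, STXR/STXP, LDRA and the MOPS CPY/SET families.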
5643bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5644 SmallVectorImpl<SMLoc> &Loc) {
5645 const MCRegisterInfo *RI =
getContext().getRegisterInfo();
5646 const MCInstrDesc &MCID = MII.get(Inst.
getOpcode());
5652 PrefixInfo
Prefix = NextPrefix;
5653 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.
TSFlags);
5665 return Error(IDLoc,
"instruction is unpredictable when following a"
5666 " movprfx, suggest replacing movprfx with mov");
5670 return Error(Loc[0],
"instruction is unpredictable when following a"
5671 " movprfx writing to a different destination");
5678 return Error(Loc[0],
"instruction is unpredictable when following a"
5679 " movprfx and destination also used as non-destructive"
5683 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5684 if (
Prefix.isPredicated()) {
5698 return Error(IDLoc,
"instruction is unpredictable when following a"
5699 " predicated movprfx, suggest using unpredicated movprfx");
5703 return Error(IDLoc,
"instruction is unpredictable when following a"
5704 " predicated movprfx using a different general predicate");
5708 return Error(IDLoc,
"instruction is unpredictable when following a"
5709 " predicated movprfx with a different element size");
5715 if (IsWindowsArm64EC) {
5721 if ((
Reg == AArch64::W13 ||
Reg == AArch64::X13) ||
5722 (
Reg == AArch64::W14 ||
Reg == AArch64::X14) ||
5723 (
Reg == AArch64::W23 ||
Reg == AArch64::X23) ||
5724 (
Reg == AArch64::W24 ||
Reg == AArch64::X24) ||
5725 (
Reg == AArch64::W28 ||
Reg == AArch64::X28) ||
5726 (
Reg >= AArch64::Q16 &&
Reg <= AArch64::Q31) ||
5727 (
Reg >= AArch64::D16 &&
Reg <= AArch64::D31) ||
5728 (
Reg >= AArch64::S16 &&
Reg <= AArch64::S31) ||
5729 (
Reg >= AArch64::H16 &&
Reg <= AArch64::H31) ||
5730 (
Reg >= AArch64::B16 &&
Reg <= AArch64::B31)) {
5732 " is disallowed on ARM64EC.");
5742 case AArch64::LDPSWpre:
5743 case AArch64::LDPWpost:
5744 case AArch64::LDPWpre:
5745 case AArch64::LDPXpost:
5746 case AArch64::LDPXpre: {
5751 return Error(Loc[0],
"unpredictable LDP instruction, writeback base "
5752 "is also a destination");
5754 return Error(Loc[1],
"unpredictable LDP instruction, writeback base "
5755 "is also a destination");
5758 case AArch64::LDR_ZA:
5759 case AArch64::STR_ZA: {
5762 return Error(Loc[1],
5763 "unpredictable instruction, immediate and offset mismatch.");
5766 case AArch64::LDPDi:
5767 case AArch64::LDPQi:
5768 case AArch64::LDPSi:
5769 case AArch64::LDPSWi:
5770 case AArch64::LDPWi:
5771 case AArch64::LDPXi: {
5775 return Error(Loc[1],
"unpredictable LDP instruction, Rt2==Rt");
5778 case AArch64::LDPDpost:
5779 case AArch64::LDPDpre:
5780 case AArch64::LDPQpost:
5781 case AArch64::LDPQpre:
5782 case AArch64::LDPSpost:
5783 case AArch64::LDPSpre:
5784 case AArch64::LDPSWpost: {
5788 return Error(Loc[1],
"unpredictable LDP instruction, Rt2==Rt");
5791 case AArch64::STPDpost:
5792 case AArch64::STPDpre:
5793 case AArch64::STPQpost:
5794 case AArch64::STPQpre:
5795 case AArch64::STPSpost:
5796 case AArch64::STPSpre:
5797 case AArch64::STPWpost:
5798 case AArch64::STPWpre:
5799 case AArch64::STPXpost:
5800 case AArch64::STPXpre: {
5805 return Error(Loc[0],
"unpredictable STP instruction, writeback base "
5806 "is also a source");
5808 return Error(Loc[1],
"unpredictable STP instruction, writeback base "
5809 "is also a source");
5812 case AArch64::LDRBBpre:
5813 case AArch64::LDRBpre:
5814 case AArch64::LDRHHpre:
5815 case AArch64::LDRHpre:
5816 case AArch64::LDRSBWpre:
5817 case AArch64::LDRSBXpre:
5818 case AArch64::LDRSHWpre:
5819 case AArch64::LDRSHXpre:
5820 case AArch64::LDRSWpre:
5821 case AArch64::LDRWpre:
5822 case AArch64::LDRXpre:
5823 case AArch64::LDRBBpost:
5824 case AArch64::LDRBpost:
5825 case AArch64::LDRHHpost:
5826 case AArch64::LDRHpost:
5827 case AArch64::LDRSBWpost:
5828 case AArch64::LDRSBXpost:
5829 case AArch64::LDRSHWpost:
5830 case AArch64::LDRSHXpost:
5831 case AArch64::LDRSWpost:
5832 case AArch64::LDRWpost:
5833 case AArch64::LDRXpost: {
5837 return Error(Loc[0],
"unpredictable LDR instruction, writeback base "
5838 "is also a source");
5841 case AArch64::STRBBpost:
5842 case AArch64::STRBpost:
5843 case AArch64::STRHHpost:
5844 case AArch64::STRHpost:
5845 case AArch64::STRWpost:
5846 case AArch64::STRXpost:
5847 case AArch64::STRBBpre:
5848 case AArch64::STRBpre:
5849 case AArch64::STRHHpre:
5850 case AArch64::STRHpre:
5851 case AArch64::STRWpre:
5852 case AArch64::STRXpre: {
5856 return Error(Loc[0],
"unpredictable STR instruction, writeback base "
5857 "is also a source");
5860 case AArch64::STXRB:
5861 case AArch64::STXRH:
5862 case AArch64::STXRW:
5863 case AArch64::STXRX:
5864 case AArch64::STLXRB:
5865 case AArch64::STLXRH:
5866 case AArch64::STLXRW:
5867 case AArch64::STLXRX: {
5873 return Error(Loc[0],
5874 "unpredictable STXR instruction, status is also a source");
5877 case AArch64::STXPW:
5878 case AArch64::STXPX:
5879 case AArch64::STLXPW:
5880 case AArch64::STLXPX: {
5887 return Error(Loc[0],
5888 "unpredictable STXP instruction, status is also a source");
5891 case AArch64::LDRABwriteback:
5892 case AArch64::LDRAAwriteback: {
5896 return Error(Loc[0],
5897 "unpredictable LDRA instruction, writeback base"
5898 " is also a destination");
5905 case AArch64::CPYFP:
5906 case AArch64::CPYFPWN:
5907 case AArch64::CPYFPRN:
5908 case AArch64::CPYFPN:
5909 case AArch64::CPYFPWT:
5910 case AArch64::CPYFPWTWN:
5911 case AArch64::CPYFPWTRN:
5912 case AArch64::CPYFPWTN:
5913 case AArch64::CPYFPRT:
5914 case AArch64::CPYFPRTWN:
5915 case AArch64::CPYFPRTRN:
5916 case AArch64::CPYFPRTN:
5917 case AArch64::CPYFPT:
5918 case AArch64::CPYFPTWN:
5919 case AArch64::CPYFPTRN:
5920 case AArch64::CPYFPTN:
5921 case AArch64::CPYFM:
5922 case AArch64::CPYFMWN:
5923 case AArch64::CPYFMRN:
5924 case AArch64::CPYFMN:
5925 case AArch64::CPYFMWT:
5926 case AArch64::CPYFMWTWN:
5927 case AArch64::CPYFMWTRN:
5928 case AArch64::CPYFMWTN:
5929 case AArch64::CPYFMRT:
5930 case AArch64::CPYFMRTWN:
5931 case AArch64::CPYFMRTRN:
5932 case AArch64::CPYFMRTN:
5933 case AArch64::CPYFMT:
5934 case AArch64::CPYFMTWN:
5935 case AArch64::CPYFMTRN:
5936 case AArch64::CPYFMTN:
5937 case AArch64::CPYFE:
5938 case AArch64::CPYFEWN:
5939 case AArch64::CPYFERN:
5940 case AArch64::CPYFEN:
5941 case AArch64::CPYFEWT:
5942 case AArch64::CPYFEWTWN:
5943 case AArch64::CPYFEWTRN:
5944 case AArch64::CPYFEWTN:
5945 case AArch64::CPYFERT:
5946 case AArch64::CPYFERTWN:
5947 case AArch64::CPYFERTRN:
5948 case AArch64::CPYFERTN:
5949 case AArch64::CPYFET:
5950 case AArch64::CPYFETWN:
5951 case AArch64::CPYFETRN:
5952 case AArch64::CPYFETN:
5954 case AArch64::CPYPWN:
5955 case AArch64::CPYPRN:
5956 case AArch64::CPYPN:
5957 case AArch64::CPYPWT:
5958 case AArch64::CPYPWTWN:
5959 case AArch64::CPYPWTRN:
5960 case AArch64::CPYPWTN:
5961 case AArch64::CPYPRT:
5962 case AArch64::CPYPRTWN:
5963 case AArch64::CPYPRTRN:
5964 case AArch64::CPYPRTN:
5965 case AArch64::CPYPT:
5966 case AArch64::CPYPTWN:
5967 case AArch64::CPYPTRN:
5968 case AArch64::CPYPTN:
5970 case AArch64::CPYMWN:
5971 case AArch64::CPYMRN:
5972 case AArch64::CPYMN:
5973 case AArch64::CPYMWT:
5974 case AArch64::CPYMWTWN:
5975 case AArch64::CPYMWTRN:
5976 case AArch64::CPYMWTN:
5977 case AArch64::CPYMRT:
5978 case AArch64::CPYMRTWN:
5979 case AArch64::CPYMRTRN:
5980 case AArch64::CPYMRTN:
5981 case AArch64::CPYMT:
5982 case AArch64::CPYMTWN:
5983 case AArch64::CPYMTRN:
5984 case AArch64::CPYMTN:
5986 case AArch64::CPYEWN:
5987 case AArch64::CPYERN:
5988 case AArch64::CPYEN:
5989 case AArch64::CPYEWT:
5990 case AArch64::CPYEWTWN:
5991 case AArch64::CPYEWTRN:
5992 case AArch64::CPYEWTN:
5993 case AArch64::CPYERT:
5994 case AArch64::CPYERTWN:
5995 case AArch64::CPYERTRN:
5996 case AArch64::CPYERTN:
5997 case AArch64::CPYET:
5998 case AArch64::CPYETWN:
5999 case AArch64::CPYETRN:
6000 case AArch64::CPYETN: {
6011 return Error(Loc[0],
"invalid CPY instruction, destination and source"
6012 " registers are the same");
6014 return Error(Loc[0],
"invalid CPY instruction, destination and size"
6015 " registers are the same");
6017 return Error(Loc[0],
"invalid CPY instruction, source and size"
6018 " registers are the same");
6022 case AArch64::SETPT:
6023 case AArch64::SETPN:
6024 case AArch64::SETPTN:
6026 case AArch64::SETMT:
6027 case AArch64::SETMN:
6028 case AArch64::SETMTN:
6030 case AArch64::SETET:
6031 case AArch64::SETEN:
6032 case AArch64::SETETN:
6033 case AArch64::SETGP:
6034 case AArch64::SETGPT:
6035 case AArch64::SETGPN:
6036 case AArch64::SETGPTN:
6037 case AArch64::SETGM:
6038 case AArch64::SETGMT:
6039 case AArch64::SETGMN:
6040 case AArch64::SETGMTN:
6041 case AArch64::MOPSSETGE:
6042 case AArch64::MOPSSETGET:
6043 case AArch64::MOPSSETGEN:
6044 case AArch64::MOPSSETGETN: {
6054 return Error(Loc[0],
"invalid SET instruction, destination and size"
6055 " registers are the same");
6057 return Error(Loc[0],
"invalid SET instruction, destination and source"
6058 " registers are the same");
6060 return Error(Loc[0],
"invalid SET instruction, source and size"
6061 " registers are the same");
6064 case AArch64::SETGOP:
6065 case AArch64::SETGOPT:
6066 case AArch64::SETGOPN:
6067 case AArch64::SETGOPTN:
6068 case AArch64::SETGOM:
6069 case AArch64::SETGOMT:
6070 case AArch64::SETGOMN:
6071 case AArch64::SETGOMTN:
6072 case AArch64::SETGOE:
6073 case AArch64::SETGOET:
6074 case AArch64::SETGOEN:
6075 case AArch64::SETGOETN: {
6084 return Error(Loc[0],
"invalid SET instruction, destination and size"
6085 " registers are the same");
6094 case AArch64::ADDSWri:
6095 case AArch64::ADDSXri:
6096 case AArch64::ADDWri:
6097 case AArch64::ADDXri:
6098 case AArch64::SUBSWri:
6099 case AArch64::SUBSXri:
6100 case AArch64::SUBWri:
6101 case AArch64::SUBXri: {
6109 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
6134 return Error(Loc.back(), "invalid immediate expression");

6147 static std::string AArch64MnemonicSpellCheck(StringRef S, const FeatureBitset &FBS, unsigned VariantID = 0);
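// showMatchError maps a Match_* failure code from the generated matcher to a
// human-readable diagnostic at the location of the offending operand.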
6149 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo, OperandVector &Operands) {
  switch (ErrCode) {
6153 case Match_InvalidTiedOperand: {
6154 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
6155 if (Op.isVectorList())
6156 return Error(Loc, "operand must match destination register list");
6158 assert(Op.isReg() && "Unexpected operand type");
6159 switch (Op.getRegEqualityTy()) {
6160 case RegConstraintEqualityTy::EqualsSubReg:
6161 return Error(Loc, "operand must be 64-bit form of destination register");
6162 case RegConstraintEqualityTy::EqualsSuperReg:
6163 return Error(Loc, "operand must be 32-bit form of destination register");
6164 case RegConstraintEqualityTy::EqualsReg:
6165 return Error(Loc, "operand must match destination register");
  }
  }
6169 case Match_MissingFeature:
6171 "instruction requires a CPU feature not currently enabled");
6172 case Match_InvalidOperand:
6173 return Error(Loc,
"invalid operand for instruction");
6174 case Match_InvalidSuffix:
6175 return Error(Loc,
"invalid type suffix for instruction");
6176 case Match_InvalidCondCode:
6177 return Error(Loc,
"expected AArch64 condition code");
6178 case Match_AddSubRegExtendSmall:
6180 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
6181 case Match_AddSubRegExtendLarge:
6183 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
6184 case Match_AddSubSecondSource:
6186 "expected compatible register, symbol or integer in range [0, 4095]");
6187 case Match_LogicalSecondSource:
6188 return Error(Loc,
"expected compatible register or logical immediate");
6189 case Match_InvalidMovImm32Shift:
6190 return Error(Loc,
"expected 'lsl' with optional integer 0 or 16");
6191 case Match_InvalidMovImm64Shift:
6192 return Error(Loc,
"expected 'lsl' with optional integer 0, 16, 32 or 48");
6193 case Match_AddSubRegShift32:
6195 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
6196 case Match_AddSubRegShift64:
6198 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
6199 case Match_InvalidFPImm:
6201 "expected compatible register or floating-point constant");
6202 case Match_InvalidMemoryIndexedSImm6:
6203 return Error(Loc,
"index must be an integer in range [-32, 31].");
6204 case Match_InvalidMemoryIndexedSImm5:
6205 return Error(Loc,
"index must be an integer in range [-16, 15].");
6206 case Match_InvalidMemoryIndexed1SImm4:
6207 return Error(Loc,
"index must be an integer in range [-8, 7].");
6208 case Match_InvalidMemoryIndexed2SImm4:
6209 return Error(Loc,
"index must be a multiple of 2 in range [-16, 14].");
6210 case Match_InvalidMemoryIndexed3SImm4:
6211 return Error(Loc,
"index must be a multiple of 3 in range [-24, 21].");
6212 case Match_InvalidMemoryIndexed4SImm4:
6213 return Error(Loc,
"index must be a multiple of 4 in range [-32, 28].");
6214 case Match_InvalidMemoryIndexed16SImm4:
6215 return Error(Loc,
"index must be a multiple of 16 in range [-128, 112].");
6216 case Match_InvalidMemoryIndexed32SImm4:
6217 return Error(Loc,
"index must be a multiple of 32 in range [-256, 224].");
6218 case Match_InvalidMemoryIndexed1SImm6:
6219 return Error(Loc,
"index must be an integer in range [-32, 31].");
6220 case Match_InvalidMemoryIndexedSImm8:
6221 return Error(Loc,
"index must be an integer in range [-128, 127].");
6222 case Match_InvalidMemoryIndexedSImm9:
6223 return Error(Loc,
"index must be an integer in range [-256, 255].");
6224 case Match_InvalidMemoryIndexed16SImm9:
6225 return Error(Loc,
"index must be a multiple of 16 in range [-4096, 4080].");
6226 case Match_InvalidMemoryIndexed8SImm10:
6227 return Error(Loc,
"index must be a multiple of 8 in range [-4096, 4088].");
6228 case Match_InvalidMemoryIndexed4SImm7:
6229 return Error(Loc,
"index must be a multiple of 4 in range [-256, 252].");
6230 case Match_InvalidMemoryIndexed8SImm7:
6231 return Error(Loc,
"index must be a multiple of 8 in range [-512, 504].");
6232 case Match_InvalidMemoryIndexed16SImm7:
6233 return Error(Loc,
"index must be a multiple of 16 in range [-1024, 1008].");
6234 case Match_InvalidMemoryIndexed8UImm5:
6235 return Error(Loc,
"index must be a multiple of 8 in range [0, 248].");
6236 case Match_InvalidMemoryIndexed8UImm3:
6237 return Error(Loc,
"index must be a multiple of 8 in range [0, 56].");
6238 case Match_InvalidMemoryIndexed4UImm5:
6239 return Error(Loc,
"index must be a multiple of 4 in range [0, 124].");
6240 case Match_InvalidMemoryIndexed2UImm5:
6241 return Error(Loc,
"index must be a multiple of 2 in range [0, 62].");
6242 case Match_InvalidMemoryIndexed8UImm6:
6243 return Error(Loc,
"index must be a multiple of 8 in range [0, 504].");
6244 case Match_InvalidMemoryIndexed16UImm6:
6245 return Error(Loc,
"index must be a multiple of 16 in range [0, 1008].");
6246 case Match_InvalidMemoryIndexed4UImm6:
6247 return Error(Loc,
"index must be a multiple of 4 in range [0, 252].");
6248 case Match_InvalidMemoryIndexed2UImm6:
6249 return Error(Loc,
"index must be a multiple of 2 in range [0, 126].");
6250 case Match_InvalidMemoryIndexed1UImm6:
6251 return Error(Loc,
"index must be in range [0, 63].");
6252 case Match_InvalidMemoryWExtend8:
6254 "expected 'uxtw' or 'sxtw' with optional shift of #0");
6255 case Match_InvalidMemoryWExtend16:
6257 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
6258 case Match_InvalidMemoryWExtend32:
6260 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
6261 case Match_InvalidMemoryWExtend64:
6263 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
6264 case Match_InvalidMemoryWExtend128:
6266 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
6267 case Match_InvalidMemoryXExtend8:
6269 "expected 'lsl' or 'sxtx' with optional shift of #0");
6270 case Match_InvalidMemoryXExtend16:
6272 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
6273 case Match_InvalidMemoryXExtend32:
6275 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
6276 case Match_InvalidMemoryXExtend64:
6278 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
6279 case Match_InvalidMemoryXExtend128:
6281 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
6282 case Match_InvalidMemoryIndexed1:
6283 return Error(Loc,
"index must be an integer in range [0, 4095].");
6284 case Match_InvalidMemoryIndexed2:
6285 return Error(Loc,
"index must be a multiple of 2 in range [0, 8190].");
6286 case Match_InvalidMemoryIndexed4:
6287 return Error(Loc,
"index must be a multiple of 4 in range [0, 16380].");
6288 case Match_InvalidMemoryIndexed8:
6289 return Error(Loc,
"index must be a multiple of 8 in range [0, 32760].");
6290 case Match_InvalidMemoryIndexed16:
6291 return Error(Loc,
"index must be a multiple of 16 in range [0, 65520].");
6292 case Match_InvalidImm0_0:
6293 return Error(Loc,
"immediate must be 0.");
6294 case Match_InvalidImm0_1:
6295 return Error(Loc,
"immediate must be an integer in range [0, 1].");
6296 case Match_InvalidImm0_3:
6297 return Error(Loc,
"immediate must be an integer in range [0, 3].");
6298 case Match_InvalidImm0_7:
6299 return Error(Loc,
"immediate must be an integer in range [0, 7].");
6300 case Match_InvalidImm0_15:
6301 return Error(Loc,
"immediate must be an integer in range [0, 15].");
6302 case Match_InvalidImm0_31:
6303 return Error(Loc,
"immediate must be an integer in range [0, 31].");
6304 case Match_InvalidImm0_63:
6305 return Error(Loc,
"immediate must be an integer in range [0, 63].");
6306 case Match_InvalidImm0_127:
6307 return Error(Loc,
"immediate must be an integer in range [0, 127].");
6308 case Match_InvalidImm0_255:
6309 return Error(Loc,
"immediate must be an integer in range [0, 255].");
6310 case Match_InvalidImm0_65535:
6311 return Error(Loc,
"immediate must be an integer in range [0, 65535].");
6312 case Match_InvalidImm1_8:
6313 return Error(Loc,
"immediate must be an integer in range [1, 8].");
6314 case Match_InvalidImm1_16:
6315 return Error(Loc,
"immediate must be an integer in range [1, 16].");
6316 case Match_InvalidImm1_32:
6317 return Error(Loc,
"immediate must be an integer in range [1, 32].");
6318 case Match_InvalidImm1_64:
6319 return Error(Loc,
"immediate must be an integer in range [1, 64].");
6320 case Match_InvalidImmM1_62:
6321 return Error(Loc,
"immediate must be an integer in range [-1, 62].");
6322 case Match_InvalidMemoryIndexedRange2UImm0:
6323 return Error(Loc,
"vector select offset must be the immediate range 0:1.");
6324 case Match_InvalidMemoryIndexedRange2UImm1:
6325 return Error(Loc, "vector select offset must be an immediate range of the form <immf>:<imml>, where the first immediate is a multiple of 2 in the range [0, 2], and the second immediate is immf + 1.");
6329 case Match_InvalidMemoryIndexedRange2UImm2:
6330 case Match_InvalidMemoryIndexedRange2UImm3:
6333 return Error(Loc, "vector select offset must be an immediate range of the form <immf>:<imml>, where the first immediate is a multiple of 2 in the range [0, 6] or [0, 14] depending on the instruction, and the second immediate is immf + 1.");
6338 case Match_InvalidMemoryIndexedRange4UImm0:
6339 return Error(Loc, "vector select offset must be the immediate range 0:3.");
6340 case Match_InvalidMemoryIndexedRange4UImm1:
6341 case Match_InvalidMemoryIndexedRange4UImm2:
6344 return Error(Loc, "vector select offset must be an immediate range of the form <immf>:<imml>, where the first immediate is a multiple of 4 in the range [0, 4] or [0, 12] depending on the instruction, and the second immediate is immf + 3.");
6349 case Match_InvalidSVEAddSubImm8:
6350 return Error(Loc,
"immediate must be an integer in range [0, 255]"
6351 " with a shift amount of 0");
6352 case Match_InvalidSVEAddSubImm16:
6353 case Match_InvalidSVEAddSubImm32:
6354 case Match_InvalidSVEAddSubImm64:
6355 return Error(Loc,
"immediate must be an integer in range [0, 255] or a "
6356 "multiple of 256 in range [256, 65280]");
6357 case Match_InvalidSVECpyImm8:
6358 return Error(Loc,
"immediate must be an integer in range [-128, 255]"
6359 " with a shift amount of 0");
6360 case Match_InvalidSVECpyImm16:
6361 return Error(Loc,
"immediate must be an integer in range [-128, 127] or a "
6362 "multiple of 256 in range [-32768, 65280]");
6363 case Match_InvalidSVECpyImm32:
6364 case Match_InvalidSVECpyImm64:
6365 return Error(Loc,
"immediate must be an integer in range [-128, 127] or a "
6366 "multiple of 256 in range [-32768, 32512]");
6367 case Match_InvalidIndexRange0_0:
6368 return Error(Loc,
"expected lane specifier '[0]'");
6369 case Match_InvalidIndexRange1_1:
6370 return Error(Loc,
"expected lane specifier '[1]'");
6371 case Match_InvalidIndexRange0_15:
6372 return Error(Loc,
"vector lane must be an integer in range [0, 15].");
6373 case Match_InvalidIndexRange0_7:
6374 return Error(Loc,
"vector lane must be an integer in range [0, 7].");
6375 case Match_InvalidIndexRange0_3:
6376 return Error(Loc,
"vector lane must be an integer in range [0, 3].");
6377 case Match_InvalidIndexRange0_1:
6378 return Error(Loc,
"vector lane must be an integer in range [0, 1].");
6379 case Match_InvalidSVEIndexRange0_63:
6380 return Error(Loc,
"vector lane must be an integer in range [0, 63].");
6381 case Match_InvalidSVEIndexRange0_31:
6382 return Error(Loc,
"vector lane must be an integer in range [0, 31].");
6383 case Match_InvalidSVEIndexRange0_15:
6384 return Error(Loc,
"vector lane must be an integer in range [0, 15].");
6385 case Match_InvalidSVEIndexRange0_7:
6386 return Error(Loc,
"vector lane must be an integer in range [0, 7].");
6387 case Match_InvalidSVEIndexRange0_3:
6388 return Error(Loc,
"vector lane must be an integer in range [0, 3].");
6389 case Match_InvalidLabel:
6390 return Error(Loc,
"expected label or encodable integer pc offset");
6392 return Error(Loc,
"expected readable system register");
6394 case Match_InvalidSVCR:
6395 return Error(Loc,
"expected writable system register or pstate");
6396 case Match_InvalidComplexRotationEven:
6397 return Error(Loc,
"complex rotation must be 0, 90, 180 or 270.");
6398 case Match_InvalidComplexRotationOdd:
6399 return Error(Loc,
"complex rotation must be 90 or 270.");
6400 case Match_MnemonicFail: {
6402 std::string Suggestion = AArch64MnemonicSpellCheck(((AArch64Operand &)*Operands[0]).getToken(),
6403                                                    ComputeAvailableFeatures(STI->getFeatureBits()));
6404 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
6406 case Match_InvalidGPR64shifted8:
6407 return Error(Loc,
"register must be x0..x30 or xzr, without shift");
6408 case Match_InvalidGPR64shifted16:
6409 return Error(Loc,
"register must be x0..x30 or xzr, with required shift 'lsl #1'");
6410 case Match_InvalidGPR64shifted32:
6411 return Error(Loc,
"register must be x0..x30 or xzr, with required shift 'lsl #2'");
6412 case Match_InvalidGPR64shifted64:
6413 return Error(Loc,
"register must be x0..x30 or xzr, with required shift 'lsl #3'");
6414 case Match_InvalidGPR64shifted128:
6416 Loc,
"register must be x0..x30 or xzr, with required shift 'lsl #4'");
6417 case Match_InvalidGPR64NoXZRshifted8:
6418 return Error(Loc,
"register must be x0..x30 without shift");
6419 case Match_InvalidGPR64NoXZRshifted16:
6420 return Error(Loc,
"register must be x0..x30 with required shift 'lsl #1'");
6421 case Match_InvalidGPR64NoXZRshifted32:
6422 return Error(Loc,
"register must be x0..x30 with required shift 'lsl #2'");
6423 case Match_InvalidGPR64NoXZRshifted64:
6424 return Error(Loc,
"register must be x0..x30 with required shift 'lsl #3'");
6425 case Match_InvalidGPR64NoXZRshifted128:
6426 return Error(Loc,
"register must be x0..x30 with required shift 'lsl #4'");
6427 case Match_InvalidZPR32UXTW8:
6428 case Match_InvalidZPR32SXTW8:
6429 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
6430 case Match_InvalidZPR32UXTW16:
6431 case Match_InvalidZPR32SXTW16:
6432 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
6433 case Match_InvalidZPR32UXTW32:
6434 case Match_InvalidZPR32SXTW32:
6435 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6436 case Match_InvalidZPR32UXTW64:
6437 case Match_InvalidZPR32SXTW64:
6438 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6439 case Match_InvalidZPR64UXTW8:
6440 case Match_InvalidZPR64SXTW8:
6441 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6442 case Match_InvalidZPR64UXTW16:
6443 case Match_InvalidZPR64SXTW16:
6444 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6445 case Match_InvalidZPR64UXTW32:
6446 case Match_InvalidZPR64SXTW32:
6447 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6448 case Match_InvalidZPR64UXTW64:
6449 case Match_InvalidZPR64SXTW64:
6450 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6451 case Match_InvalidZPR32LSL8:
6452 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].s'");
6453 case Match_InvalidZPR32LSL16:
6454 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6455 case Match_InvalidZPR32LSL32:
6456 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6457 case Match_InvalidZPR32LSL64:
6458 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6459 case Match_InvalidZPR64LSL8:
6460 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].d'");
6461 case Match_InvalidZPR64LSL16:
6462 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6463 case Match_InvalidZPR64LSL32:
6464 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6465 case Match_InvalidZPR64LSL64:
6466 return Error(Loc,
"invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
6467 case Match_InvalidZPR0:
6468 return Error(Loc,
"expected register without element width suffix");
6469 case Match_InvalidZPR8:
6470 case Match_InvalidZPR16:
6471 case Match_InvalidZPR32:
6472 case Match_InvalidZPR64:
6473 case Match_InvalidZPR128:
6474 return Error(Loc,
"invalid element width");
6475 case Match_InvalidZPR_3b8:
6476 return Error(Loc,
"Invalid restricted vector register, expected z0.b..z7.b");
6477 case Match_InvalidZPR_3b16:
6478 return Error(Loc,
"Invalid restricted vector register, expected z0.h..z7.h");
6479 case Match_InvalidZPR_3b32:
6480 return Error(Loc,
"Invalid restricted vector register, expected z0.s..z7.s");
6481 case Match_InvalidZPR_4b8:
6483 "Invalid restricted vector register, expected z0.b..z15.b");
6484 case Match_InvalidZPR_4b16:
6485 return Error(Loc,
"Invalid restricted vector register, expected z0.h..z15.h");
6486 case Match_InvalidZPR_4b32:
6487 return Error(Loc,
"Invalid restricted vector register, expected z0.s..z15.s");
6488 case Match_InvalidZPR_4b64:
6489 return Error(Loc,
"Invalid restricted vector register, expected z0.d..z15.d");
6490 case Match_InvalidZPRMul2_Lo8:
6491 return Error(Loc,
"Invalid restricted vector register, expected even "
6492 "register in z0.b..z14.b");
6493 case Match_InvalidZPRMul2_Hi8:
6494 return Error(Loc,
"Invalid restricted vector register, expected even "
6495 "register in z16.b..z30.b");
6496 case Match_InvalidZPRMul2_Lo16:
6497 return Error(Loc,
"Invalid restricted vector register, expected even "
6498 "register in z0.h..z14.h");
6499 case Match_InvalidZPRMul2_Hi16:
6500 return Error(Loc,
"Invalid restricted vector register, expected even "
6501 "register in z16.h..z30.h");
6502 case Match_InvalidZPRMul2_Lo32:
6503 return Error(Loc,
"Invalid restricted vector register, expected even "
6504 "register in z0.s..z14.s");
6505 case Match_InvalidZPRMul2_Hi32:
6506 return Error(Loc,
"Invalid restricted vector register, expected even "
6507 "register in z16.s..z30.s");
6508 case Match_InvalidZPRMul2_Lo64:
6509 return Error(Loc,
"Invalid restricted vector register, expected even "
6510 "register in z0.d..z14.d");
6511 case Match_InvalidZPRMul2_Hi64:
6512 return Error(Loc,
"Invalid restricted vector register, expected even "
6513 "register in z16.d..z30.d");
6514 case Match_InvalidZPR_K0:
6515 return Error(Loc,
"invalid restricted vector register, expected register "
6516 "in z20..z23 or z28..z31");
6517 case Match_InvalidSVEPattern:
6518 return Error(Loc,
"invalid predicate pattern");
6519 case Match_InvalidSVEPPRorPNRAnyReg:
6520 case Match_InvalidSVEPPRorPNRBReg:
6521 case Match_InvalidSVEPredicateAnyReg:
6522 case Match_InvalidSVEPredicateBReg:
6523 case Match_InvalidSVEPredicateHReg:
6524 case Match_InvalidSVEPredicateSReg:
6525 case Match_InvalidSVEPredicateDReg:
6526 return Error(Loc,
"invalid predicate register.");
6527 case Match_InvalidSVEPredicate3bAnyReg:
6528 return Error(Loc,
"invalid restricted predicate register, expected p0..p7 (without element suffix)");
6529 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6530 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6531 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6532 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6533 return Error(Loc,
"Invalid predicate register, expected PN in range "
6534 "pn8..pn15 with element suffix.");
6535 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6536 return Error(Loc,
"invalid restricted predicate-as-counter register "
6537 "expected pn8..pn15");
6538 case Match_InvalidSVEPNPredicateBReg:
6539 case Match_InvalidSVEPNPredicateHReg:
6540 case Match_InvalidSVEPNPredicateSReg:
6541 case Match_InvalidSVEPNPredicateDReg:
6542 return Error(Loc,
"Invalid predicate register, expected PN in range "
6543 "pn0..pn15 with element suffix.");
6544 case Match_InvalidSVEVecLenSpecifier:
6545 return Error(Loc,
"Invalid vector length specifier, expected VLx2 or VLx4");
6546 case Match_InvalidSVEPredicateListMul2x8:
6547 case Match_InvalidSVEPredicateListMul2x16:
6548 case Match_InvalidSVEPredicateListMul2x32:
6549 case Match_InvalidSVEPredicateListMul2x64:
6550 return Error(Loc,
"Invalid vector list, expected list with 2 consecutive "
6551 "predicate registers, where the first vector is a multiple of 2 "
6552 "and with correct element type");
6553 case Match_InvalidSVEExactFPImmOperandHalfOne:
6554 return Error(Loc,
"Invalid floating point constant, expected 0.5 or 1.0.");
6555 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6556 return Error(Loc,
"Invalid floating point constant, expected 0.5 or 2.0.");
6557 case Match_InvalidSVEExactFPImmOperandZeroOne:
6558 return Error(Loc,
"Invalid floating point constant, expected 0.0 or 1.0.");
6559 case Match_InvalidMatrixTileVectorH8:
6560 case Match_InvalidMatrixTileVectorV8:
6561 return Error(Loc,
"invalid matrix operand, expected za0h.b or za0v.b");
6562 case Match_InvalidMatrixTileVectorH16:
6563 case Match_InvalidMatrixTileVectorV16:
6565 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6566 case Match_InvalidMatrixTileVectorH32:
6567 case Match_InvalidMatrixTileVectorV32:
6569 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6570 case Match_InvalidMatrixTileVectorH64:
6571 case Match_InvalidMatrixTileVectorV64:
6573 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6574 case Match_InvalidMatrixTileVectorH128:
6575 case Match_InvalidMatrixTileVectorV128:
6577 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6578 case Match_InvalidMatrixTile16:
6579 return Error(Loc,
"invalid matrix operand, expected za[0-1].h");
6580 case Match_InvalidMatrixTile32:
6581 return Error(Loc,
"invalid matrix operand, expected za[0-3].s");
6582 case Match_InvalidMatrixTile64:
6583 return Error(Loc,
"invalid matrix operand, expected za[0-7].d");
6584 case Match_InvalidMatrix:
6585 return Error(Loc,
"invalid matrix operand, expected za");
6586 case Match_InvalidMatrix8:
6587 return Error(Loc,
"invalid matrix operand, expected suffix .b");
6588 case Match_InvalidMatrix16:
6589 return Error(Loc,
"invalid matrix operand, expected suffix .h");
6590 case Match_InvalidMatrix32:
6591 return Error(Loc,
"invalid matrix operand, expected suffix .s");
6592 case Match_InvalidMatrix64:
6593 return Error(Loc,
"invalid matrix operand, expected suffix .d");
6594 case Match_InvalidMatrixIndexGPR32_12_15:
6595 return Error(Loc,
"operand must be a register in range [w12, w15]");
6596 case Match_InvalidMatrixIndexGPR32_8_11:
6597 return Error(Loc,
"operand must be a register in range [w8, w11]");
6598 case Match_InvalidSVEVectorList2x8Mul2:
6599 case Match_InvalidSVEVectorList2x16Mul2:
6600 case Match_InvalidSVEVectorList2x32Mul2:
6601 case Match_InvalidSVEVectorList2x64Mul2:
6602 case Match_InvalidSVEVectorList2x128Mul2:
6603 return Error(Loc,
"Invalid vector list, expected list with 2 consecutive "
6604 "SVE vectors, where the first vector is a multiple of 2 "
6605 "and with matching element types");
6606 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6607 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6608 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6609 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6610 return Error(Loc,
"Invalid vector list, expected list with 2 consecutive "
6611 "SVE vectors in the range z0-z14, where the first vector "
6612 "is a multiple of 2 "
6613 "and with matching element types");
6614 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6615 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6616 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6617 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6619 "Invalid vector list, expected list with 2 consecutive "
6620 "SVE vectors in the range z16-z30, where the first vector "
6621 "is a multiple of 2 "
6622 "and with matching element types");
6623 case Match_InvalidSVEVectorList4x8Mul4:
6624 case Match_InvalidSVEVectorList4x16Mul4:
6625 case Match_InvalidSVEVectorList4x32Mul4:
6626 case Match_InvalidSVEVectorList4x64Mul4:
6627 case Match_InvalidSVEVectorList4x128Mul4:
6628 return Error(Loc,
"Invalid vector list, expected list with 4 consecutive "
6629 "SVE vectors, where the first vector is a multiple of 4 "
6630 "and with matching element types");
6631 case Match_InvalidLookupTable:
6632 return Error(Loc,
"Invalid lookup table, expected zt0");
6633 case Match_InvalidSVEVectorListStrided2x8:
6634 case Match_InvalidSVEVectorListStrided2x16:
6635 case Match_InvalidSVEVectorListStrided2x32:
6636 case Match_InvalidSVEVectorListStrided2x64:
6639 "Invalid vector list, expected list with each SVE vector in the list "
6640 "8 registers apart, and the first register in the range [z0, z7] or "
6641 "[z16, z23] and with correct element type");
6642 case Match_InvalidSVEVectorListStrided4x8:
6643 case Match_InvalidSVEVectorListStrided4x16:
6644 case Match_InvalidSVEVectorListStrided4x32:
6645 case Match_InvalidSVEVectorListStrided4x64:
6648 "Invalid vector list, expected list with each SVE vector in the list "
6649 "4 registers apart, and the first register in the range [z0, z3] or "
6650 "[z16, z19] and with correct element type");
6651 case Match_AddSubLSLImm3ShiftLarge:
6653 return Error(Loc, "expected 'lsl' with optional integer in range [0, 7]");
6661 bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, OperandVector &Operands, MCStreamer &Out, uint64_t &ErrorInfo, bool MatchingInlineAsm) {
6666 assert(!Operands.empty() && "Unexpected empty operand list!");
6667 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6668 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6671 unsigned NumOperands = Operands.size();
  StringRef Tok = Op.getToken();
6673 if (NumOperands == 4 && Tok == "lsl") {
6674 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6675 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6676 if (Op2.isScalarReg() && Op3.isImm()) {
6682 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(Op2.getReg())) {
6684 NewOp3Val = (32 - Op3Val) & 0x1f;
6685 NewOp4Val = 31 - Op3Val;
  } else {
6687 NewOp3Val = (64 - Op3Val) & 0x3f;
6688 NewOp4Val = 63 - Op3Val;
  }
6695 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6696 Operands.push_back(AArch64Operand::CreateImm(
6697     NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
6698 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6702 }
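  // "bfc Rd, #lsb, #width" (bitfield clear) has no encoding of its own; it is
  // rewritten below as a BFM against the zero register.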
 else if (NumOperands == 4 && Tok == "bfc") {
6704 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6705 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6706 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6708 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6712 if (LSBCE && WidthCE) {
  uint64_t LSB = LSBCE->getValue();
6714 uint64_t Width = WidthCE->getValue();
6716 uint64_t RegWidth = 0;
6717 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(Op1.getReg()))
  RegWidth = 64;
  else
  RegWidth = 32;
6723 if (LSB >= RegWidth)
6724 return Error(LSBOp.getStartLoc(), "expected integer in range [0, 31]");
6726 if (Width < 1 || Width > RegWidth)
6727 return Error(WidthOp.getStartLoc(), "expected integer in range [1, 32]");
6732 ImmR = (32 - LSB) & 0x1f;
6734 ImmR = (64 - LSB) & 0x3f;
6736 uint64_t ImmS = Width - 1;
6738 if (ImmR != 0 && ImmS >= ImmR)
6739 return Error(WidthOp.getStartLoc(), "requested insert overflows register");
6745 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6746 Operands[2] = AArch64Operand::CreateReg(RegWidth == 32 ? AArch64::WZR : AArch64::XZR,
6747                                         RegKind::Scalar, SMLoc(), SMLoc(), getContext());
6749 Operands[3] = AArch64Operand::CreateImm(ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
6752 Operands.push_back(AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(), WidthOp.getEndLoc(), getContext()));
6756 }
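  // "bfi/sbfiz/ubfiz Rd, Rn, #lsb, #width" rotate the lsb into the BFM ImmR
  // field ((regwidth - lsb) masked to the register width) and turn the width
  // into ImmS = width - 1 before handing the operands to the matcher.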
 else if (NumOperands == 5) {
6759 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6760 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6761 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6762 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6764 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6768 if (Op3CE && Op4CE) {
6769 uint64_t Op3Val = Op3CE->getValue();
6770 uint64_t Op4Val = Op4CE->getValue();
6772 uint64_t RegWidth = 0;
6773 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(Op1.getReg()))
  RegWidth = 64;
  else
  RegWidth = 32;
6779 if (Op3Val >= RegWidth)
6780 return Error(Op3.getStartLoc(), "expected integer in range [0, 31]");
6782 if (Op4Val < 1 || Op4Val > RegWidth)
6783 return Error(Op4.getStartLoc(), "expected integer in range [1, 32]");
6786 uint64_t NewOp3Val = 0;
6788 NewOp3Val = (32 - Op3Val) & 0x1f;
6790 NewOp3Val = (64 - Op3Val) & 0x3f;
6792 uint64_t NewOp4Val = Op4Val - 1;
6794 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6795 return Error(Op4.getStartLoc(), "requested insert overflows register");
6798 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
6800 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6802 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6804 Operands[4] = AArch64Operand::CreateImm(NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
  if (Tok == "bfi")
6807 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6809 else if (Tok == "sbfiz")
6810 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(), getContext());
6812 else if (Tok == "ubfiz")
6813 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6822 }
 else if (NumOperands == 5 &&
6823          (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6824 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6825 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6826 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6828 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6832 if (Op3CE && Op4CE) {
6833 uint64_t Op3Val = Op3CE->getValue();
6834 uint64_t Op4Val = Op4CE->getValue();
6836 uint64_t RegWidth = 0;
6837 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(Op1.getReg()))
  RegWidth = 64;
  else
  RegWidth = 32;
6843 if (Op3Val >= RegWidth)
6844 return Error(Op3.getStartLoc(), "expected integer in range [0, 31]");
6846 if (Op4Val < 1 || Op4Val > RegWidth)
6847 return Error(Op4.getStartLoc(), "expected integer in range [1, 32]");
6850 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
6852 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6853 return Error(Op4.getStartLoc(), "requested extract overflows register");
6856 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6858 Operands[4] = AArch64Operand::CreateImm(NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
  if (Tok == "bfxil")
6861 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6863 else if (Tok == "sbfx")
6864 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(), getContext());
6866 else if (Tok == "ubfx")
6867 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
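  // Zero-cycle zeroing on some CPUs does not handle "movi Vd.2d, #0" correctly;
  // with FeatureZCZeroingFPWorkaround the instruction is rewritten to the
  // equivalent "movi Vd.16b, #0" (see the warning text below).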
6880 if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) &&
6881     NumOperands == 4 && Tok == "movi") {
6882 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6883 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6884 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6885 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6886     (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6887 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6888 if (Suffix.lower() == ".2d" &&
     cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
6890 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
                " correctly on this CPU, converting to equivalent movi.16b");
6893 unsigned Idx = Op1.isToken() ? 1 : 2;
6895 Operands[Idx] = AArch64Operand::CreateToken(".16b", IDLoc, getContext());
6903 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6906 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6907 if (Op.isScalarReg()) {
6909 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6910                                         Op.getStartLoc(), Op.getEndLoc(), getContext());
6915 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6916 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6917 if (Op.isScalarReg() &&
6918     AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(Op.getReg())) {
6922 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6923 if (Op.isScalarReg()) {
6925 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
                                             Op.getStartLoc(), Op.getEndLoc(), getContext());
6932 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6933 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6934 if (Op.isScalarReg() &&
6935     AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(Op.getReg())) {
6939 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6940 if (Op.isScalarReg()) {
6942 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
                                             Op.getStartLoc(), Op.getEndLoc(), getContext());
6950 FeatureBitset MissingFeatures;
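  // The instruction is matched twice: first against matcher variant 1 (the
  // short-form NEON syntax), then, on failure, against variant 0; if the
  // long-form attempt merely complains about the type-suffix token, the
  // short-form diagnostic is restored below.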
6953 unsigned MatchResult =
6954     MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6955                          MatchingInlineAsm, 1);
6959 if (MatchResult != Match_Success) {
6962 auto ShortFormNEONErrorInfo = ErrorInfo;
6963 auto ShortFormNEONMatchResult = MatchResult;
6964 auto ShortFormNEONMissingFeatures = MissingFeatures;
  MatchResult =
6967     MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6968                          MatchingInlineAsm, 0);
6973 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6974     Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6975     ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6976 MatchResult = ShortFormNEONMatchResult;
6977 ErrorInfo = ShortFormNEONErrorInfo;
6978 MissingFeatures = ShortFormNEONMissingFeatures;
6982 switch (MatchResult) {
6983 case Match_Success: {
6986 NumOperands = Operands.size();
6987 for (unsigned i = 1; i < NumOperands; ++i)
6988 OperandLocs.push_back(Operands[i]->getStartLoc());
6989 if (validateInstruction(Inst, IDLoc, OperandLocs))
6996 case Match_MissingFeature: {
6997 assert(MissingFeatures.any() && "Unknown missing feature!");
7000 std::string Msg = "instruction requires:";
7001 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
7002 if (MissingFeatures[i]) {
7007 return Error(IDLoc, Msg);
7009 case Match_MnemonicFail:
7010 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
7011 case Match_InvalidOperand: {
7012 SMLoc ErrorLoc = IDLoc;
7014 if (ErrorInfo != ~0ULL) {
7015 if (ErrorInfo >= Operands.size())
7016 return Error(IDLoc, "too few operands for instruction",
7017              SMRange(IDLoc, getTok().getLoc()));
7019 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7020 if (ErrorLoc == SMLoc())
7025 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
7026 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
7027 MatchResult = Match_InvalidSuffix;
7029 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
7031 case Match_InvalidTiedOperand:
7032 case Match_InvalidMemoryIndexed1:
7033 case Match_InvalidMemoryIndexed2:
7034 case Match_InvalidMemoryIndexed4:
7035 case Match_InvalidMemoryIndexed8:
7036 case Match_InvalidMemoryIndexed16:
7037 case Match_InvalidCondCode:
7038 case Match_AddSubLSLImm3ShiftLarge:
7039 case Match_AddSubRegExtendSmall:
7040 case Match_AddSubRegExtendLarge:
7041 case Match_AddSubSecondSource:
7042 case Match_LogicalSecondSource:
7043 case Match_AddSubRegShift32:
7044 case Match_AddSubRegShift64:
7045 case Match_InvalidMovImm32Shift:
7046 case Match_InvalidMovImm64Shift:
7047 case Match_InvalidFPImm:
7048 case Match_InvalidMemoryWExtend8:
7049 case Match_InvalidMemoryWExtend16:
7050 case Match_InvalidMemoryWExtend32:
7051 case Match_InvalidMemoryWExtend64:
7052 case Match_InvalidMemoryWExtend128:
7053 case Match_InvalidMemoryXExtend8:
7054 case Match_InvalidMemoryXExtend16:
7055 case Match_InvalidMemoryXExtend32:
7056 case Match_InvalidMemoryXExtend64:
7057 case Match_InvalidMemoryXExtend128:
7058 case Match_InvalidMemoryIndexed1SImm4:
7059 case Match_InvalidMemoryIndexed2SImm4:
7060 case Match_InvalidMemoryIndexed3SImm4:
7061 case Match_InvalidMemoryIndexed4SImm4:
7062 case Match_InvalidMemoryIndexed1SImm6:
7063 case Match_InvalidMemoryIndexed16SImm4:
7064 case Match_InvalidMemoryIndexed32SImm4:
7065 case Match_InvalidMemoryIndexed4SImm7:
7066 case Match_InvalidMemoryIndexed8SImm7:
7067 case Match_InvalidMemoryIndexed16SImm7:
7068 case Match_InvalidMemoryIndexed8UImm5:
7069 case Match_InvalidMemoryIndexed8UImm3:
7070 case Match_InvalidMemoryIndexed4UImm5:
7071 case Match_InvalidMemoryIndexed2UImm5:
7072 case Match_InvalidMemoryIndexed1UImm6:
7073 case Match_InvalidMemoryIndexed2UImm6:
7074 case Match_InvalidMemoryIndexed4UImm6:
7075 case Match_InvalidMemoryIndexed8UImm6:
7076 case Match_InvalidMemoryIndexed16UImm6:
7077 case Match_InvalidMemoryIndexedSImm6:
7078 case Match_InvalidMemoryIndexedSImm5:
7079 case Match_InvalidMemoryIndexedSImm8:
7080 case Match_InvalidMemoryIndexedSImm9:
7081 case Match_InvalidMemoryIndexed16SImm9:
7082 case Match_InvalidMemoryIndexed8SImm10:
7083 case Match_InvalidImm0_0:
7084 case Match_InvalidImm0_1:
7085 case Match_InvalidImm0_3:
7086 case Match_InvalidImm0_7:
7087 case Match_InvalidImm0_15:
7088 case Match_InvalidImm0_31:
7089 case Match_InvalidImm0_63:
7090 case Match_InvalidImm0_127:
7091 case Match_InvalidImm0_255:
7092 case Match_InvalidImm0_65535:
7093 case Match_InvalidImm1_8:
7094 case Match_InvalidImm1_16:
7095 case Match_InvalidImm1_32:
7096 case Match_InvalidImm1_64:
7097 case Match_InvalidImmM1_62:
7098 case Match_InvalidMemoryIndexedRange2UImm0:
7099 case Match_InvalidMemoryIndexedRange2UImm1:
7100 case Match_InvalidMemoryIndexedRange2UImm2:
7101 case Match_InvalidMemoryIndexedRange2UImm3:
7102 case Match_InvalidMemoryIndexedRange4UImm0:
7103 case Match_InvalidMemoryIndexedRange4UImm1:
7104 case Match_InvalidMemoryIndexedRange4UImm2:
7105 case Match_InvalidSVEAddSubImm8:
7106 case Match_InvalidSVEAddSubImm16:
7107 case Match_InvalidSVEAddSubImm32:
7108 case Match_InvalidSVEAddSubImm64:
7109 case Match_InvalidSVECpyImm8:
7110 case Match_InvalidSVECpyImm16:
7111 case Match_InvalidSVECpyImm32:
7112 case Match_InvalidSVECpyImm64:
7113 case Match_InvalidIndexRange0_0:
7114 case Match_InvalidIndexRange1_1:
7115 case Match_InvalidIndexRange0_15:
7116 case Match_InvalidIndexRange0_7:
7117 case Match_InvalidIndexRange0_3:
7118 case Match_InvalidIndexRange0_1:
7119 case Match_InvalidSVEIndexRange0_63:
7120 case Match_InvalidSVEIndexRange0_31:
7121 case Match_InvalidSVEIndexRange0_15:
7122 case Match_InvalidSVEIndexRange0_7:
7123 case Match_InvalidSVEIndexRange0_3:
7124 case Match_InvalidLabel:
7125 case Match_InvalidComplexRotationEven:
7126 case Match_InvalidComplexRotationOdd:
7127 case Match_InvalidGPR64shifted8:
7128 case Match_InvalidGPR64shifted16:
7129 case Match_InvalidGPR64shifted32:
7130 case Match_InvalidGPR64shifted64:
7131 case Match_InvalidGPR64shifted128:
7132 case Match_InvalidGPR64NoXZRshifted8:
7133 case Match_InvalidGPR64NoXZRshifted16:
7134 case Match_InvalidGPR64NoXZRshifted32:
7135 case Match_InvalidGPR64NoXZRshifted64:
7136 case Match_InvalidGPR64NoXZRshifted128:
7137 case Match_InvalidZPR32UXTW8:
7138 case Match_InvalidZPR32UXTW16:
7139 case Match_InvalidZPR32UXTW32:
7140 case Match_InvalidZPR32UXTW64:
7141 case Match_InvalidZPR32SXTW8:
7142 case Match_InvalidZPR32SXTW16:
7143 case Match_InvalidZPR32SXTW32:
7144 case Match_InvalidZPR32SXTW64:
7145 case Match_InvalidZPR64UXTW8:
7146 case Match_InvalidZPR64SXTW8:
7147 case Match_InvalidZPR64UXTW16:
7148 case Match_InvalidZPR64SXTW16:
7149 case Match_InvalidZPR64UXTW32:
7150 case Match_InvalidZPR64SXTW32:
7151 case Match_InvalidZPR64UXTW64:
7152 case Match_InvalidZPR64SXTW64:
7153 case Match_InvalidZPR32LSL8:
7154 case Match_InvalidZPR32LSL16:
7155 case Match_InvalidZPR32LSL32:
7156 case Match_InvalidZPR32LSL64:
7157 case Match_InvalidZPR64LSL8:
7158 case Match_InvalidZPR64LSL16:
7159 case Match_InvalidZPR64LSL32:
7160 case Match_InvalidZPR64LSL64:
7161 case Match_InvalidZPR0:
7162 case Match_InvalidZPR8:
7163 case Match_InvalidZPR16:
7164 case Match_InvalidZPR32:
7165 case Match_InvalidZPR64:
7166 case Match_InvalidZPR128:
7167 case Match_InvalidZPR_3b8:
7168 case Match_InvalidZPR_3b16:
7169 case Match_InvalidZPR_3b32:
7170 case Match_InvalidZPR_4b8:
7171 case Match_InvalidZPR_4b16:
7172 case Match_InvalidZPR_4b32:
7173 case Match_InvalidZPR_4b64:
7174 case Match_InvalidSVEPPRorPNRAnyReg:
7175 case Match_InvalidSVEPPRorPNRBReg:
7176 case Match_InvalidSVEPredicateAnyReg:
7177 case Match_InvalidSVEPattern:
7178 case Match_InvalidSVEVecLenSpecifier:
7179 case Match_InvalidSVEPredicateBReg:
7180 case Match_InvalidSVEPredicateHReg:
7181 case Match_InvalidSVEPredicateSReg:
7182 case Match_InvalidSVEPredicateDReg:
7183 case Match_InvalidSVEPredicate3bAnyReg:
7184 case Match_InvalidSVEPNPredicateB_p8to15Reg:
7185 case Match_InvalidSVEPNPredicateH_p8to15Reg:
7186 case Match_InvalidSVEPNPredicateS_p8to15Reg:
7187 case Match_InvalidSVEPNPredicateD_p8to15Reg:
7188 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
7189 case Match_InvalidSVEPNPredicateBReg:
7190 case Match_InvalidSVEPNPredicateHReg:
7191 case Match_InvalidSVEPNPredicateSReg:
7192 case Match_InvalidSVEPNPredicateDReg:
7193 case Match_InvalidSVEPredicateListMul2x8:
7194 case Match_InvalidSVEPredicateListMul2x16:
7195 case Match_InvalidSVEPredicateListMul2x32:
7196 case Match_InvalidSVEPredicateListMul2x64:
7197 case Match_InvalidSVEExactFPImmOperandHalfOne:
7198 case Match_InvalidSVEExactFPImmOperandHalfTwo:
7199 case Match_InvalidSVEExactFPImmOperandZeroOne:
7200 case Match_InvalidMatrixTile16:
7201 case Match_InvalidMatrixTile32:
7202 case Match_InvalidMatrixTile64:
7203 case Match_InvalidMatrix:
7204 case Match_InvalidMatrix8:
7205 case Match_InvalidMatrix16:
7206 case Match_InvalidMatrix32:
7207 case Match_InvalidMatrix64:
7208 case Match_InvalidMatrixTileVectorH8:
7209 case Match_InvalidMatrixTileVectorH16:
7210 case Match_InvalidMatrixTileVectorH32:
7211 case Match_InvalidMatrixTileVectorH64:
7212 case Match_InvalidMatrixTileVectorH128:
7213 case Match_InvalidMatrixTileVectorV8:
7214 case Match_InvalidMatrixTileVectorV16:
7215 case Match_InvalidMatrixTileVectorV32:
7216 case Match_InvalidMatrixTileVectorV64:
7217 case Match_InvalidMatrixTileVectorV128:
7218 case Match_InvalidSVCR:
7219 case Match_InvalidMatrixIndexGPR32_12_15:
7220 case Match_InvalidMatrixIndexGPR32_8_11:
7221 case Match_InvalidLookupTable:
7222 case Match_InvalidZPRMul2_Lo8:
7223 case Match_InvalidZPRMul2_Hi8:
7224 case Match_InvalidZPRMul2_Lo16:
7225 case Match_InvalidZPRMul2_Hi16:
7226 case Match_InvalidZPRMul2_Lo32:
7227 case Match_InvalidZPRMul2_Hi32:
7228 case Match_InvalidZPRMul2_Lo64:
7229 case Match_InvalidZPRMul2_Hi64:
7230 case Match_InvalidZPR_K0:
7231 case Match_InvalidSVEVectorList2x8Mul2:
7232 case Match_InvalidSVEVectorList2x16Mul2:
7233 case Match_InvalidSVEVectorList2x32Mul2:
7234 case Match_InvalidSVEVectorList2x64Mul2:
7235 case Match_InvalidSVEVectorList2x128Mul2:
7236 case Match_InvalidSVEVectorList4x8Mul4:
7237 case Match_InvalidSVEVectorList4x16Mul4:
7238 case Match_InvalidSVEVectorList4x32Mul4:
7239 case Match_InvalidSVEVectorList4x64Mul4:
7240 case Match_InvalidSVEVectorList4x128Mul4:
7241 case Match_InvalidSVEVectorList2x8Mul2_Lo:
7242 case Match_InvalidSVEVectorList2x16Mul2_Lo:
7243 case Match_InvalidSVEVectorList2x32Mul2_Lo:
7244 case Match_InvalidSVEVectorList2x64Mul2_Lo:
7245 case Match_InvalidSVEVectorList2x8Mul2_Hi:
7246 case Match_InvalidSVEVectorList2x16Mul2_Hi:
7247 case Match_InvalidSVEVectorList2x32Mul2_Hi:
7248 case Match_InvalidSVEVectorList2x64Mul2_Hi:
7249 case Match_InvalidSVEVectorListStrided2x8:
7250 case Match_InvalidSVEVectorListStrided2x16:
7251 case Match_InvalidSVEVectorListStrided2x32:
7252 case Match_InvalidSVEVectorListStrided2x64:
7253 case Match_InvalidSVEVectorListStrided4x8:
7254 case Match_InvalidSVEVectorListStrided4x16:
7255 case Match_InvalidSVEVectorListStrided4x32:
7256 case Match_InvalidSVEVectorListStrided4x64:
7259 if (ErrorInfo >= Operands.size())
7260 return Error(IDLoc, "too few operands for instruction",
                  SMRange(IDLoc, (*Operands.back()).getEndLoc()));
7263 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
7264 if (ErrorLoc == SMLoc())
7266 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
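// ParseDirective handles the target-specific directives: the general AArch64
// group (.arch, .cpu, .inst, .ltorg, .tlsdesccall, the .cfi_* variants, ...),
// the COFF .seh_* unwind directives, and the .aeabi_* build-attribute
// directives.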
7274 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
7281 SMLoc Loc = DirectiveID.getLoc();
7282 if (IDVal == ".arch")
7283 parseDirectiveArch(Loc);
7284 else if (IDVal == ".cpu")
7285 parseDirectiveCPU(Loc);
7286 else if (IDVal == ".tlsdesccall")
7287 parseDirectiveTLSDescCall(Loc);
7288 else if (IDVal == ".ltorg" || IDVal == ".pool")
7289 parseDirectiveLtorg(Loc);
7290 else if (IDVal == ".unreq")
7291 parseDirectiveUnreq(Loc);
7292 else if (IDVal == ".inst")
7293 parseDirectiveInst(Loc);
7294 else if (IDVal == ".cfi_negate_ra_state")
7295 parseDirectiveCFINegateRAState();
7296 else if (IDVal == ".cfi_negate_ra_state_with_pc")
7297 parseDirectiveCFINegateRAStateWithPC();
7298 else if (IDVal == ".cfi_b_key_frame")
7299 parseDirectiveCFIBKeyFrame();
7300 else if (IDVal == ".cfi_mte_tagged_frame")
7301 parseDirectiveCFIMTETaggedFrame();
7302 else if (IDVal == ".arch_extension")
7303 parseDirectiveArchExtension(Loc);
7304 else if (IDVal == ".variant_pcs")
7305 parseDirectiveVariantPCS(Loc);
7308 parseDirectiveLOH(IDVal, Loc);
7311 }
 else if (IsCOFF) {
7312 if (IDVal == ".seh_stackalloc")
7313 parseDirectiveSEHAllocStack(Loc);
7314 else if (IDVal == ".seh_endprologue")
7315 parseDirectiveSEHPrologEnd(Loc);
7316 else if (IDVal == ".seh_save_r19r20_x")
7317 parseDirectiveSEHSaveR19R20X(Loc);
7318 else if (IDVal == ".seh_save_fplr")
7319 parseDirectiveSEHSaveFPLR(Loc);
7320 else if (IDVal == ".seh_save_fplr_x")
7321 parseDirectiveSEHSaveFPLRX(Loc);
7322 else if (IDVal == ".seh_save_reg")
7323 parseDirectiveSEHSaveReg(Loc);
7324 else if (IDVal == ".seh_save_reg_x")
7325 parseDirectiveSEHSaveRegX(Loc);
7326 else if (IDVal == ".seh_save_regp")
7327 parseDirectiveSEHSaveRegP(Loc);
7328 else if (IDVal == ".seh_save_regp_x")
7329 parseDirectiveSEHSaveRegPX(Loc);
7330 else if (IDVal == ".seh_save_lrpair")
7331 parseDirectiveSEHSaveLRPair(Loc);
7332 else if (IDVal == ".seh_save_freg")
7333 parseDirectiveSEHSaveFReg(Loc);
7334 else if (IDVal == ".seh_save_freg_x")
7335 parseDirectiveSEHSaveFRegX(Loc);
7336 else if (IDVal == ".seh_save_fregp")
7337 parseDirectiveSEHSaveFRegP(Loc);
7338 else if (IDVal == ".seh_save_fregp_x")
7339 parseDirectiveSEHSaveFRegPX(Loc);
7340 else if (IDVal == ".seh_set_fp")
7341 parseDirectiveSEHSetFP(Loc);
7342 else if (IDVal == ".seh_add_fp")
7343 parseDirectiveSEHAddFP(Loc);
7344 else if (IDVal == ".seh_nop")
7345 parseDirectiveSEHNop(Loc);
7346 else if (IDVal == ".seh_save_next")
7347 parseDirectiveSEHSaveNext(Loc);
7348 else if (IDVal == ".seh_startepilogue")
7349 parseDirectiveSEHEpilogStart(Loc);
7350 else if (IDVal == ".seh_endepilogue")
7351 parseDirectiveSEHEpilogEnd(Loc);
7352 else if (IDVal == ".seh_trap_frame")
7353 parseDirectiveSEHTrapFrame(Loc);
7354 else if (IDVal == ".seh_pushframe")
7355 parseDirectiveSEHMachineFrame(Loc);
7356 else if (IDVal == ".seh_context")
7357 parseDirectiveSEHContext(Loc);
7358 else if (IDVal == ".seh_ec_context")
7359 parseDirectiveSEHECContext(Loc);
7360 else if (IDVal == ".seh_clear_unwound_to_call")
7361 parseDirectiveSEHClearUnwoundToCall(Loc);
7362 else if (IDVal == ".seh_pac_sign_lr")
7363 parseDirectiveSEHPACSignLR(Loc);
7364 else if (IDVal == ".seh_save_any_reg")
7365 parseDirectiveSEHSaveAnyReg(Loc, false, false);
7366 else if (IDVal == ".seh_save_any_reg_p")
7367 parseDirectiveSEHSaveAnyReg(Loc, true, false);
7368 else if (IDVal == ".seh_save_any_reg_x")
7369 parseDirectiveSEHSaveAnyReg(Loc, false, true);
7370 else if (IDVal == ".seh_save_any_reg_px")
7371 parseDirectiveSEHSaveAnyReg(Loc, true, true);
7372 else if (IDVal == ".seh_allocz")
7373 parseDirectiveSEHAllocZ(Loc);
7374 else if (IDVal == ".seh_save_zreg")
7375 parseDirectiveSEHSaveZReg(Loc);
7376 else if (IDVal == ".seh_save_preg")
7377 parseDirectiveSEHSavePReg(Loc);
7381 if (IDVal == ".aeabi_subsection")
7382 parseDirectiveAeabiSubSectionHeader(Loc);
7383 else if (IDVal == ".aeabi_attribute")
7384 parseDirectiveAeabiAArch64Attr(Loc);
7397 if (!NoCrypto && Crypto) {
7400 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7401 ArchInfo == AArch64::ARMV8_3A) {
7405 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7406 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7407 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7408 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7409 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7410 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
7416 } else if (NoCrypto) {
7419 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7420     ArchInfo == AArch64::ARMV8_3A) {
7421 RequestedExtensions.push_back("nosha2");
7424 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7425 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7426 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7427 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7428 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7429 ArchInfo == AArch64::ARMV9_4A) {
7431 RequestedExtensions.push_back("nosha3");
7432 RequestedExtensions.push_back("nosha2");
7444 bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
7445 SMLoc CurLoc = getLoc();
7447 StringRef Name = getParser().parseStringToEndOfStatement().trim();
7448 StringRef Arch, ExtensionString;
7449 std::tie(Arch, ExtensionString) = Name.split('+');
7453 return Error(CurLoc, "unknown arch name");
7459 std::vector<StringRef> AArch64Features;
7463 MCSubtargetInfo &STI = copySTI();
7464 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
7466 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
7469 if (!ExtensionString.empty())
7470 ExtensionString.split(RequestedExtensions, '+');
7475 for (auto Name : RequestedExtensions) {
7479 bool EnableFeature = !Name.consume_front_insensitive("no");
7486 return Error(CurLoc, "unsupported architectural extension: " + Name);
7494 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7495 setAvailableFeatures(Features);
7497 getTargetStreamer().emitDirectiveArch(Name);
7503 bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7504 SMLoc ExtLoc = getLoc();
7506 StringRef FullName = getParser().parseStringToEndOfStatement().trim();
7511 bool EnableFeature = true;
7512 StringRef Name = FullName;
7513 if (Name.starts_with_insensitive("no")) {
7514 EnableFeature = false;
7523 return Error(ExtLoc, "unsupported architectural extension: " + Name);
7525 MCSubtargetInfo &STI = copySTI();
7530 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7531 setAvailableFeatures(Features);
7533 getTargetStreamer().emitDirectiveArchExtension(FullName);
7539 bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
7540 SMLoc CurLoc = getLoc();
7542 StringRef CPU, ExtensionString;
7543 std::tie(CPU, ExtensionString) =
7544     getParser().parseStringToEndOfStatement().trim().split('+');
7550 if (!ExtensionString.empty())
7551 ExtensionString.split(RequestedExtensions, '+');
7555 Error(CurLoc, "unknown CPU name");
7560 MCSubtargetInfo &STI = copySTI();
7564 for (auto Name : RequestedExtensions) {
7568 bool EnableFeature = !Name.consume_front_insensitive("no");
7575 return Error(CurLoc, "unsupported architectural extension: " + Name);
7583 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7584 setAvailableFeatures(Features);
7590 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7592 return Error(Loc, "expected expression following '.inst' directive");
7594 auto parseOp = [&]() -> bool {
7596 const MCExpr *Expr = nullptr;
7597 if (check(getParser().parseExpression(Expr), L, "expected expression"))
7600 if (check(!Value, L, "expected constant expression"))
7602 getTargetStreamer().emitInst(Value->getValue());
7606 return parseMany(parseOp);
7611 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
7613 if (check(getParser().parseIdentifier(Name), L, "expected symbol") ||
7625 getParser().getStreamer().emitInstruction(Inst, getSTI());
7631bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
7635 return TokError(
"expected an identifier or a number in directive");
7638 int64_t
Id = getTok().getIntVal();
7640 return TokError(
"invalid numeric identifier in directive");
7643 StringRef
Name = getTok().getIdentifier();
7649 return TokError(
"invalid identifier in directive");
7657 assert(NbArgs != -1 &&
"Invalid number of arguments");
7660 for (
int Idx = 0; Idx < NbArgs; ++Idx) {
7662 if (getParser().parseIdentifier(Name))
7663 return TokError(
"expected identifier in directive");
7666 if (Idx + 1 == NbArgs)
7674 getStreamer().emitLOHDirective(Kind, Args);
7680bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7683 getTargetStreamer().emitCurrentConstantPool();
7689bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7691 SMLoc SRegLoc = getLoc();
7692 RegKind RegisterKind = RegKind::Scalar;
7694 ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7698 RegisterKind = RegKind::NeonVector;
7699 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
7705 return Error(SRegLoc, "vector register without type specifier expected");
7710 RegisterKind = RegKind::SVEDataVector;
7712 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7718 return Error(SRegLoc, "sve vector register without type specifier expected");
7724 RegisterKind = RegKind::SVEPredicateVector;
7725 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7731 return Error(SRegLoc, "sve predicate register without type specifier expected");
7736 return Error(SRegLoc, "register name or alias expected");
7742 auto pair = std::make_pair(RegisterKind, RegNum);
7743 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
7744 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
7751bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7753 return TokError("unexpected input in .unreq directive.");
7754 RegisterReqs.erase(getTok().getIdentifier().lower());
7759bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7762 getStreamer().emitCFINegateRAState();
7766bool AArch64AsmParser::parseDirectiveCFINegateRAStateWithPC() {
7769 getStreamer().emitCFINegateRAStateWithPC();
7775bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7778 getStreamer().emitCFIBKeyFrame();
7784bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7787 getStreamer().emitCFIMTETaggedFrame();
7793bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7795 if (getParser().parseIdentifier(Name))
7796 return TokError("expected symbol name");
7799 getTargetStreamer().emitDirectiveVariantPCS(getContext().getOrCreateSymbol(Name));
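// Each .seh_* directive below parses its operands and forwards them to the
// corresponding ARM64WinCFI* emitter on the target streamer, mirroring the
// ARM64 Windows unwind opcodes.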
7806bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7808 if (parseImmExpr(Size))
7810 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7816bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
7817 getTargetStreamer().emitARM64WinCFIPrologEnd();
7823bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7825 if (parseImmExpr(
Offset))
7827 getTargetStreamer().emitARM64WinCFISaveR19R20X(
Offset);
7833bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7835 if (parseImmExpr(
Offset))
7837 getTargetStreamer().emitARM64WinCFISaveFPLR(
Offset);
7843bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7845 if (parseImmExpr(
Offset))
7847 getTargetStreamer().emitARM64WinCFISaveFPLRX(
Offset);
7853bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7856 if (parseRegisterInRange(
Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7857 parseComma() || parseImmExpr(
Offset))
7859 getTargetStreamer().emitARM64WinCFISaveReg(
Reg,
Offset);
7865bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7868 if (parseRegisterInRange(
Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7869 parseComma() || parseImmExpr(
Offset))
7871 getTargetStreamer().emitARM64WinCFISaveRegX(
Reg,
Offset);
7877bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7880 if (parseRegisterInRange(
Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7881 parseComma() || parseImmExpr(
Offset))
7883 getTargetStreamer().emitARM64WinCFISaveRegP(
Reg,
Offset);
7889bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7892 if (parseRegisterInRange(
Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7893 parseComma() || parseImmExpr(
Offset))
7895 getTargetStreamer().emitARM64WinCFISaveRegPX(
Reg,
Offset);
bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  if (check(((Reg - 19) % 2 != 0), L,
            "expected register with even offset from x19"))
    return true;
  getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
  return false;
}

bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
  return false;
}

bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
  return false;
}

bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
  return false;
}

bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
  unsigned Reg;
  int64_t Offset;
  if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
      parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
  return false;
}

bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
  getTargetStreamer().emitARM64WinCFISetFP();
  return false;
}

bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
  int64_t Size;
  if (parseImmExpr(Size))
    return true;
  getTargetStreamer().emitARM64WinCFIAddFP(Size);
  return false;
}

bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
  getTargetStreamer().emitARM64WinCFINop();
  return false;
}

bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
  getTargetStreamer().emitARM64WinCFISaveNext();
  return false;
}

bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIEpilogStart();
  return false;
}

bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIEpilogEnd();
  return false;
}

bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
  getTargetStreamer().emitARM64WinCFITrapFrame();
  return false;
}

bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIMachineFrame();
  return false;
}

bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIContext();
  return false;
}

bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIECContext();
  return false;
}

bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
  return false;
}

bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
  getTargetStreamer().emitARM64WinCFIPACSignLR();
  return false;
}

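// Rough sketch (hypothetical code, directive spellings as used by LLVM's
// ARM64 Windows unwind support) of a prologue driving the parsers above:
//
//   stp x19, x20, [sp, #-32]!
//   .seh_save_regp_x x19, 32
//   stp fp, lr, [sp, #16]
//   .seh_save_fplr 16
//   add fp, sp, #16
//   .seh_add_fp 16
//   .seh_endprologue
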
bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
                                                   bool Writeback) {
  MCRegister Reg;
  SMLoc Start, End;
  int64_t Offset;
  if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") ||
      parseComma() || parseImmExpr(Offset))
    return true;

  if (Reg == AArch64::FP || Reg == AArch64::LR ||
      (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, "invalid save_any_reg offset");
    unsigned EncodedReg;
    if (Reg == AArch64::FP)
      EncodedReg = 29;
    else if (Reg == AArch64::LR)
      EncodedReg = 30;
    else
      EncodedReg = Reg - AArch64::X0;
    if (Paired) {
      if (Reg == AArch64::LR)
        return Error(Start, "lr cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
    unsigned EncodedReg = Reg - AArch64::D0;
    if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
      return Error(L, "invalid save_any_reg offset");
    if (Paired) {
      if (Reg == AArch64::D31)
        return Error(Start, "d31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset);
    }
  } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
    unsigned EncodedReg = Reg - AArch64::Q0;
    if (Offset < 0 || Offset % 16)
      return Error(L, "invalid save_any_reg offset");
    if (Paired) {
      if (Reg == AArch64::Q31)
        return Error(Start, "q31 cannot be paired with another register");
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset);
    } else {
      if (Writeback)
        getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset);
      else
        getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset);
    }
  } else {
    return Error(Start, "save_any_reg register must be x, q or d register");
  }
  return false;
}

bool AArch64AsmParser::parseDirectiveSEHAllocZ(SMLoc L) {
  int64_t Offset;
  if (parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFIAllocZ(Offset);
  return false;
}

bool AArch64AsmParser::parseDirectiveSEHSaveZReg(SMLoc L) {
  MCRegister RegNum;
  StringRef Kind;
  int64_t Offset;
  if (!tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector)
           .isSuccess())
    return true;
  if (check(RegNum < AArch64::Z8 || RegNum > AArch64::Z23, L,
            "expected register in range z8 to z23"))
    return true;
  if (parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISaveZReg(RegNum - AArch64::Z0, Offset);
  return false;
}

bool AArch64AsmParser::parseDirectiveSEHSavePReg(SMLoc L) {
  MCRegister RegNum;
  StringRef Kind;
  int64_t Offset;
  if (!tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector)
           .isSuccess())
    return true;
  if (check(RegNum < AArch64::P4 || RegNum > AArch64::P15, L,
            "expected register in range p4 to p15"))
    return true;
  if (parseComma() || parseImmExpr(Offset))
    return true;
  getTargetStreamer().emitARM64WinCFISavePReg(RegNum - AArch64::P0, Offset);
  return false;
}

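// SVE callee-saves in Windows unwind info; illustrative only, with the
// directive spellings implied by the emitters above (AllocZ/SaveZReg/SavePReg):
//
//   .seh_allocz 4
//   .seh_save_zreg z8, 0
//   .seh_save_preg p4, 2
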
bool AArch64AsmParser::parseDirectiveAeabiSubSectionHeader(SMLoc L) {
  // .aeabi_subsection <name> [, <required|optional>, <uleb128|ntbs>]
  MCAsmParser &Parser = getParser();

  StringRef SubsectionName;
  // ... parse SubsectionName ...
  std::unique_ptr<MCELFStreamer::AttributeSubSection> SubsectionExists =
      getTargetStreamer().getAttributesSubsectionByName(SubsectionName);

  // A bare name re-activates a subsection that was defined earlier.
  if (Parser.getTok().is(AsmToken::EndOfStatement)) {
    if (SubsectionExists) {
      getTargetStreamer().emitAttributesSubsection(
          SubsectionName, SubsectionExists->IsOptional,
          SubsectionExists->ParameterType);
      return false;
    }
    return Error(L, "Could not switch to subsection '" + SubsectionName +
                        "' using subsection name, subsection has not been "
                        "defined");
  }

  // Parse the optionality parameter and check it against any earlier
  // definition. Other diagnostics on this path:
  //   "optionality parameter not found, expected required|optional"
  //   "aeabi_feature_and_bits must be marked as optional"
  //   "aeabi_pauthabi must be marked as required"
  // ...
  if (SubsectionExists && IsOptional != SubsectionExists->IsOptional)
    return Error(L, "optionality mismatch! subsection '" + SubsectionName +
                        "' already exists with optionality defined as '" +
                        getOptionalStr(SubsectionExists->IsOptional) + "'");

  // Parse the type parameter the same way. Other diagnostics:
  //   "type parameter not found, expected uleb128|ntbs"
  //   SubsectionName + " must be marked as ULEB128"
  //   "unexpected token for AArch64 build attributes subsection header
  //    directive"
  // ...
  if (SubsectionExists && Type != SubsectionExists->ParameterType)
    return Error(L, "type mismatch! subsection '" + SubsectionName +
                        "' already exists with type defined as '" +
                        getTypeStr(SubsectionExists->ParameterType) + "'");

  getTargetStreamer().emitAttributesSubsection(SubsectionName, IsOptional,
                                               Type);
  return false;
}

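// Illustrative subsection headers accepted by this directive; the two public
// subsections have fixed optionality, as the diagnostics above spell out:
//
//   .aeabi_subsection aeabi_pauthabi, required, uleb128
//   .aeabi_subsection aeabi_feature_and_bits, optional, uleb128
//   .aeabi_subsection aeabi_pauthabi            // switch back by bare name
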
bool AArch64AsmParser::parseDirectiveAeabiAArch64Attr(SMLoc L) {
  // .aeabi_attribute <tag>, <value>
  MCAsmParser &Parser = getParser();

  std::unique_ptr<MCELFStreamer::AttributeSubSection> ActiveSubsection =
      getTargetStreamer().getActiveAttributesSubsection();
  if (nullptr == ActiveSubsection) {
    return Error(L, "no active subsection, build attribute can not be added");
  }
  StringRef ActiveSubsectionName = ActiveSubsection->VendorName;
  unsigned ActiveSubsectionType = ActiveSubsection->ParameterType;
  // ... map ActiveSubsectionName to ActiveSubsectionID ...

  // Parse the tag, either as a raw unsigned integer or, for the known public
  // subsections, as a recognised tag name.
  unsigned Tag = 0;
  StringRef TagStr = "";
  if (getTok().is(AsmToken::Integer)) {
    Tag = getTok().getIntVal();
  } else {
    // ...
    switch (ActiveSubsectionID) {
    // Except for public subsections, tags have to be an unsigned int.
    // Unknown names are rejected with
    //   "unknown AArch64 build attribute tag '" + TagStr +
    //   "' for subsection '" + ActiveSubsectionName + "'"
    // ...
    }
  }

  // Parse the value. Its kind must agree with the active subsection's type:
  //   "active subsection type is NTBS (string), found ULEB128 (unsigned)"
  //   "active subsection type is ULEB128 (unsigned), found NTBS (string)"
  unsigned ValueInt = unsigned(-1);
  std::string ValueStr = "";
  if (getTok().is(AsmToken::Integer)) {
    ValueInt = getTok().getIntVal();
    // Flag-valued tags only accept 0 or 1.
    if (0 != ValueInt && 1 != ValueInt) {
      return Error(L, "unknown AArch64 build attributes Value for Tag '" +
                          TagStr + "' options are 0|1");
    }
  }
  // ... otherwise parse an NTBS value into ValueStr ...
  // Trailing tokens: "unexpected token for AArch64 build attributes tag and
  // value attribute directive".

  if (unsigned(-1) != ValueInt) {
    getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, ValueInt, "");
  }
  if ("" != ValueStr) {
    getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, unsigned(-1),
                                      ValueStr);
  }
  return false;
}

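// Attributes are then added to the active subsection, by tag name or raw
// unsigned tag, with a value whose kind matches the subsection type.
// Illustrative (tag names are examples, not an exhaustive list):
//
//   .aeabi_attribute Tag_Feature_BTI, 1
//   .aeabi_attribute 7, 1               // tags may also be given numerically
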
bool AArch64AsmParser::parseDataExpr(const MCExpr *&Res) {
  SMLoc EndLoc;

  if (getParser().parseExpression(Res))
    return true;

  MCAsmParser &Parser = getParser();
  // Accept an optional '@' relocation specifier after the expression; with no
  // '@' the plain expression is returned as-is.
  // ...
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return Error(getLoc(), "expected relocation specifier");
  StringRef Identifier = Parser.getTok().getIdentifier();
  SMLoc Loc = getLoc();
  Parser.Lex();

  if (Identifier == "auth")
    return parseAuthExpr(Res, EndLoc);

  // Map the identifier onto a specifier and rewrap the symbol reference:
  // "got" on MachO; "gotpcrel", "plt" and "funcinit" on ELF. Anything else is
  // rejected with Error(Loc, "invalid relocation specifier"), and a specifier
  // applied to something other than a bare symbol gives
  // Error(Loc, "@ specifier only allowed after a symbol").
  // ...

  // Finally, accept an optional '+'/'-' constant addend.
  std::optional<MCBinaryExpr::Opcode> Opcode;
  // ...
  const MCExpr *Term;
  if (getParser().parsePrimaryExpr(Term, EndLoc, nullptr))
    return true;
  // ...
  return false;
}

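// Illustrative data directives carrying the specifiers recognised above:
//
//   .word foo@plt
//   .word bar@gotpcrel
//   .quad sym@AUTH(ia, 42)    // handed off to parseAuthExpr below
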
bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
  MCAsmParser &Parser = getParser();
  MCContext &Ctx = getContext();
  AsmToken Tok = Parser.getTok();

  if (parseToken(AsmToken::LParen, "expected '('"))
    return true;

  if (Parser.getTok().isNot(AsmToken::Identifier))
    return TokError("expected key name");
  StringRef KeyStr = Parser.getTok().getIdentifier();
  auto KeyIDOrNone = AArch64StringToPACKeyID(KeyStr);
  if (!KeyIDOrNone)
    return TokError("invalid key '" + KeyStr + "'");
  Parser.Lex();

  if (parseToken(AsmToken::Comma, "expected ','"))
    return true;

  if (Parser.getTok().isNot(AsmToken::Integer))
    return TokError("expected integer discriminator");
  int64_t Discriminator = Parser.getTok().getIntVal();
  if (!isUInt<16>(Discriminator))
    return TokError("integer discriminator " + Twine(Discriminator) +
                    " out of range [0, 0xFFFF]");
  Parser.Lex();

  bool UseAddressDiversity = false;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex();
    if (Parser.getTok().isNot(AsmToken::Identifier) ||
        Parser.getTok().getIdentifier() != "addr")
      return TokError("expected 'addr'");
    UseAddressDiversity = true;
    Parser.Lex();
  }

  EndLoc = Parser.getTok().getEndLoc();
  if (parseToken(AsmToken::RParen, "expected ')'"))
    return true;

  Res = AArch64AuthMCExpr::create(Res, Discriminator, *KeyIDOrNone,
                                  UseAddressDiversity, Ctx, Res->getLoc());
  return false;
}

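// Illustrative signed-pointer expressions accepted above (keys are ia, ib,
// da, db; the discriminator must fit in 16 bits):
//
//   .quad g@AUTH(ia, 42)
//   .quad g@AUTH(db, 0, addr)    // with address diversity
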
bool AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                         /* specifier and addend outputs */) {
  // ...
  // Peel off an explicit specifier wrapper, if present, and remember it.
  ELFSpec = AE->getSpecifier();
  Expr = AE->getSubExpr();
  // ...
}

#define GET_REGISTER_MATCHER
#define GET_SUBTARGET_FEATURE_NAME
#define GET_MATCHER_IMPLEMENTATION
#define GET_MNEMONIC_SPELL_CHECKER
#include "AArch64GenAsmMatcher.inc"

unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
                                                      unsigned Kind) {
  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);

  auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
    if (!Op.isImm())
      return Match_InvalidOperand;
    auto *CE = dyn_cast<MCConstantExpr>(Op.getImm());
    if (!CE)
      return Match_InvalidOperand;
    if (CE->getValue() == ExpectedVal)
      return Match_Success;
    return Match_InvalidOperand;
  };

  switch (Kind) {
  default:
    return Match_InvalidOperand;
  case MCK_MPR:
    // "za" is parsed as a token rather than as a register.
    if (Op.isTokenEqual("za"))
      return Match_Success;
    return Match_InvalidOperand;

  // Literal '#N' immediates that appear as tokens in alias asm strings.
#define MATCH_HASH(N)                                                          \
  case MCK__HASH_##N:                                                          \
    return MatchesOpImmediate(N);
    // ... MATCH_HASH(0), MATCH_HASH(1), ... ...
#undef MATCH_HASH

#define MATCH_HASH_MINUS(N)                                                    \
  case MCK__HASH__MINUS_##N:                                                   \
    return MatchesOpImmediate(-N);
    // ... MATCH_HASH_MINUS(4), ... ...
#undef MATCH_HASH_MINUS
  }
}

ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
  SMLoc S = getLoc();

  if (getTok().isNot(AsmToken::Identifier))
    return Error(S, "expected register");

  MCRegister FirstReg;
  ParseStatus Res = tryParseScalarRegister(FirstReg);
  if (!Res.isSuccess())
    return Error(S, "expected first even register of a consecutive same-size "
                    "even/odd register pair");

  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  bool isXReg = XRegClass.contains(FirstReg),
       isWReg = WRegClass.contains(FirstReg);
  if (!isXReg && !isWReg)
    return Error(S, "expected first even register of a consecutive same-size "
                    "even/odd register pair");

  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
  if (FirstEncoding & 0x1)
    return Error(S, "expected first even register of a consecutive same-size "
                    "even/odd register pair");

  if (getTok().isNot(AsmToken::Comma))
    return Error(getLoc(), "expected comma");
  Lex(); // Eat the comma.

  SMLoc E = getLoc();
  MCRegister SecondReg;
  Res = tryParseScalarRegister(SecondReg);
  if (!Res.isSuccess())
    return Error(E, "expected second odd register of a consecutive same-size "
                    "even/odd register pair");

  if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg)))
    return Error(E, "expected second odd register of a consecutive same-size "
                    "even/odd register pair");

  MCRegister Pair;
  if (isXReg)
    Pair = RI->getMatchingSuperReg(
        FirstReg, AArch64::sube64,
        &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  else
    Pair = RI->getMatchingSuperReg(
        FirstReg, AArch64::sube32,
        &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);

  Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
                                               getLoc(), getContext()));
  return ParseStatus::Success;
}

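// The even/odd sequential pair operand is what CASP-style instructions use,
// e.g. (illustrative):
//
//   casp  x0, x1, x2, x3, [x4]    // x0/x1 and x2/x3 are even/odd pairs
//   caspa w4, w5, w6, w7, [x2]
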
template <bool ParseShiftExtend, bool ParseSuffix>
ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
  const SMLoc S = getLoc();
  // Check for an SVE vector register specifier first.
  MCRegister RegNum;
  StringRef Kind;
  ParseStatus Res =
      tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
  if (!Res.isSuccess())
    return Res;

  if (ParseSuffix && Kind.empty())
    return ParseStatus::NoMatch;

  const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
  if (!KindRes)
    return ParseStatus::NoMatch;

  unsigned ElementWidth = KindRes->second;

  // No shift/extend is the default.
  if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateVectorReg(
        RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));

    ParseStatus Res = tryParseVectorIndex(Operands);
    if (Res.isFailure())
      return ParseStatus::Failure;
    return ParseStatus::Success;
  }

  // Eat the comma and match the optional shift/extend.
  Lex();
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  if (!Res.isSuccess())
    return Res;

  auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
      getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));

  return ParseStatus::Success;
}

ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc SS = getLoc();
  const AsmToken &TokE = getTok();

  int64_t Pattern;
  if (TokE.is(AsmToken::Hash)) {
    // '#imm' form.
    Parser.Lex();
    const MCExpr *ImmVal;
    SS = getLoc();
    if (Parser.parseExpression(ImmVal))
      return ParseStatus::Failure;
    auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("invalid operand for instruction");
    Pattern = MCE->getValue();
  } else {
    // Named pattern such as 'vl4' or 'all'.
    auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
    if (!Pat)
      return ParseStatus::NoMatch;
    Parser.Lex();
    Pattern = Pat->Encoding;
    assert(Pattern >= 0 && Pattern < 32);
  }

  Operands.push_back(
      AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
                                SS, getLoc(), getContext()));
  return ParseStatus::Success;
}

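// SVE predicate-pattern operands are either a named pattern or a raw
// immediate, e.g. (illustrative):
//
//   ptrue p0.s, vl16
//   cntd  x0, all, mul #4
//   ptrue p1.b, #31       // raw pattern encoding
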
ParseStatus
AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
  int64_t Pattern;
  SMLoc SS = getLoc();
  const AsmToken &TokE = getTok();
  auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
      TokE.getString());
  if (!Pat)
    return ParseStatus::NoMatch;

  Lex();
  Pattern = Pat->Encoding;
  assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");

  Operands.push_back(
      AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
                                SS, getLoc(), getContext()));
  return ParseStatus::Success;
}

ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
  SMLoc SS = getLoc();

  MCRegister XReg;
  if (!tryParseScalarRegister(XReg).isSuccess())
    return ParseStatus::NoMatch;

  MCContext &ctx = getContext();
  const MCRegisterInfo *RI = ctx.getRegisterInfo();
  MCRegister X8Reg = RI->getMatchingSuperReg(
      XReg, AArch64::x8sub_0,
      &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
  if (!X8Reg)
    return Error(SS,
                 "expected an even-numbered x-register in the range [x0,x22]");

  Operands.push_back(
      AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
  return ParseStatus::Success;
}

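// The x8-tuple operand names eight consecutive registers by their first
// element, which must be even and no higher than x22; used, for example, by
// the LS64 ld64b/st64b instructions (illustrative):
//
//   ld64b x0, [x20]    // x0..x7
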
ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
  SMLoc S = getLoc();

  const MCExpr *ImmF;
  if (getParser().parseExpression(ImmF))
    return ParseStatus::NoMatch;
  if (getTok().isNot(AsmToken::Colon))
    return ParseStatus::NoMatch;
  Lex(); // Eat ':'.

  SMLoc E = getTok().getLoc();
  const MCExpr *ImmL;
  if (getParser().parseExpression(ImmL))
    return ParseStatus::NoMatch;

  auto *CEF = dyn_cast<MCConstantExpr>(ImmF);
  auto *CEL = dyn_cast<MCConstantExpr>(ImmL);
  if (!CEF || !CEL)
    return ParseStatus::NoMatch;
  unsigned ImmFVal = CEF->getValue();
  unsigned ImmLVal = CEL->getValue();

  Operands.push_back(
      AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext()));
  return ParseStatus::Success;
}

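// Immediate ranges of the form "<first>:<last>" (for example the "0:1" in an
// SME ZA slice such as za.d[w8, 0:1]) are parsed as a single operand; both
// bounds must evaluate to constants.
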
template <int Adj>
ParseStatus AArch64AsmParser::tryParseAdjImm0_63(OperandVector &Operands) {
  SMLoc S = getLoc();

  const MCExpr *Ex;
  if (getParser().parseExpression(Ex))
    return ParseStatus::NoMatch;
  auto *MCE = dyn_cast<MCConstantExpr>(Ex);
  if (!MCE)
    return ParseStatus::NoMatch;

  // The operand is encoded with a fixed +/-1 adjustment applied.
  static_assert(Adj == 1 || Adj == -1, "Unsafe immediate adjustment");
  int64_t AdjImm = MCE->getValue() + Adj;
  if (AdjImm < 0 || AdjImm > 63)
    return ParseStatus::NoMatch;

  Operands.push_back(AArch64Operand::CreateImm(
      MCConstantExpr::create(AdjImm, getContext()), S, getLoc(), getContext()));
  return ParseStatus::Success;
}