#define DEBUG_TYPE "riscv-insert-vsetvli"
#define RISCV_INSERT_VSETVLI_NAME "RISC-V Insert VSETVLI pass"

STATISTIC(NumInsertedVSETVL, "Number of VSETVL inst inserted");
STATISTIC(NumCoalescedVSETVL, "Number of VSETVL inst coalesced");

static cl::opt<bool> EnsureWholeVectorRegisterMoveValidVTYPE(
    DEBUG_TYPE "-whole-vector-register-move-valid-vtype", cl::Hidden,
    cl::desc("Insert vsetvlis before vmvNr.vs to ensure vtype is valid and "
             "vill is cleared"),
    cl::init(true));
  return LI.getVNInfoBefore(SI);

static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {

  case RISCV::VSSE64_V:

  const unsigned Log2SEW = MI.getOperand(getSEWOpNum(MI)).getImm();

  if (!MI.isRegTiedToUseOperand(0, &UseOpIdx))

  return MI.isCopy() && MI.getOperand(0).getReg().isPhysical() &&
         RISCVRegisterInfo::isRVVRegClass(
             TRI->getMinimalPhysRegClass(MI.getOperand(0).getReg()));
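
// DemandedFields records which pieces of the VL/VTYPE state an instruction
// actually depends on: whether any property of VL matters (VLAny), whether
// only VL's zero/non-zero-ness matters (VLZeroness), and how strictly SEW,
// LMUL, the SEW/LMUL ratio and the tail/mask policies must be preserved.
// Compatibility checks and vsetvli coalescing consult these fields so that a
// state change only counts as "different" in ways the consumer can observe.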
struct DemandedFields {
  bool VLZeroness = false;

    SEWGreaterThanOrEqualAndLessThan64 = 2,
    SEWGreaterThanOrEqual = 1,

    LMULLessThanOrEqualToM1 = 1,

  bool SEWLMULRatio = false;
  bool TailPolicy = false;
  bool MaskPolicy = false;

  bool usedVTYPE() const {
    return SEW || LMUL || SEWLMULRatio || TailPolicy || MaskPolicy || VILL ||
           AltFmt || TWiden;
  }

  bool usedVL() {
    return VLAny || VLZeroness;
  }

  static DemandedFields all() {

  void doUnion(const DemandedFields &B) {
    VLAny |= B.VLAny;
    VLZeroness |= B.VLZeroness;
    SEW = std::max(SEW, B.SEW);
    LMUL = std::max(LMUL, B.LMUL);
    SEWLMULRatio |= B.SEWLMULRatio;
    TailPolicy |= B.TailPolicy;
    MaskPolicy |= B.MaskPolicy;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream &OS) const {
    OS << "VLAny=" << VLAny << ", ";
    OS << "VLZeroness=" << VLZeroness << ", ";

    case SEWGreaterThanOrEqual:
      OS << "SEWGreaterThanOrEqual";
    case SEWGreaterThanOrEqualAndLessThan64:
      OS << "SEWGreaterThanOrEqualAndLessThan64";

    case LMULLessThanOrEqualToM1:
      OS << "LMULLessThanOrEqualToM1";

    OS << "SEWLMULRatio=" << SEWLMULRatio << ", ";
    OS << "TailPolicy=" << TailPolicy << ", ";
    OS << "MaskPolicy=" << MaskPolicy << ", ";
    OS << "VILL=" << VILL << ", ";
    OS << "AltFmt=" << AltFmt << ", ";
    OS << "TWiden=" << TWiden;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

  return Fractional || LMul == 1;

static bool areCompatibleVTYPEs(uint64_t CurVType, uint64_t NewVType,
                                const DemandedFields &Used) {
  case DemandedFields::SEWNone:
  case DemandedFields::SEWEqual:
  case DemandedFields::SEWGreaterThanOrEqual:
  case DemandedFields::SEWGreaterThanOrEqualAndLessThan64:

  case DemandedFields::LMULNone:
  case DemandedFields::LMULEqual:
  case DemandedFields::LMULLessThanOrEqualToM1:

  if (Used.SEWLMULRatio) {
    if (Ratio1 != Ratio2)
static DemandedFields getDemanded(const MachineInstr &MI,
                                  const RISCVSubtarget *ST) {

  if (MI.isCall() || MI.isInlineAsm() ||
      MI.readsRegister(RISCV::VL, nullptr))

  if (MI.isCall() || MI.isInlineAsm() ||
      MI.readsRegister(RISCV::VTYPE, nullptr))

        !VLOp.isReg() || !VLOp.isUndef())

    Res.MaskPolicy = false;

  if (getEEWForLoadStore(MI)) {
    Res.SEW = DemandedFields::SEWNone;
    Res.LMUL = DemandedFields::LMULNone;

    Res.TailPolicy = false;
    Res.MaskPolicy = false;

  if (isMaskRegOp(MI)) {
    Res.SEW = DemandedFields::SEWNone;
    Res.LMUL = DemandedFields::LMULNone;

  if (RISCVInstrInfo::isScalarInsertInstr(MI)) {
    Res.LMUL = DemandedFields::LMULNone;
    Res.SEWLMULRatio = false;

    if (hasUndefinedPassthru(MI)) {
      if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
          !ST->hasVInstructionsF64())
        Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
      else
        Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
      Res.TailPolicy = false;

  if (RISCVInstrInfo::isScalarExtractInstr(MI)) {
    Res.LMUL = DemandedFields::LMULNone;
    Res.SEWLMULRatio = false;
    Res.TailPolicy = false;
    Res.MaskPolicy = false;

  if (RISCVInstrInfo::isVSlideInstr(MI) && VLOp.isImm() &&
      VLOp.getImm() == 1 && hasUndefinedPassthru(MI) &&
      !ST->hasVLDependentLatency()) {
    Res.VLZeroness = true;
    Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
    Res.TailPolicy = false;

  if (RISCVInstrInfo::isScalarSplatInstr(MI) && VLOp.isImm() &&
      VLOp.getImm() == 1 && hasUndefinedPassthru(MI) &&
      !ST->hasVLDependentLatency()) {
    Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
    Res.SEWLMULRatio = false;
    if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
        !ST->hasVInstructionsF64())
      Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
    else
      Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
    Res.TailPolicy = false;

  if (isVectorCopy(ST->getRegisterInfo(), MI)) {
    Res.LMUL = DemandedFields::LMULNone;
    Res.SEW = DemandedFields::SEWNone;
    Res.SEWLMULRatio = false;
    Res.TailPolicy = false;
    Res.MaskPolicy = false;

  if (RISCVInstrInfo::isVExtractInstr(MI)) {
    Res.TailPolicy = false;

      RISCVInstrInfo::isXSfmmVectorConfigInstr(MI);
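
// VSETVLIInfo is the abstract state tracked by the dataflow analysis: it is
// either completely uninitialized, a concrete AVL (an immediate, a register
// definition identified by its value number, or VLMAX) together with a VTYPE,
// or Unknown when VL/VTYPE may have been clobbered (for example by a call or
// inline asm). The AVLState enum below records which form the AVL takes.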
  enum class AVLState : uint8_t {
    Uninitialized,
    AVLIsReg,
    AVLIsImm,
    AVLIsVLMAX,
    Unknown,
  } State = AVLState::Uninitialized;

  uint8_t TailAgnostic : 1;
  uint8_t MaskAgnostic : 1;
  uint8_t SEWLMULRatioOnly : 1;

  VSETVLIInfo()
      : AVLImm(0), TailAgnostic(false), MaskAgnostic(false),
        SEWLMULRatioOnly(false), AltFmt(false), TWiden(0) {}
  static VSETVLIInfo getUnknown() {
    VSETVLIInfo Info;
    Info.setUnknown();
    return Info;
  }

  bool isValid() const { return State != AVLState::Uninitialized; }
  void setUnknown() { State = AVLState::Unknown; }
  bool isUnknown() const { return State == AVLState::Unknown; }

  void setAVLRegDef(const VNInfo *VNInfo, Register AVLReg) {
    AVLRegDef.ValNo = VNInfo;
    AVLRegDef.DefReg = AVLReg;
    State = AVLState::AVLIsReg;
  }

  void setAVLImm(unsigned Imm) {
    AVLImm = Imm;
    State = AVLState::AVLIsImm;
  }

  void setAVLVLMAX() { State = AVLState::AVLIsVLMAX; }

  bool hasAVLImm() const { return State == AVLState::AVLIsImm; }
  bool hasAVLReg() const { return State == AVLState::AVLIsReg; }
  bool hasAVLVLMAX() const { return State == AVLState::AVLIsVLMAX; }

  Register getAVLReg() const {
    assert(hasAVLReg() && AVLRegDef.DefReg.isVirtual());
    return AVLRegDef.DefReg;
  }

  unsigned getAVLImm() const {
    return AVLImm;
  }

  const VNInfo *getAVLVNInfo() const {
    return AVLRegDef.ValNo;
  }

  const MachineInstr *getAVLDefMI(const LiveIntervals *LIS) const {
    if (!LIS || getAVLVNInfo()->isPHIDef())

  void setAVL(const VSETVLIInfo &Info) {
    if (Info.isUnknown())
      setUnknown();
    else if (Info.hasAVLReg())
      setAVLRegDef(Info.getAVLVNInfo(), Info.getAVLReg());
    else if (Info.hasAVLVLMAX())
      setAVLVLMAX();
    else
      setAVLImm(Info.getAVLImm());
  }
  unsigned getSEW() const {
    assert(isValid() && !isUnknown() &&
           "Can't use VTYPE for uninitialized or unknown");
    return SEW;
  }

    assert(isValid() && !isUnknown() &&
           "Can't use VTYPE for uninitialized or unknown");

  bool getTailAgnostic() const {
    assert(isValid() && !isUnknown() &&
           "Can't use VTYPE for uninitialized or unknown");
    return TailAgnostic;
  }

  bool getMaskAgnostic() const {
    assert(isValid() && !isUnknown() &&
           "Can't use VTYPE for uninitialized or unknown");
    return MaskAgnostic;
  }

  bool getAltFmt() const {
    assert(isValid() && !isUnknown() &&
           "Can't use VTYPE for uninitialized or unknown");
    return AltFmt;
  }

  unsigned getTWiden() const {
    assert(isValid() && !isUnknown() &&
           "Can't use VTYPE for uninitialized or unknown");
    return TWiden;
  }

  bool hasNonZeroAVL(const LiveIntervals *LIS) const {
    if (hasAVLImm())
      return getAVLImm() > 0;
    if (auto *DefMI = getAVLDefMI(LIS))
      return RISCVInstrInfo::isNonZeroLoadImmediate(*DefMI);

  bool hasEquallyZeroAVL(const VSETVLIInfo &Other,
                         const LiveIntervals *LIS) const {
    if (hasSameAVL(Other))
      return true;
    return (hasNonZeroAVL(LIS) && Other.hasNonZeroAVL(LIS));
  }
  bool hasSameAVLLatticeValue(const VSETVLIInfo &Other) const {
    if (hasAVLReg() && Other.hasAVLReg()) {
      assert(!getAVLVNInfo() == !Other.getAVLVNInfo() &&
             "we either have intervals or we don't");
      if (!getAVLVNInfo())
        return getAVLReg() == Other.getAVLReg();
      return getAVLVNInfo()->id == Other.getAVLVNInfo()->id &&
             getAVLReg() == Other.getAVLReg();
    }
    if (hasAVLImm() && Other.hasAVLImm())
      return getAVLImm() == Other.getAVLImm();

    return Other.hasAVLVLMAX() && hasSameVLMAX(Other);

  bool hasSameAVL(const VSETVLIInfo &Other) const {
    if (hasAVLReg() && Other.hasAVLReg()) {
      assert(!getAVLVNInfo() == !Other.getAVLVNInfo() &&
             "we either have intervals or we don't");

    return hasSameAVLLatticeValue(Other);
  }
  void setVTYPE(unsigned VType) {
    assert(isValid() && !isUnknown() &&
           "Can't set VTYPE for uninitialized or unknown");

    assert(isValid() && !isUnknown() &&
           "Can't set VTYPE for uninitialized or unknown");

  void setAltFmt(bool AF) { AltFmt = AF; }

    assert(isValid() && !isUnknown() && !SEWLMULRatioOnly &&
           "Can't encode VTYPE for uninitialized or unknown");

  bool hasSEWLMULRatioOnly() const { return SEWLMULRatioOnly; }

  bool hasSameVTYPE(const VSETVLIInfo &Other) const {
    assert(isValid() && Other.isValid() &&
           "Can't compare invalid VSETVLIInfos");
    assert(!isUnknown() && !Other.isUnknown() &&
           "Can't compare VTYPE in unknown state");
    assert(!SEWLMULRatioOnly && !Other.SEWLMULRatioOnly &&
           "Can't compare when only LMUL/SEW ratio is valid.");
    return std::tie(VLMul, SEW, TailAgnostic, MaskAgnostic, AltFmt, TWiden) ==
           std::tie(Other.VLMul, Other.SEW, Other.TailAgnostic,
                    Other.MaskAgnostic, Other.AltFmt, Other.TWiden);
  }

    assert(isValid() && !isUnknown() &&
           "Can't use VTYPE for uninitialized or unknown");

  bool hasSameVLMAX(const VSETVLIInfo &Other) const {
    assert(isValid() && Other.isValid() &&
           "Can't compare invalid VSETVLIInfos");
    assert(!isUnknown() && !Other.isUnknown() &&
           "Can't compare VTYPE in unknown state");

  bool hasCompatibleVTYPE(const DemandedFields &Used,
                          const VSETVLIInfo &Require) const {
    return areCompatibleVTYPEs(Require.encodeVTYPE(), encodeVTYPE(), Used);
  }
  bool isCompatible(const DemandedFields &Used, const VSETVLIInfo &Require,
                    const LiveIntervals *LIS) const {
    assert(isValid() && Require.isValid() &&
           "Can't compare invalid VSETVLIInfos");
    // Nothing is compatible with Unknown.
    if (isUnknown() || Require.isUnknown())
      return false;

    // If only the SEW/LMUL ratio is valid, this isn't compatible.
    if (SEWLMULRatioOnly || Require.SEWLMULRatioOnly)
      return false;

    if (Used.VLAny && !(hasSameAVL(Require) && hasSameVLMAX(Require)))
      return false;

    if (Used.VLZeroness && !hasEquallyZeroAVL(Require, LIS))
      return false;

    return hasCompatibleVTYPE(Used, Require);
  }
  bool operator==(const VSETVLIInfo &Other) const {
    // Uninitialized is only equal to another Uninitialized.
    if (!isValid())
      return !Other.isValid();
    if (!Other.isValid())
      return !isValid();

    // Unknown is only equal to another Unknown.
    if (isUnknown())
      return Other.isUnknown();
    if (Other.isUnknown())
      return isUnknown();

    if (!hasSameAVLLatticeValue(Other))
      return false;

    if (SEWLMULRatioOnly != Other.SEWLMULRatioOnly)
      return false;

    if (SEWLMULRatioOnly)
      return hasSameVLMAX(Other);

    return hasSameVTYPE(Other);
  }

  bool operator!=(const VSETVLIInfo &Other) const {
    return !(*this == Other);
  }

  VSETVLIInfo intersect(const VSETVLIInfo &Other) const {
    // If the new value isn't valid, ignore it.
    if (!Other.isValid())
      return *this;

    // If either is unknown, the result is unknown.
    if (isUnknown() || Other.isUnknown())
      return VSETVLIInfo::getUnknown();

    // Not an exact match, but the AVL and VLMAX may still be the same: keep
    // only the SEW/LMUL ratio.
    if (hasSameAVL(Other) && hasSameVLMAX(Other)) {
      VSETVLIInfo MergeInfo = *this;
      MergeInfo.SEWLMULRatioOnly = true;
      return MergeInfo;
    }

    return VSETVLIInfo::getUnknown();
  }
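
  // Note on the lattice: intersect() is the merge applied when combining
  // predecessor exit states at a block entry. An invalid (uninitialized)
  // input is ignored, any Unknown input makes the result Unknown, an exact
  // match is kept, and two states that agree only on AVL and VLMAX degrade
  // to a SEWLMULRatioOnly value. For example (illustrative), merging
  // "AVL=a0, e32, m1" with "AVL=a0, e16, mf2" keeps the common SEW/LMUL
  // ratio of 32 but discards the exact VTYPE.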
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream &OS) const {
    case AVLState::Uninitialized:
      OS << "Uninitialized";
    case AVLState::Unknown:
    case AVLState::AVLIsReg:
    case AVLState::AVLIsImm:
      OS << "AVLImm=" << (unsigned)AVLImm;
    case AVLState::AVLIsVLMAX:

    if (isValid() && !isUnknown()) {
         << "SEW=e" << (unsigned)SEW << ", "
         << "TailAgnostic=" << (bool)TailAgnostic << ", "
         << "MaskAgnostic=" << (bool)MaskAgnostic << ", "
         << "SEWLMULRatioOnly=" << (bool)SEWLMULRatioOnly << ", "
         << "TWiden=" << (unsigned)TWiden << ", "
         << "AltFmt=" << (bool)AltFmt;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

  bool InQueue = false;

class RISCVInsertVSETVLI : public MachineFunctionPass {
  const RISCVSubtarget *ST;
  const TargetInstrInfo *TII;
  MachineRegisterInfo *MRI;
  LiveIntervals *LIS;

  std::vector<BlockData> BlockInfo;
  std::queue<const MachineBasicBlock *> WorkList;

public:
  static char ID;

  RISCVInsertVSETVLI() : MachineFunctionPass(ID) {}
  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {

private:
  bool needVSETVLI(const DemandedFields &Used, const VSETVLIInfo &Require,
                   const VSETVLIInfo &CurInfo) const;
  bool needVSETVLIPHI(const VSETVLIInfo &Require,
                      const MachineBasicBlock &MBB) const;
  void insertVSETVLI(MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator InsertPt, DebugLoc DL,
                     const VSETVLIInfo &Info, const VSETVLIInfo &PrevInfo);

  void transferBefore(VSETVLIInfo &Info, const MachineInstr &MI) const;
  void transferAfter(VSETVLIInfo &Info, const MachineInstr &MI) const;
  bool computeVLVTYPEChanges(const MachineBasicBlock &MBB,
                             VSETVLIInfo &Info) const;
  void computeIncomingVLVTYPE(const MachineBasicBlock &MBB);
  void emitVSETVLIs(MachineBasicBlock &MBB);
  void doPRE(MachineBasicBlock &MBB);
  void insertReadVL(MachineBasicBlock &MBB);

  bool canMutatePriorConfig(const MachineInstr &PrevMI, const MachineInstr &MI,
                            const DemandedFields &Used) const;
  void coalesceVSETVLIs(MachineBasicBlock &MBB) const;

  VSETVLIInfo getInfoForVSETVLI(const MachineInstr &MI) const;
  VSETVLIInfo computeInfoForInstr(const MachineInstr &MI) const;
  void forwardVSETVLIAVL(VSETVLIInfo &Info) const;
  bool insertVSETMTK(MachineBasicBlock &MBB, TKTMMode Mode) const;
};

char RISCVInsertVSETVLI::ID = 0;
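
// Overall structure: the pass solves a forward dataflow problem over the
// machine CFG.
//   Phase 1 (computeVLVTYPEChanges) computes, for each block in isolation,
//           the VL/VTYPE state required on entry and produced on exit.
//   Phase 2 (computeIncomingVLVTYPE) iterates a worklist, merging predecessor
//           exit states with VSETVLIInfo::intersect() until the per-block
//           entry/exit states converge.
//   Phase 3 (emitVSETVLIs) walks each block with the converged entry state
//           and materializes vsetvli/vsetivli instructions where the state
//           actually has to change; coalesceVSETVLIs then removes or merges
//           redundant configuration instructions within each block.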
// If the AVL is defined by a vsetvli's output vl with the same VLMAX, we can
// replace the AVL operand with the AVL of the defining vsetvli.
void RISCVInsertVSETVLI::forwardVSETVLIAVL(VSETVLIInfo &Info) const {
  if (!Info.hasAVLReg())
    return;
  const MachineInstr *DefMI = Info.getAVLDefMI(LIS);
  if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))
    return;
  VSETVLIInfo DefInstrInfo = getInfoForVSETVLI(*DefMI);
  if (!DefInstrInfo.hasSameVLMAX(Info))
    return;
  Info.setAVL(DefInstrInfo);
}
VSETVLIInfo
RISCVInsertVSETVLI::getInfoForVSETVLI(const MachineInstr &MI) const {
  VSETVLIInfo NewInfo;
  if (MI.getOpcode() == RISCV::PseudoVSETIVLI) {
    NewInfo.setAVLImm(MI.getOperand(1).getImm());
  } else if (RISCVInstrInfo::isXSfmmVectorConfigTNInstr(MI)) {
    assert(MI.getOpcode() == RISCV::PseudoSF_VSETTNT ||
           MI.getOpcode() == RISCV::PseudoSF_VSETTNTX0);
    switch (MI.getOpcode()) {
    case RISCV::PseudoSF_VSETTNTX0:
      NewInfo.setAVLVLMAX();
      break;
    case RISCV::PseudoSF_VSETTNT:
      NewInfo.setAVLRegDef(getVNInfoFromReg(ATNReg, MI, LIS), ATNReg);
      break;
    }
  } else {
    assert(MI.getOpcode() == RISCV::PseudoVSETVLI ||
           MI.getOpcode() == RISCV::PseudoVSETVLIX0);
    if (MI.getOpcode() == RISCV::PseudoVSETVLIX0)
      NewInfo.setAVLVLMAX();
    else if (MI.getOperand(1).isUndef())
      // Use an AVL of 1 so we don't depend on the previous vl.
      NewInfo.setAVLImm(1);
    else {
      Register AVLReg = MI.getOperand(1).getReg();
      VNInfo *VNI = getVNInfoFromReg(AVLReg, MI, LIS);
      NewInfo.setAVLRegDef(VNI, AVLReg);
    }
  }
  NewInfo.setVTYPE(MI.getOperand(2).getImm());

  forwardVSETVLIAVL(NewInfo);
  return NewInfo;
}
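
// Reading the produced state back out of an existing vsetvli, combined with
// forwardVSETVLIAVL above, lets the analysis look through chains where one
// vsetvli's VL output feeds the AVL of a later one. Illustrative sequence
// (hand-written, not from a test):
//
//   vsetvli a2, a0, e32, m2, ta, ma
//   ...
//   vsetvli zero, a2, e32, m2, ta, ma
//
// Because both configurations have the same VLMAX, the second one can be
// reasoned about as if its AVL were a0 directly.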
static unsigned computeVLMAX(unsigned VLEN, unsigned SEW,
                             RISCVVType::VLMUL VLMul) {

VSETVLIInfo
RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
  VSETVLIInfo InstrInfo;
  const uint64_t TSFlags = MI.getDesc().TSFlags;

  bool TailAgnostic = true;
  bool MaskAgnostic = true;
  if (!hasUndefinedPassthru(MI)) {
    TailAgnostic = false;
    MaskAgnostic = false;

    const MachineOperand &Op = MI.getOperand(getVecPolicyOpNum(MI));
    uint64_t Policy = Op.getImm();
           "Invalid Policy Value");
      MaskAgnostic = true;

  InstrInfo.setAltFmt(AltFmt);

  unsigned Log2SEW = MI.getOperand(getSEWOpNum(MI)).getImm();
  unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;

    const MachineOperand &TWidenOp =
        MI.getOperand(MI.getNumExplicitOperands() - 1);
    unsigned TWiden = TWidenOp.getImm();

      InstrInfo.setAVLVLMAX();

      const MachineOperand &TNOp =

      InstrInfo.setAVLRegDef(getVNInfoFromReg(TNOp.getReg(), MI, LIS),
                             TNOp.getReg());

    InstrInfo.setVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic, AltFmt, TWiden);

    const MachineOperand &VLOp = MI.getOperand(getVLOpNum(MI));

        const unsigned VLMAX = computeVLMAX(ST->getRealMaxVLen(), SEW, VLMul);
        if (ST->getRealMinVLen() == ST->getRealMaxVLen() && VLMAX <= 31)
          InstrInfo.setAVLImm(VLMAX);
        else
          InstrInfo.setAVLVLMAX();

      InstrInfo.setAVLImm(Imm);

      InstrInfo.setAVLImm(1);

      VNInfo *VNI = getVNInfoFromReg(VLOp.getReg(), MI, LIS);
      InstrInfo.setAVLRegDef(VNI, VLOp.getReg());

    assert(RISCVInstrInfo::isScalarExtractInstr(MI) ||
           RISCVInstrInfo::isVExtractInstr(MI));

    InstrInfo.setAVLImm(1);

  if (std::optional<unsigned> EEW = getEEWForLoadStore(MI)) {
    assert(SEW == EEW && "Initial SEW doesn't match expected EEW");

  InstrInfo.setVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic, AltFmt, TWiden);

  forwardVSETVLIAVL(InstrInfo);
  return InstrInfo;
}
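
// How the AVL for an instruction is chosen (summarizing the logic above): the
// VLMaxSentinel immediate becomes a VLMAX request, or a small constant when
// the exact VLEN is known and the resulting VLMAX fits the 5-bit immediate of
// vsetivli (VLMAX <= 31); other immediates are kept as-is; an undef VL
// operand is treated as AVL=1 so it does not constrain the previous vl; and a
// register operand is tracked through its defining value number so that equal
// AVLs coming from the same definition can be recognized.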
void RISCVInsertVSETVLI::insertVSETVLI(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator InsertPt,
                                       DebugLoc DL, const VSETVLIInfo &Info,
                                       const VSETVLIInfo &PrevInfo) {
  ++NumInsertedVSETVL;

  if (Info.getTWiden()) {
    if (Info.hasAVLVLMAX()) {
      Register DestReg = MRI->createVirtualRegister(&RISCV::GPRNoX0RegClass);

  if (PrevInfo.isValid() && !PrevInfo.isUnknown()) {

    if (Info.hasSameAVL(PrevInfo) && Info.hasSameVLMAX(PrevInfo)) {

    if (Info.hasSameVLMAX(PrevInfo) && Info.hasAVLReg()) {
      if (const MachineInstr *DefMI = Info.getAVLDefMI(LIS);
          DefMI && RISCVInstrInfo::isVectorConfigInstr(*DefMI)) {
        VSETVLIInfo DefInfo = getInfoForVSETVLI(*DefMI);
        if (DefInfo.hasSameAVL(PrevInfo) && DefInfo.hasSameVLMAX(PrevInfo)) {

  if (Info.hasAVLImm()) {

  if (Info.hasAVLVLMAX()) {
    Register DestReg = MRI->createVirtualRegister(&RISCV::GPRNoX0RegClass);

  MRI->constrainRegClass(AVLReg, &RISCV::GPRNoX0RegClass);

  const VNInfo *CurVNI = Info.getAVLVNInfo();

  Register AVLCopyReg =
      MRI->createVirtualRegister(&RISCV::GPRNoX0RegClass);

  MI->getOperand(1).setReg(AVLCopyReg);
bool RISCVInsertVSETVLI::needVSETVLI(const DemandedFields &Used,
                                     const VSETVLIInfo &Require,
                                     const VSETVLIInfo &CurInfo) const {
  if (!CurInfo.isValid() || CurInfo.isUnknown() || CurInfo.hasSEWLMULRatioOnly())
    return true;

  if (CurInfo.isCompatible(Used, Require, LIS))
    return false;

  return true;
}

static VSETVLIInfo adjustIncoming(const VSETVLIInfo &PrevInfo,
                                  const VSETVLIInfo &NewInfo,
                                  DemandedFields &Demanded) {
  VSETVLIInfo Info = NewInfo;

  if (!Demanded.LMUL && !Demanded.SEWLMULRatio && PrevInfo.isValid() &&
      !PrevInfo.isUnknown()) {
    if (auto NewVLMul = RISCVVType::getSameRatioLMUL(
            PrevInfo.getSEW(), PrevInfo.getVLMUL(), Info.getSEW()))
      Info.setVLMul(*NewVLMul);
    Demanded.LMUL = DemandedFields::LMULEqual;
  }

  return Info;
}
// Given an incoming state reaching MI, minimally modify that state so that it
// is compatible with MI.
void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info,
                                        const MachineInstr &MI) const {
  if (isVectorCopy(ST->getRegisterInfo(), MI) &&
      (Info.isUnknown() || !Info.isValid() || Info.hasSEWLMULRatioOnly())) {

    VSETVLIInfo NewInfo;
    NewInfo.setAVLImm(1);

  DemandedFields Demanded = getDemanded(MI, ST);

  const VSETVLIInfo NewInfo = computeInfoForInstr(MI);
  assert(NewInfo.isValid() && !NewInfo.isUnknown());
  if (Info.isValid() && !needVSETVLI(Demanded, NewInfo, Info))
    return;

  const VSETVLIInfo PrevInfo = Info;
  if (!Info.isValid() || Info.isUnknown())
    Info = NewInfo;

  const VSETVLIInfo IncomingInfo = adjustIncoming(PrevInfo, NewInfo, Demanded);

  // If MI only cares whether VL is zero or non-zero, we only need to change
  // the AVL when that zeroness differs.
  bool EquallyZero = IncomingInfo.hasEquallyZeroAVL(PrevInfo, LIS) &&
                     IncomingInfo.hasSameVLMAX(PrevInfo);
  if (Demanded.VLAny || (Demanded.VLZeroness && !EquallyZero))
    Info.setAVL(IncomingInfo);

  Info.setVTYPE(
      ((Demanded.LMUL || Demanded.SEWLMULRatio) ? IncomingInfo : Info)
          .getVLMUL(),
      ((Demanded.SEW || Demanded.SEWLMULRatio) ? IncomingInfo : Info).getSEW(),
      (Demanded.TailPolicy ? IncomingInfo : Info).getTailAgnostic() ||
          IncomingInfo.getTailAgnostic(),
      (Demanded.MaskPolicy ? IncomingInfo : Info).getMaskAgnostic() ||
          IncomingInfo.getMaskAgnostic(),
      (Demanded.AltFmt ? IncomingInfo : Info).getAltFmt(),
      Demanded.TWiden ? IncomingInfo.getTWiden() : 0);

  // If we only knew the SEW/LMUL ratio previously, replace the VTYPE but keep
  // the AVL.
  if (Info.hasSEWLMULRatioOnly()) {
    VSETVLIInfo RatiolessInfo = IncomingInfo;
    RatiolessInfo.setAVL(Info);
    Info = RatiolessInfo;
  }
}
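
// transferBefore only changes the fields of the incoming state that MI
// actually demands, which is what lets many toggles disappear. Illustrative
// example (hand-written, not from a test): in
//
//   vsetvli zero, a0, e32, m2, ta, ma
//   vadd.vv v8, v8, v12
//   vmv.s.x v8, a1
//
// the scalar move roughly demands only that SEW be at least its element width
// and that VL be non-zero, so the e32/m2 configuration already in effect can
// be kept and no second vsetvli is required.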
void RISCVInsertVSETVLI::transferAfter(VSETVLIInfo &Info,
                                       const MachineInstr &MI) const {
  if (RISCVInstrInfo::isVectorConfigInstr(MI)) {
    Info = getInfoForVSETVLI(MI);
    return;
  }

  if (RISCVInstrInfo::isFaultOnlyFirstLoad(MI)) {
    // Update the AVL to the vl output of the fault-only-first load.
    assert(MI.getOperand(1).getReg().isVirtual());

      Info.setAVLRegDef(VNI, MI.getOperand(1).getReg());

      Info.setAVLRegDef(nullptr, MI.getOperand(1).getReg());

  if (MI.isCall() || MI.isInlineAsm() ||
      MI.modifiesRegister(RISCV::VL, nullptr) ||
      MI.modifiesRegister(RISCV::VTYPE, nullptr))
    Info = VSETVLIInfo::getUnknown();
}
bool RISCVInsertVSETVLI::computeVLVTYPEChanges(const MachineBasicBlock &MBB,
                                               VSETVLIInfo &Info) const {
  bool HadVectorOp = false;

  for (const MachineInstr &MI : MBB) {
    transferBefore(Info, MI);

    if (RISCVInstrInfo::isVectorConfigInstr(MI) ||
        isVectorCopy(ST->getRegisterInfo(), MI) ||
        RISCVInstrInfo::isXSfmmVectorConfigInstr(MI))
      HadVectorOp = true;

    transferAfter(Info, MI);
  }

  return HadVectorOp;
}
void RISCVInsertVSETVLI::computeIncomingVLVTYPE(const MachineBasicBlock &MBB) {
  BBInfo.InQueue = false;

  VSETVLIInfo InInfo = BBInfo.Pred;

    InInfo.setUnknown();

    InInfo = InInfo.intersect(BlockInfo[P->getNumber()].Exit);

  if (!InInfo.isValid())
    return;

  if (InInfo == BBInfo.Pred)
    return;

  BBInfo.Pred = InInfo;
  LLVM_DEBUG(dbgs() << "Entry state of " << printMBBReference(MBB)
                    << " changed to " << BBInfo.Pred << "\n");

  VSETVLIInfo TmpStatus;
  computeVLVTYPEChanges(MBB, TmpStatus);

  if (BBInfo.Exit == TmpStatus)
    return;

  BBInfo.Exit = TmpStatus;
  LLVM_DEBUG(dbgs() << "Exit state of " << printMBBReference(MBB)
                    << " changed to " << BBInfo.Exit << "\n");

    if (!BlockInfo[S->getNumber()].InQueue) {
      BlockInfo[S->getNumber()].InQueue = true;
      WorkList.push(S);
    }
bool RISCVInsertVSETVLI::needVSETVLIPHI(const VSETVLIInfo &Require,
                                        const MachineBasicBlock &MBB) const {
  if (!Require.hasAVLReg())
    return true;

  const VNInfo *Valno = Require.getAVLVNInfo();

  for (const MachineBasicBlock *PBB : MBB.predecessors()) {
    const VSETVLIInfo &PBBExit = BlockInfo[PBB->getNumber()].Exit;

    // The PHI input must be the output of a VSET(I)VLI.
    if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))
      return true;

    // The VSET(I)VLI must match the output state of the predecessor block.
    VSETVLIInfo DefInfo = getInfoForVSETVLI(*DefMI);
    if (DefInfo != PBBExit)
      return true;

    if (PBBExit.isUnknown() || !PBBExit.hasSameVTYPE(Require))
      return true;
  }

  // All incoming values to the PHI checked out; no VSETVLI is needed.
  return false;
}
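
// This handles the common vectorized-loop shape where the AVL flowing into
// the loop header is a PHI of vl values produced by vsetvlis in the
// predecessors (preheader and latch). If each incoming value comes from a
// vsetvli whose state matches that predecessor's exit state and the required
// VTYPE, the configuration is already correct on entry and no vsetvli has to
// be emitted for the PHI itself.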
void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
  VSETVLIInfo CurInfo = BlockInfo[MBB.getNumber()].Pred;

  bool PrefixTransparent = true;
  for (MachineInstr &MI : MBB) {
    const VSETVLIInfo PrevInfo = CurInfo;
    transferBefore(CurInfo, MI);

    // If this is an explicit VSETVLI or VSETIVLI, update our state.
    if (RISCVInstrInfo::isVectorConfigInstr(MI)) {
      assert(MI.getOperand(3).getReg() == RISCV::VL &&
             MI.getOperand(4).getReg() == RISCV::VTYPE &&
             "Unexpected operands where VL and VTYPE should be");
      MI.getOperand(3).setIsDead(false);
      MI.getOperand(4).setIsDead(false);
      PrefixTransparent = false;
    }

    if (EnsureWholeVectorRegisterMoveValidVTYPE &&
        isVectorCopy(ST->getRegisterInfo(), MI)) {
      if (!PrevInfo.isCompatible(DemandedFields::all(), CurInfo, LIS)) {
        insertVSETVLI(MBB, MI, MI.getDebugLoc(), CurInfo, PrevInfo);
        PrefixTransparent = false;
      }

    uint64_t TSFlags = MI.getDesc().TSFlags;

      if (!PrevInfo.isCompatible(DemandedFields::all(), CurInfo, LIS)) {
        // At the start of a block we may be able to avoid the vsetvli if the
        // predecessors already provide the required state (see
        // needVSETVLIPHI).
        if (!PrefixTransparent || needVSETVLIPHI(CurInfo, MBB))
          insertVSETVLI(MBB, MI, MI.getDebugLoc(), CurInfo, PrevInfo);
        PrefixTransparent = false;
      }

        MachineOperand &VLOp = MI.getOperand(getVLOpNum(MI));

    for (MachineInstr *DeadMI : DeadMIs) {
      if (!TII->isAddImmediate(*DeadMI, Reg))
        continue;
      Register AddReg = DeadMI->getOperand(1).getReg();
      DeadMI->eraseFromParent();

    if (MI.isInlineAsm()) {

    if (MI.isCall() || MI.isInlineAsm() ||
        MI.modifiesRegister(RISCV::VL, nullptr) ||
        MI.modifiesRegister(RISCV::VTYPE, nullptr))
      PrefixTransparent = false;

    transferAfter(CurInfo, MI);
  }

  if (CurInfo != Info.Exit) {

  assert(CurInfo == Info.Exit && "InsertVSETVLI dataflow invariant violated");
}
void RISCVInsertVSETVLI::doPRE(MachineBasicBlock &MBB) {
  MachineBasicBlock *UnavailablePred = nullptr;
  VSETVLIInfo AvailableInfo;
  for (MachineBasicBlock *P : MBB.predecessors()) {
    const VSETVLIInfo &PredInfo = BlockInfo[P->getNumber()].Exit;
    if (PredInfo.isUnknown()) {
      if (UnavailablePred)
        return;
      UnavailablePred = P;
    } else if (!AvailableInfo.isValid()) {
      AvailableInfo = PredInfo;
    } else if (AvailableInfo != PredInfo) {
      return;
    }
  }

  // Unreachable, single pred, or full redundancy.
  if (!UnavailablePred || !AvailableInfo.isValid())
    return;

  if (AvailableInfo.hasSEWLMULRatioOnly())
    return;

  // If the AVL is a register, prove its value is available at the point we
  // would insert the vsetvli.
  if (AvailableInfo.hasAVLReg()) {
    SlotIndex SI = AvailableInfo.getAVLVNInfo()->def;

  // Model the effect of changing the input state of the block on the exit
  // state.
  VSETVLIInfo CurInfo = AvailableInfo;
  int TransitionsRemoved = 0;
  for (const MachineInstr &MI : MBB) {
    const VSETVLIInfo LastInfo = CurInfo;
    const VSETVLIInfo LastOldInfo = OldInfo;
    transferBefore(CurInfo, MI);
    transferBefore(OldInfo, MI);
    if (CurInfo == LastInfo)
      TransitionsRemoved++;
    if (LastOldInfo == OldInfo)
      TransitionsRemoved--;
    transferAfter(CurInfo, MI);
    transferAfter(OldInfo, MI);
    if (CurInfo == OldInfo)
      break;
  }
  if (CurInfo != OldInfo || TransitionsRemoved <= 0)
    return;

  // Finally, update the dataflow state and insert the actual vsetvli.
  auto OldExit = BlockInfo[UnavailablePred->getNumber()].Exit;
  LLVM_DEBUG(dbgs() << "PRE VSETVLI from " << MBB.getName() << " to "
                    << UnavailablePred->getName() << " with state "
                    << AvailableInfo << "\n");
  BlockInfo[UnavailablePred->getNumber()].Exit = AvailableInfo;

  insertVSETVLI(*UnavailablePred, InsertPt,
                UnavailablePred->findDebugLoc(InsertPt),
                AvailableInfo, OldExit);
}
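
// doPRE is a lightweight partial redundancy elimination: when every
// predecessor except one already exits with the state this block needs, the
// vsetvli can be hoisted into that single "unavailable" predecessor, taking
// it off the more frequently executed paths (e.g. out of a loop body and into
// the preheader). The transparency walk above checks that the speculated
// state survives to the point where it is first used and that the rewrite
// does not add more VL/VTYPE transitions than it removes.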
bool RISCVInsertVSETVLI::canMutatePriorConfig(
    const MachineInstr &PrevMI, const MachineInstr &MI,
    const DemandedFields &Used) const {
  // If MI does not preserve the prior VL, we also have to check the AVL.
  if (!RISCVInstrInfo::isVLPreservingConfig(MI)) {

    if (Used.VLZeroness) {
      if (RISCVInstrInfo::isVLPreservingConfig(PrevMI))
        return false;
      if (!getInfoForVSETVLI(PrevMI).hasEquallyZeroAVL(getInfoForVSETVLI(MI),
                                                       LIS))
        return false;
    }

    auto &AVL = MI.getOperand(1);

    // If the AVL is a register, its definition must be the same at PrevMI as
    // it is at MI.
    if (AVL.isReg() && AVL.getReg() != RISCV::X0) {
      VNInfo *VNI = getVNInfoFromReg(AVL.getReg(), MI, LIS);
      VNInfo *PrevVNI = getVNInfoFromReg(AVL.getReg(), PrevMI, LIS);
      if (!VNI || !PrevVNI || VNI != PrevVNI)
        return false;
    }
  }

  auto VType = MI.getOperand(2).getImm();
  return areCompatibleVTYPEs(PriorVType, VType, Used);
}
void RISCVInsertVSETVLI::coalesceVSETVLIs(MachineBasicBlock &MBB) const {
  MachineInstr *NextMI = nullptr;
  // We can have arbitrary code in successors, so VL and VTYPE must be
  // considered demanded at the block end.
  DemandedFields Used;

  auto dropAVLUse = [&](MachineOperand &MO) {
    if (!MO.isReg() || !MO.getReg().isVirtual())
      return;

    MachineInstr *VLOpDef = MRI->getUniqueVRegDef(OldVLReg);
    if (VLOpDef && TII->isAddImmediate(*VLOpDef, OldVLReg) &&
        MRI->use_nodbg_empty(OldVLReg))
      ToDelete.push_back(VLOpDef);

        RISCVInstrInfo::isXSfmmVectorConfigInstr(MI)) {

    if (!RISCVInstrInfo::isVectorConfigInstr(MI)) {
      Used.doUnion(getDemanded(MI, ST));
      if (MI.isCall() || MI.isInlineAsm() ||
          MI.modifiesRegister(RISCV::VL, nullptr) ||
          MI.modifiesRegister(RISCV::VTYPE, nullptr))
        NextMI = nullptr;
      continue;
    }

    if (!MI.getOperand(0).isDead())

    if (!Used.usedVL() && !Used.usedVTYPE()) {
      dropAVLUse(MI.getOperand(1));

      MI.eraseFromParent();
      NumCoalescedVSETVL++;

    if (canMutatePriorConfig(MI, *NextMI, Used)) {
      if (!RISCVInstrInfo::isVLPreservingConfig(*NextMI)) {

        MI.getOperand(0).setReg(DefReg);
        MI.getOperand(0).setIsDead(false);

      dropAVLUse(MI.getOperand(1));

      SlotIndex NextMISlot =

      LiveInterval::Segment S(MISlot, NextMISlot, DefVNI);
      DefVNI->def = MISlot;

      NumCoalescedVSETVL++;

    Used = getDemanded(MI, ST);

  for (auto *MI : ToDelete) {
    assert(MI->getOpcode() == RISCV::ADDI);
    Register AddReg = MI->getOperand(1).getReg();

    MI->eraseFromParent();
  }
}
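
// Coalescing walks the block backwards, accumulating the fields demanded by
// the instructions seen since the last configuration instruction.
// Illustrative effect (hand-written, not from a test):
//
//   vsetvli zero, a0, e32, m2, ta, ma
//   vle32.v v8, (a1)
//   vsetvli zero, a0, e32, m2, ta, ma
//   vadd.vv v8, v8, v12
//
// collapses to a single leading vsetvli, since the second one establishes a
// state the first already provides and nothing in between demands more. An
// ADDI whose only purpose was to materialize the AVL of a removed vsetvli is
// deleted as well (see dropAVLUse/ToDelete above).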
void RISCVInsertVSETVLI::insertReadVL(MachineBasicBlock &MBB) {
    MachineInstr &MI = *I++;
    if (RISCVInstrInfo::isFaultOnlyFirstLoad(MI)) {
      Register VLOutput = MI.getOperand(1).getReg();
      if (!MI.getOperand(1).isDead()) {
                          TII->get(RISCV::PseudoReadVL), VLOutput);

          SlotIndex NewDefSI =

          DefVNI->def = NewDefSI;

      MI.getOperand(1).setReg(RISCV::X0);
      MI.addRegisterDefined(RISCV::VL, MRI->getTargetRegisterInfo());
bool RISCVInsertVSETVLI::insertVSETMTK(MachineBasicBlock &MBB,
                                       TKTMMode Mode) const {
  for (auto &MI : MBB) {
    uint64_t TSFlags = MI.getDesc().TSFlags;
    if (RISCVInstrInfo::isXSfmmVectorConfigTMTKInstr(MI) ||

    VSETVLIInfo CurrInfo = computeInfoForInstr(MI);

    unsigned Opcode = 0;
      Opcode = RISCV::PseudoSF_VSETTK;
      Opcode = RISCV::PseudoSF_VSETTM;
    assert(OpNum && Opcode && "Invalid OpNum or Opcode");

    MachineOperand &Op = MI.getOperand(OpNum);
    Op.setIsKill(false);
bool RISCVInsertVSETVLI::runOnMachineFunction(MachineFunction &MF) {
  if (!ST->hasVInstructions())
    return false;

  TII = ST->getInstrInfo();

  auto *LISWrapper = getAnalysisIfAvailable<LiveIntervalsWrapperPass>();
  LIS = LISWrapper ? &LISWrapper->getLIS() : nullptr;

  assert(BlockInfo.empty() && "Expect empty block infos");

  bool HaveVectorOp = false;

  // Phase 1 - determine how VL/VTYPE are affected by each block.
  for (const MachineBasicBlock &MBB : MF) {
    VSETVLIInfo TmpStatus;
    HaveVectorOp |= computeVLVTYPEChanges(MBB, TmpStatus);

    BBInfo.Exit = TmpStatus;
                      << " is " << BBInfo.Exit << "\n");
  }

  // If no instruction needs a VSETVLI, we're done.
  if (!HaveVectorOp) {

  // Phase 2 - determine the exit VL/VTYPE from each block.
  for (const MachineBasicBlock &MBB : MF) {
    WorkList.push(&MBB);
  }
  while (!WorkList.empty()) {
    const MachineBasicBlock &MBB = *WorkList.front();
    WorkList.pop();
    computeIncomingVLVTYPE(MBB);
  }

  // Perform partial redundancy elimination of vsetvli transitions.
  for (MachineBasicBlock &MBB : MF)
    doPRE(MBB);

  // Phase 3 - add any vsetvli instructions needed in the block.
  for (MachineBasicBlock &MBB : MF)
    emitVSETVLIs(MBB);

  // Now that all vsetvlis are explicit, do block-local coalescing to remove
  // or merge redundant ones.
  for (MachineBasicBlock *MBB : post_order(&MF.front()))
    coalesceVSETVLIs(*MBB);

  // Insert PseudoReadVL after fault-only-first loads.
  for (MachineBasicBlock &MBB : MF)
    insertReadVL(MBB);

  for (MachineBasicBlock &MBB : MF) {
    insertVSETMTK(MBB, VSETTM);
    insertVSETMTK(MBB, VSETTK);
  }

  return HaveVectorOp;
}

FunctionPass *llvm::createRISCVInsertVSETVLIPass() {
  return new RISCVInsertVSETVLI();
}
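
// Summary: each RVV pseudo carries its VL/VTYPE requirements in its operands
// and TSFlags; this pass turns those implicit requirements into explicit
// vsetvli/vsetivli instructions, emitting one only where the tracked state
// actually changes. For example (illustrative), two adjacent e32/m1 vector
// adds using the same AVL in a0 end up sharing a single
//
//   vsetvli zero, a0, e32, m1, ta, ma
//
// rather than each being preceded by its own configuration instruction.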