#define DEBUG_TYPE "riscv-insert-vsetvli"
#define RISCV_INSERT_VSETVLI_NAME "RISC-V Insert VSETVLI pass"

STATISTIC(NumInsertedVSETVL, "Number of VSETVL inst inserted");
STATISTIC(NumCoalescedVSETVL, "Number of VSETVL inst coalesced");

static cl::opt<bool> EnsureWholeVectorRegisterMoveValidVTYPE(
    DEBUG_TYPE "-whole-vector-register-move-valid-vtype", cl::Hidden,
    cl::desc("Insert vsetvlis before vmvNr.vs to ensure vtype is valid and "
             "vill is cleared"),
    cl::init(true));

  return LI.getVNInfoBefore(SI);

static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) {
  case RISCV::VSSE64_V:
  const unsigned Log2SEW = MI.getOperand(getSEWOpNum(MI)).getImm();

  if (!MI.isRegTiedToUseOperand(0, &UseOpIdx))

  return MI.isCopy() && MI.getOperand(0).getReg().isPhysical() &&
         RISCVRegisterInfo::isRVVRegClass(
             TRI->getMinimalPhysRegClass(MI.getOperand(0).getReg()));
struct DemandedFields {
  bool VLZeroness = false;

    SEWGreaterThanOrEqualAndLessThan64 = 2,
    SEWGreaterThanOrEqual = 1,

    LMULLessThanOrEqualToM1 = 1,

  bool SEWLMULRatio = false;
  bool TailPolicy = false;
  bool MaskPolicy = false;

  bool usedVTYPE() const {
    return SEW || LMUL || SEWLMULRatio || TailPolicy || MaskPolicy || VILL ||
           AltFmt || TWiden;

    return VLAny || VLZeroness;

  static DemandedFields all() {

  void doUnion(const DemandedFields &B) {
    VLZeroness |= B.VLZeroness;
    SEW = std::max(SEW, B.SEW);
    LMUL = std::max(LMUL, B.LMUL);
    SEWLMULRatio |= B.SEWLMULRatio;
    TailPolicy |= B.TailPolicy;
    MaskPolicy |= B.MaskPolicy;

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream &OS) const {
    OS << "VLAny=" << VLAny << ", ";
    OS << "VLZeroness=" << VLZeroness << ", ";
    case SEWGreaterThanOrEqual:
      OS << "SEWGreaterThanOrEqual";
    case SEWGreaterThanOrEqualAndLessThan64:
      OS << "SEWGreaterThanOrEqualAndLessThan64";
    case LMULLessThanOrEqualToM1:
      OS << "LMULLessThanOrEqualToM1";
    OS << "SEWLMULRatio=" << SEWLMULRatio << ", ";
    OS << "TailPolicy=" << TailPolicy << ", ";
    OS << "MaskPolicy=" << MaskPolicy << ", ";
    OS << "VILL=" << VILL << ", ";
    OS << "AltFmt=" << AltFmt << ", ";
    OS << "TWiden=" << TWiden;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

  return Fractional || LMul == 1;

                                const DemandedFields &Used) {
  case DemandedFields::SEWNone:
  case DemandedFields::SEWEqual:
  case DemandedFields::SEWGreaterThanOrEqual:
  case DemandedFields::SEWGreaterThanOrEqualAndLessThan64:
  case DemandedFields::LMULNone:
  case DemandedFields::LMULEqual:
  case DemandedFields::LMULLessThanOrEqualToM1:
  if (Used.SEWLMULRatio) {
    if (Ratio1 != Ratio2)
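// --- Added illustration (not part of the upstream pass) ---------------------
// The VTYPE comparisons above operate on the VTYPE CSR layout from the RVV
// 1.0 spec: vtype[2:0] = vlmul, vtype[5:3] = vsew (SEW = 8 << vsew),
// vtype[6] = vta, vtype[7] = vma. The helper below is a hypothetical,
// self-contained sketch of that encoding; the pass itself goes through
// RISCVVType::encodeVTYPE().
static unsigned encodeVTypeSketch(unsigned VLMulField, unsigned SEW,
                                  bool TailAgnostic, bool MaskAgnostic) {
  unsigned VSEW = 0;
  for (unsigned S = SEW; S > 8; S >>= 1)
    ++VSEW; // SEW = 8 * 2^vsew, so e8 -> 0, e16 -> 1, e32 -> 2, e64 -> 3
  return (VLMulField & 0x7) | (VSEW << 3) | (unsigned(TailAgnostic) << 6) |
         (unsigned(MaskAgnostic) << 7);
}
// e.g. encodeVTypeSketch(/*m1*/ 0, /*e32*/ 32, true, true) == 0xD0.
// ----------------------------------------------------------------------------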
  if (MI.isCall() || MI.isInlineAsm() ||
      MI.readsRegister(RISCV::VL, nullptr))
  if (MI.isCall() || MI.isInlineAsm() ||
      MI.readsRegister(RISCV::VTYPE, nullptr))

      !VLOp.isReg() || !VLOp.isUndef())
    Res.MaskPolicy = false;

  if (getEEWForLoadStore(MI)) {
    Res.SEW = DemandedFields::SEWNone;
    Res.LMUL = DemandedFields::LMULNone;
    Res.TailPolicy = false;
    Res.MaskPolicy = false;

  if (isMaskRegOp(MI)) {
    Res.SEW = DemandedFields::SEWNone;
    Res.LMUL = DemandedFields::LMULNone;

  if (RISCVInstrInfo::isScalarInsertInstr(MI)) {
    Res.LMUL = DemandedFields::LMULNone;
    Res.SEWLMULRatio = false;
    if (hasUndefinedPassthru(MI)) {
      if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
          !ST->hasVInstructionsF64())
        Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
      else
        Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
      Res.TailPolicy = false;

  if (RISCVInstrInfo::isScalarExtractInstr(MI)) {
    Res.LMUL = DemandedFields::LMULNone;
    Res.SEWLMULRatio = false;
    Res.TailPolicy = false;
    Res.MaskPolicy = false;

    if (RISCVInstrInfo::isVSlideInstr(MI) && VLOp.isImm() &&
        VLOp.getImm() == 1 && hasUndefinedPassthru(MI) &&
        !ST->hasVLDependentLatency()) {
      Res.VLZeroness = true;
      Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
      Res.TailPolicy = false;

    if (RISCVInstrInfo::isScalarSplatInstr(MI) && VLOp.isImm() &&
        VLOp.getImm() == 1 && hasUndefinedPassthru(MI) &&
        !ST->hasVLDependentLatency()) {
      Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
      Res.SEWLMULRatio = false;
      if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
          !ST->hasVInstructionsF64())
        Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
      else
        Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
      Res.TailPolicy = false;

  if (isVectorCopy(ST->getRegisterInfo(), MI)) {
    Res.LMUL = DemandedFields::LMULNone;
    Res.SEW = DemandedFields::SEWNone;
    Res.SEWLMULRatio = false;
    Res.TailPolicy = false;
    Res.MaskPolicy = false;

  if (RISCVInstrInfo::isVExtractInstr(MI)) {
    Res.TailPolicy = false;

      RISCVInstrInfo::isXSfmmVectorConfigInstr(MI);
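// --- Added commentary (not in the upstream source) --------------------------
// VSETVLIInfo below is the per-program-point dataflow value: it is either
// Uninitialized (nothing seen yet), a concrete state (AVL tracked as a
// register def, an immediate, or VLMAX, plus the VTYPE fields), or Unknown
// (VL/VTYPE clobbered, e.g. by a call or inline asm). intersect() is the
// lattice meet used when merging predecessor exit states.
// ----------------------------------------------------------------------------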
  } State = Uninitialized;

  uint8_t TailAgnostic : 1;
  uint8_t MaskAgnostic : 1;
  uint8_t SEWLMULRatioOnly : 1;

      : AVLImm(0), TailAgnostic(false), MaskAgnostic(false),
        SEWLMULRatioOnly(false) {}

  static VSETVLIInfo getUnknown() {

  bool isValid() const { return State != Uninitialized; }
  void setUnknown() { State = Unknown; }
  bool isUnknown() const { return State == Unknown; }

  void setAVLRegDef(const VNInfo *VNInfo, Register AVLReg) {
    AVLRegDef.ValNo = VNInfo;
    AVLRegDef.DefReg = AVLReg;

  void setAVLImm(unsigned Imm) {
  void setAVLVLMAX() { State = AVLIsVLMAX; }

  bool hasAVLImm() const { return State == AVLIsImm; }
  bool hasAVLReg() const { return State == AVLIsReg; }
  bool hasAVLVLMAX() const { return State == AVLIsVLMAX; }
    assert(hasAVLReg() && AVLRegDef.DefReg.isVirtual());
    return AVLRegDef.DefReg;
  unsigned getAVLImm() const {
  const VNInfo *getAVLVNInfo() const {
    return AVLRegDef.ValNo;

  const MachineInstr *getAVLDefMI(const LiveIntervals *LIS) const {
    if (!LIS || getAVLVNInfo()->isPHIDef())

  void setAVL(const VSETVLIInfo &Info) {
    if (Info.isUnknown())
    else if (Info.hasAVLReg())
      setAVLRegDef(Info.getAVLVNInfo(), Info.getAVLReg());
    else if (Info.hasAVLVLMAX())
      setAVLImm(Info.getAVLImm());

  unsigned getSEW() const { return SEW; }
  bool getTailAgnostic() const { return TailAgnostic; }
  bool getMaskAgnostic() const { return MaskAgnostic; }
  bool getAltFmt() const { return AltFmt; }
  unsigned getTWiden() const { return TWiden; }

  bool hasNonZeroAVL(const LiveIntervals *LIS) const {
      return getAVLImm() > 0;
      if (auto *DefMI = getAVLDefMI(LIS))
        return RISCVInstrInfo::isNonZeroLoadImmediate(*DefMI);

  bool hasEquallyZeroAVL(const VSETVLIInfo &Other,
                         const LiveIntervals *LIS) const {
    if (hasSameAVL(Other))
    return (hasNonZeroAVL(LIS) && Other.hasNonZeroAVL(LIS));

  bool hasSameAVLLatticeValue(const VSETVLIInfo &Other) const {
    if (hasAVLReg() && Other.hasAVLReg()) {
             "we either have intervals or we don't");
        return getAVLReg() == Other.getAVLReg();
      return getAVLVNInfo()->id == Other.getAVLVNInfo()->id &&
             getAVLReg() == Other.getAVLReg();

    if (hasAVLImm() && Other.hasAVLImm())
      return getAVLImm() == Other.getAVLImm();

    return Other.hasAVLVLMAX() && hasSameVLMAX(Other);

  bool hasSameAVL(const VSETVLIInfo &Other) const {
    if (hasAVLReg() && Other.hasAVLReg()) {
             "we either have intervals or we don't");
    return hasSameAVLLatticeValue(Other);

  void setVTYPE(unsigned VType) {
           "Can't set VTYPE for uninitialized or unknown");
           "Can't set VTYPE for uninitialized or unknown");

  void setAltFmt(bool AF) { AltFmt = AF; }
           "Can't encode VTYPE for uninitialized or unknown");

  bool hasSEWLMULRatioOnly() const { return SEWLMULRatioOnly; }

  bool hasSameVTYPE(const VSETVLIInfo &Other) const {
           "Can't compare invalid VSETVLIInfos");
           "Can't compare VTYPE in unknown state");
    assert(!SEWLMULRatioOnly && !Other.SEWLMULRatioOnly &&
           "Can't compare when only LMUL/SEW ratio is valid.");
    return std::tie(VLMul, SEW, TailAgnostic, MaskAgnostic, AltFmt, TWiden) ==
           std::tie(Other.VLMul, Other.SEW, Other.TailAgnostic,
                    Other.MaskAgnostic, Other.AltFmt, Other.TWiden);
           "Can't use VTYPE for uninitialized or unknown");

  bool hasSameVLMAX(const VSETVLIInfo &Other) const {
           "Can't compare invalid VSETVLIInfos");
           "Can't compare VTYPE in unknown state");

  bool hasCompatibleVTYPE(const DemandedFields &Used,
                          const VSETVLIInfo &Require) const {
    return areCompatibleVTYPEs(Require.encodeVTYPE(), encodeVTYPE(), Used);

  bool isCompatible(const DemandedFields &Used, const VSETVLIInfo &Require,
                    const LiveIntervals *LIS) const {
           "Can't compare invalid VSETVLIInfos");
    if (isUnknown() || Require.isUnknown())
    if (SEWLMULRatioOnly || Require.SEWLMULRatioOnly)
    if (Used.VLAny && !(hasSameAVL(Require) && hasSameVLMAX(Require)))
    if (Used.VLZeroness && !hasEquallyZeroAVL(Require, LIS))
    return hasCompatibleVTYPE(Used, Require);

      return !Other.isValid();
    if (!Other.isValid())
      return Other.isUnknown();
    if (Other.isUnknown())
    if (!hasSameAVLLatticeValue(Other))
    if (SEWLMULRatioOnly != Other.SEWLMULRatioOnly)
    if (SEWLMULRatioOnly)
      return hasSameVLMAX(Other);
    return hasSameVTYPE(Other);

    return !(*this == Other);

    if (!Other.isValid())
    if (isUnknown() || Other.isUnknown())
      return VSETVLIInfo::getUnknown();
    if (hasSameAVL(Other) && hasSameVLMAX(Other)) {
      VSETVLIInfo MergeInfo = *this;
      MergeInfo.SEWLMULRatioOnly = true;
    return VSETVLIInfo::getUnknown();

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void print(raw_ostream &OS) const {
      OS << "Uninitialized";
      OS << "AVLImm=" << (unsigned)AVLImm;
       << "SEW=e" << (unsigned)SEW << ", "
       << "TailAgnostic=" << (bool)TailAgnostic << ", "
       << "MaskAgnostic=" << (bool)MaskAgnostic << ", "
       << "SEWLMULRatioOnly=" << (bool)SEWLMULRatioOnly << ", "
       << "TWiden=" << (unsigned)TWiden << ", "
       << "AltFmt=" << (bool)AltFmt << "}";
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

  bool InQueue = false;

  const RISCVSubtarget *ST;
  const TargetInstrInfo *TII;
  MachineRegisterInfo *MRI;

  std::vector<BlockData> BlockInfo;
  std::queue<const MachineBasicBlock *> WorkList;

  RISCVInsertVSETVLI() : MachineFunctionPass(ID) {}
  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {

  bool needVSETVLI(const DemandedFields &Used, const VSETVLIInfo &Require,
                   const VSETVLIInfo &CurInfo) const;
  bool needVSETVLIPHI(const VSETVLIInfo &Require,
                      const MachineBasicBlock &MBB) const;
  void insertVSETVLI(MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator InsertPt, DebugLoc DL,
                     const VSETVLIInfo &Info, const VSETVLIInfo &PrevInfo);

  void transferBefore(VSETVLIInfo &Info, const MachineInstr &MI) const;
  void transferAfter(VSETVLIInfo &Info, const MachineInstr &MI) const;
  bool computeVLVTYPEChanges(const MachineBasicBlock &MBB,
                             VSETVLIInfo &Info) const;
  void computeIncomingVLVTYPE(const MachineBasicBlock &MBB);
  void emitVSETVLIs(MachineBasicBlock &MBB);
  void doPRE(MachineBasicBlock &MBB);
  void insertReadVL(MachineBasicBlock &MBB);

  bool canMutatePriorConfig(const MachineInstr &PrevMI, const MachineInstr &MI,
                            const DemandedFields &Used) const;
  void coalesceVSETVLIs(MachineBasicBlock &MBB) const;

  VSETVLIInfo getInfoForVSETVLI(const MachineInstr &MI) const;
  VSETVLIInfo computeInfoForInstr(const MachineInstr &MI) const;
  void forwardVSETVLIAVL(VSETVLIInfo &Info) const;
  bool insertVSETMTK(MachineBasicBlock &MBB, TKTMMode Mode) const;
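// --- Added commentary (not in the upstream source) --------------------------
// Rough shape of the pass, as reflected in runOnMachineFunction() below:
// phase 1 computes each block's VL/VTYPE effect in isolation, phase 2 runs a
// forward dataflow over the CFG to obtain block entry states, and phase 3
// re-walks every block emitting the needed vsetvli instructions; PRE, vsetvli
// coalescing, and PseudoReadVL insertion run as cleanups afterwards.
// ----------------------------------------------------------------------------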
char RISCVInsertVSETVLI::ID = 0;

void RISCVInsertVSETVLI::forwardVSETVLIAVL(VSETVLIInfo &Info) const {
  if (!Info.hasAVLReg())
  if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))
  VSETVLIInfo DefInstrInfo = getInfoForVSETVLI(*DefMI);
  if (!DefInstrInfo.hasSameVLMAX(Info))
  Info.setAVL(DefInstrInfo);

VSETVLIInfo
RISCVInsertVSETVLI::getInfoForVSETVLI(const MachineInstr &MI) const {
  if (MI.getOpcode() == RISCV::PseudoVSETIVLI) {
    NewInfo.setAVLImm(MI.getOperand(1).getImm());
  } else if (RISCVInstrInfo::isXSfmmVectorConfigTNInstr(MI)) {
    assert(MI.getOpcode() == RISCV::PseudoSF_VSETTNT ||
           MI.getOpcode() == RISCV::PseudoSF_VSETTNTX0);
    switch (MI.getOpcode()) {
    case RISCV::PseudoSF_VSETTNTX0:
      NewInfo.setAVLVLMAX();
    case RISCV::PseudoSF_VSETTNT:
      NewInfo.setAVLRegDef(getVNInfoFromReg(ATNReg, MI, LIS), ATNReg);
    assert(MI.getOpcode() == RISCV::PseudoVSETVLI ||
           MI.getOpcode() == RISCV::PseudoVSETVLIX0);
    if (MI.getOpcode() == RISCV::PseudoVSETVLIX0)
      NewInfo.setAVLVLMAX();
    else if (MI.getOperand(1).isUndef())
      NewInfo.setAVLImm(1);
      VNInfo *VNI = getVNInfoFromReg(AVLReg, MI, LIS);
      NewInfo.setAVLRegDef(VNI, AVLReg);
  NewInfo.setVTYPE(MI.getOperand(2).getImm());

  forwardVSETVLIAVL(NewInfo);
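// --- Added illustration (not part of the upstream pass) ---------------------
// computeVLMAX() below evaluates VLMAX = (VLEN / SEW) * LMUL. A hypothetical
// self-contained sketch of the same arithmetic, with fractional LMUL given as
// a numerator/denominator pair:
static unsigned computeVLMAXSketch(unsigned VLEN, unsigned SEW,
                                   unsigned LMulNum, unsigned LMulDenom) {
  return VLEN / SEW * LMulNum / LMulDenom;
}
// e.g. VLEN=128, SEW=32, LMUL=2   -> computeVLMAXSketch(128, 32, 2, 1) == 8
//      VLEN=128, SEW=8,  LMUL=1/2 -> computeVLMAXSketch(128, 8, 1, 2)  == 8
// ----------------------------------------------------------------------------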
static unsigned computeVLMAX(unsigned VLEN, unsigned SEW,
                             RISCVVType::VLMUL VLMul) {

VSETVLIInfo
RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
  VSETVLIInfo InstrInfo;
  const uint64_t TSFlags = MI.getDesc().TSFlags;

  bool TailAgnostic = true;
  bool MaskAgnostic = true;
  if (!hasUndefinedPassthru(MI)) {
    TailAgnostic = false;
    MaskAgnostic = false;

    const MachineOperand &Op = MI.getOperand(getVecPolicyOpNum(MI));
    uint64_t Policy = Op.getImm();
           "Invalid Policy Value");
      MaskAgnostic = true;

  InstrInfo.setAltFmt(AltFmt);

  unsigned Log2SEW = MI.getOperand(getSEWOpNum(MI)).getImm();
  unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;

    const MachineOperand &TWidenOp =
        MI.getOperand(MI.getNumExplicitOperands() - 1);
    unsigned TWiden = TWidenOp.getImm();
      InstrInfo.setAVLVLMAX();
      const MachineOperand &TNOp =
      InstrInfo.setAVLRegDef(getVNInfoFromReg(TNOp.getReg(), MI, LIS),
                             TNOp.getReg());
    InstrInfo.setVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic, AltFmt, TWiden);

    const MachineOperand &VLOp = MI.getOperand(getVLOpNum(MI));
      const unsigned VLMAX = computeVLMAX(ST->getRealMaxVLen(), SEW, VLMul);
      if (ST->getRealMinVLen() == ST->getRealMaxVLen() && VLMAX <= 31)
        InstrInfo.setAVLImm(VLMAX);
        InstrInfo.setAVLVLMAX();
      InstrInfo.setAVLImm(Imm);
      InstrInfo.setAVLImm(1);
      VNInfo *VNI = getVNInfoFromReg(VLOp.getReg(), MI, LIS);
      InstrInfo.setAVLRegDef(VNI, VLOp.getReg());
    assert(RISCVInstrInfo::isScalarExtractInstr(MI) ||
           RISCVInstrInfo::isVExtractInstr(MI));
    InstrInfo.setAVLImm(1);

  if (std::optional<unsigned> EEW = getEEWForLoadStore(MI)) {
    assert(SEW == EEW && "Initial SEW doesn't match expected EEW");

  InstrInfo.setVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic, AltFmt,

  forwardVSETVLIAVL(InstrInfo);
void RISCVInsertVSETVLI::insertVSETVLI(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator InsertPt,
                                       DebugLoc DL, const VSETVLIInfo &Info,
                                       const VSETVLIInfo &PrevInfo) {
  ++NumInsertedVSETVL;

  if (Info.getTWiden()) {
    if (Info.hasAVLVLMAX()) {
      Register DestReg = MRI->createVirtualRegister(&RISCV::GPRNoX0RegClass);

  if (PrevInfo.isValid() && !PrevInfo.isUnknown()) {
    if (Info.hasSameAVL(PrevInfo) && Info.hasSameVLMAX(PrevInfo)) {

    if (Info.hasSameVLMAX(PrevInfo) && Info.hasAVLReg()) {
      if (const MachineInstr *DefMI = Info.getAVLDefMI(LIS);
          DefMI && RISCVInstrInfo::isVectorConfigInstr(*DefMI)) {
        VSETVLIInfo DefInfo = getInfoForVSETVLI(*DefMI);
        if (DefInfo.hasSameAVL(PrevInfo) && DefInfo.hasSameVLMAX(PrevInfo)) {

  if (Info.hasAVLImm()) {

  if (Info.hasAVLVLMAX()) {
    Register DestReg = MRI->createVirtualRegister(&RISCV::GPRNoX0RegClass);

  MRI->constrainRegClass(AVLReg, &RISCV::GPRNoX0RegClass);

  const VNInfo *CurVNI = Info.getAVLVNInfo();

  Register AVLCopyReg =
      MRI->createVirtualRegister(&RISCV::GPRNoX0RegClass);
  MI->getOperand(1).setReg(AVLCopyReg);

bool RISCVInsertVSETVLI::needVSETVLI(const DemandedFields &Used,
                                     const VSETVLIInfo &Require,
                                     const VSETVLIInfo &CurInfo) const {
  if (!CurInfo.isValid() || CurInfo.isUnknown() ||
      CurInfo.hasSEWLMULRatioOnly())
  if (CurInfo.isCompatible(Used, Require, LIS))

static VSETVLIInfo adjustIncoming(const VSETVLIInfo &PrevInfo,
                                  const VSETVLIInfo &NewInfo,
                                  DemandedFields &Demanded) {
  VSETVLIInfo Info = NewInfo;

  if (!Demanded.LMUL && !Demanded.SEWLMULRatio && PrevInfo.isValid() &&
      !PrevInfo.isUnknown()) {
    if (auto NewVLMul = RISCVVType::getSameRatioLMUL(
            PrevInfo.getSEW(), PrevInfo.getVLMUL(), Info.getSEW()))
      Info.setVLMul(*NewVLMul);
    Demanded.LMUL = DemandedFields::LMULEqual;
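// --- Added illustration (not part of the upstream pass) ---------------------
// adjustIncoming() above relies on RISCVVType::getSameRatioLMUL: when only
// the SEW/LMUL ratio is demanded, LMUL may be rescaled so that
// NewSEW/NewLMUL == OldSEW/OldLMUL, leaving VLMAX unchanged. A hypothetical
// sketch of that arithmetic, with LMUL expressed in eighths (mf8=1 ... m8=64):
static unsigned sameRatioLMulInEighths(unsigned OldSEW,
                                       unsigned OldLMulEighths,
                                       unsigned NewSEW) {
  unsigned Ratio = OldSEW * 8 / OldLMulEighths; // SEW/LMUL ratio to preserve
  return NewSEW * 8 / Ratio;                    // new LMUL, still in eighths
}
// e.g. e32/m1 has ratio 32; for e16 the same ratio needs mf2:
//      sameRatioLMulInEighths(32, 8, 16) == 4 (i.e. LMUL = 1/2).
// ----------------------------------------------------------------------------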
void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info,
                                        const MachineInstr &MI) const {
  if (isVectorCopy(ST->getRegisterInfo(), MI) &&
      (Info.isUnknown() || !Info.isValid() || Info.hasSEWLMULRatioOnly())) {
    VSETVLIInfo NewInfo;
    NewInfo.setAVLImm(1);

  DemandedFields Demanded = getDemanded(MI, ST);

  const VSETVLIInfo NewInfo = computeInfoForInstr(MI);
  assert(NewInfo.isValid() && !NewInfo.isUnknown());
  if (Info.isValid() && !needVSETVLI(Demanded, NewInfo, Info))

  const VSETVLIInfo PrevInfo = Info;
  if (!Info.isValid() || Info.isUnknown())

  const VSETVLIInfo IncomingInfo = adjustIncoming(PrevInfo, NewInfo, Demanded);

  bool EquallyZero = IncomingInfo.hasEquallyZeroAVL(PrevInfo, LIS) &&
                     IncomingInfo.hasSameVLMAX(PrevInfo);
  if (Demanded.VLAny || (Demanded.VLZeroness && !EquallyZero))
    Info.setAVL(IncomingInfo);

  Info.setVTYPE(
      ((Demanded.LMUL || Demanded.SEWLMULRatio) ? IncomingInfo : Info)
          .getVLMUL(),
      ((Demanded.SEW || Demanded.SEWLMULRatio) ? IncomingInfo : Info).getSEW(),
      (Demanded.TailPolicy ? IncomingInfo : Info).getTailAgnostic() ||
          IncomingInfo.getTailAgnostic(),
      (Demanded.MaskPolicy ? IncomingInfo : Info).getMaskAgnostic() ||
          IncomingInfo.getMaskAgnostic(),
      (Demanded.AltFmt ? IncomingInfo : Info).getAltFmt(),
      Demanded.TWiden ? IncomingInfo.getTWiden() : 0);

  if (Info.hasSEWLMULRatioOnly()) {
    VSETVLIInfo RatiolessInfo = IncomingInfo;
    RatiolessInfo.setAVL(Info);
    Info = RatiolessInfo;
void RISCVInsertVSETVLI::transferAfter(VSETVLIInfo &Info,
                                       const MachineInstr &MI) const {
  if (RISCVInstrInfo::isVectorConfigInstr(MI)) {
    Info = getInfoForVSETVLI(MI);

  if (RISCVInstrInfo::isFaultOnlyFirstLoad(MI)) {
    assert(MI.getOperand(1).getReg().isVirtual());
      Info.setAVLRegDef(VNI, MI.getOperand(1).getReg());
      Info.setAVLRegDef(nullptr, MI.getOperand(1).getReg());

  if (MI.isCall() || MI.isInlineAsm() ||
      MI.modifiesRegister(RISCV::VL, nullptr) ||
      MI.modifiesRegister(RISCV::VTYPE, nullptr))
    Info = VSETVLIInfo::getUnknown();
bool RISCVInsertVSETVLI::computeVLVTYPEChanges(const MachineBasicBlock &MBB,
                                               VSETVLIInfo &Info) const {
  bool HadVectorOp = false;

  for (const MachineInstr &MI : MBB) {
    transferBefore(Info, MI);

    if (RISCVInstrInfo::isVectorConfigInstr(MI) ||
        isVectorCopy(ST->getRegisterInfo(), MI) ||
        RISCVInstrInfo::isXSfmmVectorConfigInstr(MI))

void RISCVInsertVSETVLI::computeIncomingVLVTYPE(const MachineBasicBlock &MBB) {
  BBInfo.InQueue = false;

  VSETVLIInfo InInfo = BBInfo.Pred;
    InInfo.setUnknown();
      InInfo = InInfo.intersect(BlockInfo[P->getNumber()].Exit);

  if (!InInfo.isValid())

  if (InInfo == BBInfo.Pred)

  BBInfo.Pred = InInfo;
             << " changed to " << BBInfo.Pred << "\n");

  VSETVLIInfo TmpStatus;
  computeVLVTYPEChanges(MBB, TmpStatus);

  if (BBInfo.Exit == TmpStatus)

  BBInfo.Exit = TmpStatus;
             << " changed to " << BBInfo.Exit << "\n");

    if (!BlockInfo[S->getNumber()].InQueue) {
      BlockInfo[S->getNumber()].InQueue = true;

bool RISCVInsertVSETVLI::needVSETVLIPHI(const VSETVLIInfo &Require,
                                        const MachineBasicBlock &MBB) const {
  if (!Require.hasAVLReg())

  const VNInfo *Valno = Require.getAVLVNInfo();

    const VSETVLIInfo &PBBExit = BlockInfo[PBB->getNumber()].Exit;

    if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))

    VSETVLIInfo DefInfo = getInfoForVSETVLI(*DefMI);
    if (DefInfo != PBBExit)

    if (PBBExit.isUnknown() || !PBBExit.hasSameVTYPE(Require))
void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
  bool PrefixTransparent = true;
  for (MachineInstr &MI : MBB) {
    const VSETVLIInfo PrevInfo = CurInfo;
    transferBefore(CurInfo, MI);

    if (RISCVInstrInfo::isVectorConfigInstr(MI)) {
      assert(MI.getOperand(3).getReg() == RISCV::VL &&
             MI.getOperand(4).getReg() == RISCV::VTYPE &&
             "Unexpected operands where VL and VTYPE should be");
      MI.getOperand(3).setIsDead(false);
      MI.getOperand(4).setIsDead(false);
      PrefixTransparent = false;

        isVectorCopy(ST->getRegisterInfo(), MI)) {
      if (!PrevInfo.isCompatible(DemandedFields::all(), CurInfo, LIS)) {
        insertVSETVLI(MBB, MI, MI.getDebugLoc(), CurInfo, PrevInfo);
        PrefixTransparent = false;

    uint64_t TSFlags = MI.getDesc().TSFlags;
      if (!PrevInfo.isCompatible(DemandedFields::all(), CurInfo, LIS)) {
        if (!PrefixTransparent || needVSETVLIPHI(CurInfo, MBB))
          insertVSETVLI(MBB, MI, MI.getDebugLoc(), CurInfo, PrevInfo);
        PrefixTransparent = false;

      MachineOperand &VLOp = MI.getOperand(getVLOpNum(MI));

    for (MachineInstr *DeadMI : DeadMIs) {
      if (!TII->isAddImmediate(*DeadMI, Reg))
      DeadMI->eraseFromParent();

    if (MI.isInlineAsm()) {

    if (MI.isCall() || MI.isInlineAsm() ||
        MI.modifiesRegister(RISCV::VL, nullptr) ||
        MI.modifiesRegister(RISCV::VTYPE, nullptr))
      PrefixTransparent = false;

    transferAfter(CurInfo, MI);

  if (CurInfo != Info.Exit) {
  assert(CurInfo == Info.Exit && "InsertVSETVLI dataflow invariant violated");
void RISCVInsertVSETVLI::doPRE(MachineBasicBlock &MBB) {
  MachineBasicBlock *UnavailablePred = nullptr;
  VSETVLIInfo AvailableInfo;
    const VSETVLIInfo &PredInfo = BlockInfo[P->getNumber()].Exit;
    if (PredInfo.isUnknown()) {
      if (UnavailablePred)
      UnavailablePred = P;
    } else if (!AvailableInfo.isValid()) {
      AvailableInfo = PredInfo;
    } else if (AvailableInfo != PredInfo) {

  if (!UnavailablePred || !AvailableInfo.isValid())

  if (AvailableInfo.hasSEWLMULRatioOnly())

  if (AvailableInfo.hasAVLReg()) {
    SlotIndex SI = AvailableInfo.getAVLVNInfo()->def;

  VSETVLIInfo CurInfo = AvailableInfo;
  int TransitionsRemoved = 0;
  for (const MachineInstr &MI : MBB) {
    const VSETVLIInfo LastInfo = CurInfo;
    const VSETVLIInfo LastOldInfo = OldInfo;
    transferBefore(CurInfo, MI);
    transferBefore(OldInfo, MI);
    if (CurInfo == LastInfo)
      TransitionsRemoved++;
    if (LastOldInfo == OldInfo)
      TransitionsRemoved--;
    transferAfter(CurInfo, MI);
    transferAfter(OldInfo, MI);
    if (CurInfo == OldInfo)
  if (CurInfo != OldInfo || TransitionsRemoved <= 0)

  auto OldExit = BlockInfo[UnavailablePred->getNumber()].Exit;
                    << UnavailablePred->getName() << " with state "
                    << AvailableInfo << "\n");
  BlockInfo[UnavailablePred->getNumber()].Exit = AvailableInfo;

  insertVSETVLI(*UnavailablePred, InsertPt,
                AvailableInfo, OldExit);
bool RISCVInsertVSETVLI::canMutatePriorConfig(
    const MachineInstr &PrevMI, const MachineInstr &MI,
    const DemandedFields &Used) const {
  if (!RISCVInstrInfo::isVLPreservingConfig(MI)) {
    if (Used.VLZeroness) {
      if (RISCVInstrInfo::isVLPreservingConfig(PrevMI))
      if (!getInfoForVSETVLI(PrevMI).hasEquallyZeroAVL(getInfoForVSETVLI(MI),
                                                       LIS))

    auto &AVL = MI.getOperand(1);
    if (AVL.isReg() && AVL.getReg() != RISCV::X0) {
      VNInfo *VNI = getVNInfoFromReg(AVL.getReg(), MI, LIS);
      VNInfo *PrevVNI = getVNInfoFromReg(AVL.getReg(), PrevMI, LIS);
      if (!VNI || !PrevVNI || VNI != PrevVNI)

  auto VType = MI.getOperand(2).getImm();
  return areCompatibleVTYPEs(PriorVType, VType, Used);
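// --- Added commentary (not in the upstream source) --------------------------
// coalesceVSETVLIs() below walks each block bottom-up. A vsetvli whose VL and
// VTYPE outputs are not demanded before the next vsetvli can be erased, or
// the earlier configuration can be mutated in place when
// canMutatePriorConfig() above proves the rewrite yields a compatible state.
// ----------------------------------------------------------------------------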
void RISCVInsertVSETVLI::coalesceVSETVLIs(MachineBasicBlock &MBB) const {
  MachineInstr *NextMI = nullptr;
  DemandedFields Used;

  auto dropAVLUse = [&](MachineOperand &MO) {
    if (!MO.isReg() || !MO.getReg().isVirtual())
    MachineInstr *VLOpDef = MRI->getUniqueVRegDef(OldVLReg);
    if (VLOpDef && TII->isAddImmediate(*VLOpDef, OldVLReg) &&
        MRI->use_nodbg_empty(OldVLReg))

        RISCVInstrInfo::isXSfmmVectorConfigInstr(MI)) {

    if (!RISCVInstrInfo::isVectorConfigInstr(MI)) {
      Used.doUnion(getDemanded(MI, ST));
      if (MI.isCall() || MI.isInlineAsm() ||
          MI.modifiesRegister(RISCV::VL, nullptr) ||
          MI.modifiesRegister(RISCV::VTYPE, nullptr))

    if (!MI.getOperand(0).isDead())

      if (!Used.usedVL() && !Used.usedVTYPE()) {
        dropAVLUse(MI.getOperand(1));
        MI.eraseFromParent();
        NumCoalescedVSETVL++;

      if (canMutatePriorConfig(MI, *NextMI, Used)) {
        if (!RISCVInstrInfo::isVLPreservingConfig(*NextMI)) {
          MI.getOperand(0).setReg(DefReg);
          MI.getOperand(0).setIsDead(false);

        dropAVLUse(MI.getOperand(1));

          SlotIndex NextMISlot =
          LiveInterval::Segment S(MISlot, NextMISlot, DefVNI);
          DefVNI->def = MISlot;

        NumCoalescedVSETVL++;

    Used = getDemanded(MI, ST);

  for (auto *MI : ToDelete) {
    MI->eraseFromParent();
void RISCVInsertVSETVLI::insertReadVL(MachineBasicBlock &MBB) {
    MachineInstr &MI = *I++;
    if (RISCVInstrInfo::isFaultOnlyFirstLoad(MI)) {
      Register VLOutput = MI.getOperand(1).getReg();
      if (!MI.getOperand(1).isDead()) {
                TII->get(RISCV::PseudoReadVL), VLOutput);
        SlotIndex NewDefSI =
        DefVNI->def = NewDefSI;
      MI.getOperand(1).setReg(RISCV::X0);
      MI.addRegisterDefined(RISCV::VL, MRI->getTargetRegisterInfo());
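// --- Added commentary (not in the upstream source) --------------------------
// insertReadVL() above handles fault-only-first loads (vle*ff.v), whose trap
// behaviour can shrink VL at runtime: if the VL result operand is live, a
// PseudoReadVL is inserted to capture the post-load VL; otherwise the operand
// is replaced with X0 and VL is only marked as implicitly defined.
// ----------------------------------------------------------------------------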
bool RISCVInsertVSETVLI::insertVSETMTK(MachineBasicBlock &MBB,
                                       TKTMMode Mode) const {
  for (auto &MI : MBB) {
    uint64_t TSFlags = MI.getDesc().TSFlags;
    if (RISCVInstrInfo::isXSfmmVectorConfigTMTKInstr(MI) ||

    VSETVLIInfo CurrInfo = computeInfoForInstr(MI);

    unsigned Opcode = 0;
      Opcode = RISCV::PseudoSF_VSETTK;
      Opcode = RISCV::PseudoSF_VSETTM;
    assert(OpNum && Opcode && "Invalid OpNum or Opcode");

    MachineOperand &Op = MI.getOperand(OpNum);
    Op.setIsKill(false);
bool RISCVInsertVSETVLI::runOnMachineFunction(MachineFunction &MF) {
  if (!ST->hasVInstructions())

  TII = ST->getInstrInfo();

  auto *LISWrapper = getAnalysisIfAvailable<LiveIntervalsWrapperPass>();
  LIS = LISWrapper ? &LISWrapper->getLIS() : nullptr;

  assert(BlockInfo.empty() && "Expect empty block infos");

  bool HaveVectorOp = false;

  for (const MachineBasicBlock &MBB : MF) {
    VSETVLIInfo TmpStatus;
    HaveVectorOp |= computeVLVTYPEChanges(MBB, TmpStatus);
    BBInfo.Exit = TmpStatus;
               << " is " << BBInfo.Exit << "\n");

  if (!HaveVectorOp) {

  for (const MachineBasicBlock &MBB : MF) {
    WorkList.push(&MBB);

  while (!WorkList.empty()) {
    const MachineBasicBlock &MBB = *WorkList.front();
    computeIncomingVLVTYPE(MBB);

  for (MachineBasicBlock &MBB : MF)

  for (MachineBasicBlock &MBB : MF)

    coalesceVSETVLIs(*MBB);

  for (MachineBasicBlock &MBB : MF)

  for (MachineBasicBlock &MBB : MF) {
    insertVSETMTK(MBB, VSETTM);
    insertVSETMTK(MBB, VSETTK);

  return HaveVectorOp;

  return new RISCVInsertVSETVLI();