return LI.getVNInfoBefore(SI);
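The line above is the tail of the small helper that maps an AVL register to the value number reaching the instruction. A minimal sketch of that helper, assembled from the getVNInfoFromReg, getInterval, getSlotIndexes, and getInstructionIndex signatures listed at the end of this excerpt; the null-LIS early exit is an assumption about how missing liveness info is handled.

// Sketch: find the value of Reg live just before MI.
static VNInfo *getVNInfoFromReg(Register Reg, const MachineInstr &MI,
                                const LiveIntervals *LIS) {
  assert(Reg.isVirtual());
  if (!LIS)
    return nullptr;                 // assumption: tolerate missing liveness info
  LiveInterval &LI = LIS->getInterval(Reg);
  SlotIndex SI = LIS->getSlotIndexes()->getInstructionIndex(MI);
  return LI.getVNInfoBefore(SI);
}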
if (!MI.isRegTiedToUseOperand(0, &UseOpIdx))
  // ...
return Fractional || LMul == 1;
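The fragment above is the body of an LMUL bound check. A minimal sketch of the full predicate, assuming the pair returned by RISCVVType::decodeVLMUL (declared below) is {LMul, Fractional}:

// True for LMUL = 1/8, 1/4, 1/2 or 1.
static bool isLMUL1OrSmaller(RISCVVType::VLMUL LMUL) {
  auto [LMul, Fractional] = RISCVVType::decodeVLMUL(LMUL);
  return Fractional || LMul == 1;
}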
if (Used.SEWLMULRatio) {
  // ...
  if (Ratio1 != Ratio2)
    // ...
}
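For context, a sketch of the ratio comparison this fragment performs inside areCompatibleVTYPEs, built from the getSEWLMULRatio, getSEW, and getVLMUL helpers listed below; the Ratio1/Ratio2 spelling matches the fragment, everything else is an assumption.

// Two vtypes preserve VLMAX only if they imply the same SEW/LMUL ratio.
if (Used.SEWLMULRatio) {
  auto Ratio1 = RISCVVType::getSEWLMULRatio(RISCVVType::getSEW(CurVType),
                                            RISCVVType::getVLMUL(CurVType));
  auto Ratio2 = RISCVVType::getSEWLMULRatio(RISCVVType::getSEW(NewVType),
                                            RISCVVType::getVLMUL(NewVType));
  if (Ratio1 != Ratio2)
    return false;
}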
if (MI.isCall() || MI.isInlineAsm() ||
    MI.readsRegister(RISCV::VL, nullptr))
  // ...
if (MI.isCall() || MI.isInlineAsm() ||
    MI.readsRegister(RISCV::VTYPE, nullptr))
  // ...

// ... (continuation of a larger condition)
    !VLOp.isReg() || !VLOp.isUndef())
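The two conditions above are the conservative cases in the demanded-fields computation (getDemanded, listed below). A sketch of the action taken when they fire; the demandVL/demandVTYPE helpers on DemandedFields are assumed names not shown in this excerpt.

// Calls, inline asm, and explicit readers of VL/VTYPE can observe the
// registers directly, so every subfield must be preserved across them.
if (MI.isCall() || MI.isInlineAsm() || MI.readsRegister(RISCV::VL, nullptr))
  Res.demandVL();      // assumed helper: mark all of VL as demanded
if (MI.isCall() || MI.isInlineAsm() || MI.readsRegister(RISCV::VTYPE, nullptr))
  Res.demandVTYPE();   // assumed helper: mark all of VTYPE as demanded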
if (RISCVInstrInfo::isScalarInsertInstr(MI)) {
  // ...
if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
    !ST->hasVInstructionsF64())
  // ...
if (RISCVInstrInfo::isScalarExtractInstr(MI)) {
  // ...
if (RISCVInstrInfo::isVSlideInstr(MI) && VLOp.isImm() &&
    /* ... */
    !ST->hasVLDependentLatency()) {
if (RISCVInstrInfo::isScalarSplatInstr(MI) && VLOp.isImm() &&
    /* ... */
    !ST->hasVLDependentLatency()) {
  // ...
if (RISCVInstrInfo::isFloatScalarMoveOrScalarSplatInstr(MI) &&
    !ST->hasVInstructionsF64())
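Each guarded case above narrows what the instruction actually demands from VL and VTYPE. As an illustration of the VL=1 splat case, a sketch of the relaxation suggested by the LMULLessThanOrEqualToM1 and SEWLMULRatio entries at the end of this excerpt; the VLAny and VLZeroness member names are assumptions.

// A splat with an immediate AVL of 1 and an undefined passthru only cares
// that VL is nonzero, and works for any LMUL <= 1, so an exact VL/VTYPE
// match is not required.
Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
Res.SEWLMULRatio = false;
Res.VLAny = false;       // assumed field: exact VL value not needed
Res.VLZeroness = true;   // assumed field: VL != 0 must be preserved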
if (RISCVInstrInfo::isVExtractInstr(MI)) {
  // ...
// ...
    RISCVInstrInfo::isXSfmmVectorConfigInstr(MI);
void RISCVVSETVLIInfoAnalysis::forwardVSETVLIAVL(VSETVLIInfo &Info) const {
  if (!Info.hasAVLReg())
    // ...
  if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))
    // ...
  Info.setAVL(DefInstrInfo);
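Putting the forwardVSETVLIAVL fragments together, a sketch of the whole routine under two assumptions: the defining instruction is reached through an Info.getAVLDefMI(LIS)-style accessor (not shown in this excerpt), and forwarding is gated on the two configurations sharing a VLMAX, per the hasSameVLMAX and getInfoForVSETVLI declarations below.

// If the AVL register is itself produced by a vset{i}vli, adopt that
// instruction's AVL so later comparisons can see through the copy.
void RISCVVSETVLIInfoAnalysis::forwardVSETVLIAVL(VSETVLIInfo &Info) const {
  if (!Info.hasAVLReg())
    return;
  const MachineInstr *DefMI = Info.getAVLDefMI(LIS);   // assumed accessor
  if (!DefMI || !RISCVInstrInfo::isVectorConfigInstr(*DefMI))
    return;
  VSETVLIInfo DefInstrInfo = getInfoForVSETVLI(*DefMI);
  if (!DefInstrInfo.hasSameVLMAX(Info))                // only if VLMAX matches
    return;
  Info.setAVL(DefInstrInfo);
}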
if (MI.getOpcode() == RISCV::PseudoVSETIVLI) {
  // ...
} else if (RISCVInstrInfo::isXSfmmVectorConfigTNInstr(MI)) {
  assert(MI.getOpcode() == RISCV::PseudoSF_VSETTNT ||
         MI.getOpcode() == RISCV::PseudoSF_VSETTNTX0);
  switch (MI.getOpcode()) {
  case RISCV::PseudoSF_VSETTNTX0:
    // ...
  case RISCV::PseudoSF_VSETTNT:
    // ...
  }
}
// ...
assert(MI.getOpcode() == RISCV::PseudoVSETVLI ||
       MI.getOpcode() == RISCV::PseudoVSETVLIX0);
if (MI.getOpcode() == RISCV::PseudoVSETVLIX0)
  // ...
else if (MI.getOperand(1).isUndef())
  // ...
// ...
forwardVSETVLIAVL(NewInfo);
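To make the branching above concrete, a compact sketch of how the three AVL forms of a vset{i}vli map onto VSETVLIInfo, using setAVLImm, setAVLVLMAX, setAVLRegDef, setVTYPE(unsigned), and getVNInfoFromReg as declared below; the operand layout (AVL in operand 1, vtype immediate in operand 2) is an assumption about the pseudos.

// PseudoVSETIVLI carries an immediate AVL, PseudoVSETVLIX0 means "AVL = VLMAX",
// and PseudoVSETVLI takes the AVL in a scalar register.
VSETVLIInfo NewInfo;
if (MI.getOpcode() == RISCV::PseudoVSETIVLI) {
  NewInfo.setAVLImm(MI.getOperand(1).getImm());
} else if (MI.getOpcode() == RISCV::PseudoVSETVLIX0) {
  NewInfo.setAVLVLMAX();
} else {
  Register AVLReg = MI.getOperand(1).getReg();
  NewInfo.setAVLRegDef(getVNInfoFromReg(AVLReg, MI, LIS), AVLReg);
}
NewInfo.setVTYPE(MI.getOperand(2).getImm());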
const uint64_t TSFlags = MI.getDesc().TSFlags;
// ...
bool TailAgnostic = true;
bool MaskAgnostic = true;
// ...
TailAgnostic = false;
MaskAgnostic = false;
// ...
assert(/* ... */ && "Invalid Policy Value");
InstrInfo.setAltFmt(AltFmt);
// ...
unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
// ...
// XSfmm configs carry the widening factor as the last explicit operand.
const MachineOperand &TWidenOp =
    MI.getOperand(MI.getNumExplicitOperands() - 1);
unsigned TWiden = TWidenOp.getImm();
// ...
InstrInfo.setAVLVLMAX();
// ...
InstrInfo.setVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic, AltFmt, TWiden);
int64_t Imm = VLOp.getImm();
// ...
const unsigned VLMAX = computeVLMAX(ST->getRealMaxVLen(), SEW, VLMul);
if (ST->getRealMinVLen() == ST->getRealMaxVLen() && VLMAX <= 31)
  InstrInfo.setAVLImm(VLMAX);
else
  InstrInfo.setAVLVLMAX();
// ...
InstrInfo.setAVLImm(Imm);
// ...
InstrInfo.setAVLImm(1);
// ...
InstrInfo.setAVLRegDef(VNI, VLOp.getReg());
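When the AVL operand is the VLMAX sentinel and VLEN is known exactly, the code above folds VLMAX into a plain immediate if it fits vsetivli's 5-bit uimm (hence the <= 31 check). For reference, a sketch of the VLMAX computation matching the computeVLMAX declaration listed below and the standard RVV formula VLMAX = LMUL * VLEN / SEW.

// Scale VLEN/SEW by LMUL, dividing instead when LMUL is fractional.
static unsigned computeVLMAX(unsigned VLEN, unsigned SEW,
                             RISCVVType::VLMUL VLMul) {
  auto [LMul, Fractional] = RISCVVType::decodeVLMUL(VLMul);
  if (Fractional)
    VLEN /= LMul;
  else
    VLEN *= LMul;
  return VLEN / SEW;
}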
assert(RISCVInstrInfo::isScalarExtractInstr(MI) ||
       RISCVInstrInfo::isVExtractInstr(MI));
// ...
InstrInfo.setAVLImm(1);
// ...
assert(SEW == EEW && "Initial SEW doesn't match expected EEW");
// ...
InstrInfo.setVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic, AltFmt, /* ... */);
// ...
forwardVSETVLIAVL(InstrInfo);
SlotIndexes *getSlotIndexes() const
LiveInterval &getInterval(Register Reg)
MachineInstr - Representation of each machine instruction.
MachineOperand - Representation of each machine instruction operand.
bool isImm() const - Tests if this is a MO_Immediate operand.
Register getReg() const - Returns the register number.
VSETVLIInfo getInfoForVSETVLI(const MachineInstr &MI) const
VSETVLIInfo computeInfoForInstr(const MachineInstr &MI) const
VSETVLIInfo - Defines the abstract state with which the forward dataflow models the values of the VL and VTYPE registers.
void setAVLImm(unsigned Imm)
void setVTYPE(unsigned VType)
bool hasSameVLMAX(const VSETVLIInfo &Other) const
bool hasCompatibleVTYPE(const DemandedFields &Used, const VSETVLIInfo &Require) const
unsigned encodeVTYPE() const
void setAVLRegDef(const VNInfo *VNInfo, Register AVLReg)
Register - Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
constexpr bool isVirtual() const - Return true if the specified register number is in the virtual register namespace.
SlotIndex - An opaque wrapper around machine indexes.
SlotIndex getInstructionIndex(const MachineInstr &MI, bool IgnoreBundle=false) const - Returns the base index for the given instruction.
VNInfo - Value Number Information.
static unsigned getVecPolicyOpNum(const MCInstrDesc &Desc)
static bool usesMaskPolicy(uint64_t TSFlags)
static bool hasTWidenOp(uint64_t TSFlags)
static RISCVVType::VLMUL getLMul(uint64_t TSFlags)
static unsigned getVLOpNum(const MCInstrDesc &Desc)
static AltFmtType getAltFmtType(uint64_t TSFlags)
static bool hasVLOp(uint64_t TSFlags)
static unsigned getTNOpNum(const MCInstrDesc &Desc)
static bool hasVecPolicyOp(uint64_t TSFlags)
static unsigned getSEWOpNum(const MCInstrDesc &Desc)
static bool hasSEWOp(uint64_t TSFlags)
static bool isTailAgnostic(unsigned VType)
static unsigned getXSfmmWiden(unsigned VType)
static bool isMaskAgnostic(unsigned VType)
LLVM_ABI std::pair< unsigned, bool > decodeVLMUL(VLMUL VLMul)
static bool hasXSfmmWiden(unsigned VType)
LLVM_ABI unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul)
static bool isValidSEW(unsigned SEW)
static bool isAltFmt(unsigned VType)
static unsigned getSEW(unsigned VType)
static VLMUL getVLMUL(unsigned VType)
static unsigned getVecPolicyOpNum(const MachineInstr &MI)
DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) - Return the fields and properties demanded by the provided instruction.
bool areCompatibleVTYPEs(uint64_t CurVType, uint64_t NewVType, const DemandedFields &Used) - Return true if moving from CurVType to NewVType is indistinguishable from the perspective of an instruction that demands only the fields in Used.
static VNInfo *getVNInfoFromReg(Register Reg, const MachineInstr &MI, const LiveIntervals *LIS) - Given a virtual register Reg, return the corresponding VNInfo for it.
static bool isMaskRegOp(const MachineInstr &MI) - Return true if this is an operation on mask registers.
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
static std::optional<unsigned> getEEWForLoadStore(const MachineInstr &MI) - Get the EEW for a load or store instruction.
static unsigned getVLOpNum(const MachineInstr &MI)
static unsigned getSEWOpNum(const MachineInstr &MI)
static unsigned computeVLMAX(unsigned VLEN, unsigned SEW, RISCVVType::VLMUL VLMul)
static constexpr int64_t VLMaxSentinel
bool isVectorCopy(const TargetRegisterInfo *TRI, const MachineInstr &MI) - Return true if MI is a copy that will be lowered to one or more vmvNr.vs.
static bool hasUndefinedPassthru(const MachineInstr &MI) - Return true if the inactive elements in the result are entirely undefined.
static bool isLMUL1OrSmaller(RISCVVType::VLMUL LMUL)
DemandedFields - Which subfields of VL or VTYPE have values we need to preserve?
DemandedFields::SEW - enum field recording how precisely SEW must be preserved (e.g. SEWGreaterThanOrEqualAndLessThan64).
DemandedFields::LMUL - enum field recording how precisely LMUL must be preserved (e.g. LMULLessThanOrEqualToM1).