#define DEBUG_TYPE "si-fix-sgpr-copies"
81 "amdgpu-enable-merge-m0",
82 cl::desc(
"Merge and hoist M0 initializations"),
class V2SCopyInfo {
public:
  // The VGPR-to-SGPR copy being analyzed.
  MachineInstr *Copy;
  // All SALU instructions reachable from this copy in the SSA graph.
  SetVector<MachineInstr *> SChain;
  // Number of SGPR-to-VGPR copies needed to feed SALU results back to VALU.
  unsigned NumSVCopies;
  unsigned Score;
  // Count of v_readfirstlane_b32 instructions that would have to be inserted
  // to keep SChain on the SALU (one per 32 bits of copy width).
  unsigned NumReadfirstlanes;
  // Cached conversion decision, used to speed up selection of copies for
  // processing.
  bool NeedToBeConvertedToVALU = false;
  // Unique ID, used as the key for sibling-penalty bookkeeping.
  unsigned ID;
  // Count of other VGPR-to-SGPR copies that contribute to this copy's SChain.
  unsigned SiblingPenalty = 0;
  SetVector<unsigned> Siblings;

  V2SCopyInfo() : Copy(nullptr), ID(0) {}
  V2SCopyInfo(unsigned Id, MachineInstr *C, unsigned Width)
      : Copy(C), NumSVCopies(0), NumReadfirstlanes(Width / 32), ID(Id) {}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() {
    dbgs() << ID << " : " << *Copy << "\n\tS:" << SChain.size()
           << "\n\tSV:" << NumSVCopies << "\n\tSP: " << SiblingPenalty
           << "\nScore: " << Score << "\n";
  }
#endif
};
class SIFixSGPRCopies {
  MachineDominatorTree *MDT;
  SmallVector<MachineInstr *, 4> S2VCopies, RegSequences, PHINodes;
  unsigned NextVGPRToSGPRCopyID = 0;
  MapVector<unsigned, V2SCopyInfo> V2SCopies;
  DenseMap<MachineInstr *, SetVector<unsigned>> SiblingPenalty;

public:
  MachineRegisterInfo *MRI;
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  SIFixSGPRCopies(MachineDominatorTree *MDT) : MDT(MDT) {}

  bool run(MachineFunction &MF);
  unsigned getNextVGPRToSGPRCopyId() { return ++NextVGPRToSGPRCopyID; }
  bool needToBeConvertedToVALU(V2SCopyInfo *I);
  // ... (remaining member and method declarations elided in this excerpt) ...
};
bool SIFixSGPRCopiesLegacy::runOnMachineFunction(MachineFunction &MF) {
  MachineDominatorTree *MDT =
      &getAnalysis<MachineDominatorTreeWrapperPass>().getDomTree();
  SIFixSGPRCopies Impl(MDT);
  return Impl.run(MF);
}

char SIFixSGPRCopiesLegacy::ID = 0;

FunctionPass *llvm::createSIFixSGPRCopiesLegacyPass() {
  return new SIFixSGPRCopiesLegacy();
}
static std::pair<const TargetRegisterClass *, const TargetRegisterClass *>
getCopyRegClasses(const MachineInstr &Copy, const SIRegisterInfo &TRI,
                  const MachineRegisterInfo &MRI) {
  Register DstReg = Copy.getOperand(0).getReg();
  Register SrcReg = Copy.getOperand(1).getReg();

  const TargetRegisterClass *SrcRC = SrcReg.isVirtual()
                                         ? MRI.getRegClass(SrcReg)
                                         : TRI.getPhysRegBaseClass(SrcReg);

  const TargetRegisterClass *DstRC = DstReg.isVirtual()
                                         ? MRI.getRegClass(DstReg)
                                         : TRI.getPhysRegBaseClass(DstReg);

  return std::pair(SrcRC, DstRC);
}
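// Usage sketch (added commentary, not in the original source): callers
// destructure the pair to classify a copy, exactly as run() does below:
//
//   const TargetRegisterClass *SrcRC, *DstRC;
//   std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, *MRI);
//   if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) { /* analyze or lower */ }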
static bool isVGPRToSGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return SrcRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(DstRC) &&
         TRI.hasVectorRegisters(SrcRC);
}

static bool isSGPRToVGPRCopy(const TargetRegisterClass *SrcRC,
                             const TargetRegisterClass *DstRC,
                             const SIRegisterInfo &TRI) {
  return DstRC != &AMDGPU::VReg_1RegClass && TRI.isSGPRClass(SrcRC) &&
         TRI.hasVectorRegisters(DstRC);
}
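// Illustrative example (added commentary, not in the original source). The
// pattern this pass exists to fix, in MIR form, is a copy whose source lives
// in a vector register and whose destination is scalar:
//
//   %0:vgpr_32 = V_ADD_U32_e32 ...
//   %1:sgpr_32 = COPY %0      ; VGPR-to-SGPR: no direct hardware equivalent
//
// Such a copy must become either a v_readfirstlane_b32 (reading lane 0 of
// the VGPR) or its scalar users must be rewritten to VALU instructions.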
static bool tryChangeVGPRtoSGPRinCopy(MachineInstr &MI,
                                      const SIRegisterInfo *TRI,
                                      const SIInstrInfo *TII) {
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  auto &Src = MI.getOperand(1);
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = Src.getReg();
  if (!SrcReg.isVirtual() || !DstReg.isVirtual())
    return false;

  for (const auto &MO : MRI.reg_nodbg_operands(DstReg)) {
    const auto *UseMI = MO.getParent();
    if (UseMI == &MI)
      continue;
    if (MO.isDef() || UseMI->getParent() != MI.getParent() ||
        UseMI->getOpcode() <= TargetOpcode::GENERIC_OP_END)
      return false;

    unsigned OpIdx = MO.getOperandNo();
    if (OpIdx >= UseMI->getDesc().getNumOperands() ||
        !TII->isOperandLegal(*UseMI, OpIdx, &Src))
      return false;
  }

  // Change VGPR to SGPR destination.
  MRI.setRegClass(DstReg,
                  TRI->getEquivalentSGPRClass(MRI.getRegClass(DstReg)));
  return true;
}
static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
                                        const SIRegisterInfo *TRI,
                                        const SIInstrInfo *TII,
                                        MachineRegisterInfo &MRI) {
  assert(MI.isRegSequence());

  Register DstReg = MI.getOperand(0).getReg();
  if (!TRI->isSGPRClass(MRI.getRegClass(DstReg)))
    return false;

  if (!MRI.hasOneUse(DstReg))
    return false;

  MachineInstr &CopyUse = *MRI.use_instr_begin(DstReg);
  if (!CopyUse.isCopy())
    return false;

  const TargetRegisterClass *SrcRC, *DstRC;
  std::tie(SrcRC, DstRC) = getCopyRegClasses(CopyUse, *TRI, MRI);
  if (!isSGPRToVGPRCopy(SrcRC, DstRC, *TRI))
    return false;

  unsigned SubReg = CopyUse.getOperand(1).getSubReg();
  if (SubReg != AMDGPU::NoSubRegister)
    return false;

  MRI.setRegClass(DstReg, DstRC);

  // Rewrite:
  //   SGPRy = REG_SEQUENCE SGPRx, sub0 ...
  //   VGPRz = COPY SGPRy
  // into:
  //   VGPRx = COPY SGPRx
  //   VGPRz = REG_SEQUENCE VGPRx, sub0 ...
  MI.getOperand(0).setReg(CopyUse.getOperand(0).getReg());
  bool IsAGPR = TRI->isAGPRClass(DstRC);

  for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
    const TargetRegisterClass *SrcRC =
        TRI->getRegClassForOperandReg(MRI, MI.getOperand(I));
    assert(TRI->isSGPRClass(SrcRC) &&
           "Expected SGPR REG_SEQUENCE to only have SGPR inputs");
    const TargetRegisterClass *NewSrcRC = TRI->getEquivalentVGPRClass(SrcRC);

    Register TmpReg = MRI.createVirtualRegister(NewSrcRC);

    BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY),
            TmpReg)
        .add(MI.getOperand(I));

    if (IsAGPR) {
      const TargetRegisterClass *NewSrcRC = TRI->getEquivalentAGPRClass(SrcRC);
      Register TmpAReg = MRI.createVirtualRegister(NewSrcRC);
      unsigned Opc = NewSrcRC == &AMDGPU::AGPR_32RegClass
                         ? AMDGPU::V_ACCVGPR_WRITE_B32_e64
                         : AMDGPU::COPY;
      BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(Opc), TmpAReg)
          .addReg(TmpReg, RegState::Kill);
      TmpReg = TmpAReg;
    }

    MI.getOperand(I).setReg(TmpReg);
  }

  CopyUse.eraseFromParent();
  return true;
}
static bool isSafeToFoldImmIntoCopy(const MachineInstr *Copy,
                                    const MachineInstr *MoveImm,
                                    const SIInstrInfo *TII, unsigned &SMovOp,
                                    int64_t &Imm) {
  if (Copy->getOpcode() != AMDGPU::COPY)
    return false;

  if (!MoveImm->isMoveImmediate())
    return false;

  const MachineOperand *ImmOp =
      TII->getNamedOperand(*MoveImm, AMDGPU::OpName::src0);
  if (!ImmOp->isImm())
    return false;

  // FIXME: Handle copies with sub-regs.
  if (Copy->getOperand(1).getSubReg())
    return false;

  switch (MoveImm->getOpcode()) {
  default:
    return false;
  case AMDGPU::V_MOV_B32_e32:
    SMovOp = AMDGPU::S_MOV_B32;
    break;
  case AMDGPU::V_MOV_B64_PSEUDO:
    SMovOp = AMDGPU::S_MOV_B64_IMM_PSEUDO;
    break;
  }
  Imm = ImmOp->getImm();
  return true;
}
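// Illustrative example (added commentary, not in the original source):
//
//   %0:vgpr_32 = V_MOV_B32_e32 42
//   %1:sgpr_32 = COPY %0
// ==>
//   %1:sgpr_32 = S_MOV_B32 42
//
// isSafeToFoldImmIntoCopy only reports the replacement opcode (SMovOp) and
// the immediate; the caller in lowerSpecialCase() rewrites the COPY in place.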
template <class UnaryPredicate>
bool searchPredecessors(const MachineBasicBlock *MBB,
                        const MachineBasicBlock *CutOff,
                        UnaryPredicate Predicate) {
  if (MBB == CutOff)
    return false;

  DenseSet<const MachineBasicBlock *> Visited;
  SmallVector<MachineBasicBlock *, 4> Worklist(MBB->predecessors());

  while (!Worklist.empty()) {
    MachineBasicBlock *MBB = Worklist.pop_back_val();

    if (!Visited.insert(MBB).second)
      continue;
    if (MBB == CutOff)
      continue;
    if (Predicate(MBB))
      return true;

    Worklist.append(MBB->pred_begin(), MBB->pred_end());
  }

  return false;
}
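// The isReachable helper used by hoistAndMergeSGPRInits below does not
// survive in this excerpt. A plausible sketch, reconstructed from its
// declaration and its call sites (treat as an assumption, not the verbatim
// upstream body):
//
//   static bool isReachable(const MachineInstr *From, const MachineInstr *To,
//                           const MachineBasicBlock *CutOff,
//                           MachineDominatorTree &MDT) {
//     if (MDT.dominates(From, To))
//       return true;
//     // Otherwise walk predecessors of To's block, up to CutOff, looking
//     // for From's block.
//     const MachineBasicBlock *MBBFrom = From->getParent();
//     return searchPredecessors(To->getParent(), CutOff,
//                               [MBBFrom](const MachineBasicBlock *MBB) {
//                                 return MBB == MBBFrom;
//                               });
//   }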
static MachineBasicBlock::iterator
getFirstNonPrologue(MachineBasicBlock *MBB, const TargetInstrInfo *TII) {
  MachineBasicBlock::iterator I = MBB->getFirstNonPHI();
  while (I != MBB->end() && TII->isBasicBlockPrologue(*I))
    ++I;

  return I;
}
// Hoist and merge identical SGPR initializations into a common predecessor.
// This is intended to combine M0 initializations, but can work with any SGPR.
static bool hoistAndMergeSGPRInits(unsigned Reg,
                                   const MachineRegisterInfo &MRI,
                                   const TargetRegisterInfo *TRI,
                                   MachineDominatorTree &MDT,
                                   const TargetInstrInfo *TII) {
  // List of inits by immediate value.
  using InitListMap = std::map<unsigned, std::list<MachineInstr *>>;
  InitListMap Inits;
  // List of clobbering instructions.
  SmallVector<MachineInstr *, 8> Clobbers;
  // List of instructions marked for deletion.
  SmallSet<MachineInstr *, 8> MergedInstrs;

  bool Changed = false;

  for (auto &MI : MRI.def_instructions(Reg)) {
    MachineOperand *Imm = nullptr;
    for (auto &MO : MI.operands()) {
      if ((MO.isReg() && ((MO.isDef() && MO.getReg() != Reg) || !MO.isDef())) ||
          (!MO.isImm() && !MO.isReg()) || (MO.isImm() && Imm)) {
        Imm = nullptr;
        break;
      }
      if (MO.isImm())
        Imm = &MO;
    }
    if (Imm)
      Inits[Imm->getImm()].push_front(&MI);
    else
      Clobbers.push_back(&MI);
  }

  for (auto &Init : Inits) {
    auto &Defs = Init.second;

    for (auto I1 = Defs.begin(), E = Defs.end(); I1 != E; ) {
      MachineInstr *MI1 = *I1;

      for (auto I2 = std::next(I1); I2 != E; ) {
        MachineInstr *MI2 = *I2;

        // Check any possible interference with a clobber, or with an init of
        // a different value, between the two candidates.
        auto interferes = [&](MachineBasicBlock::iterator From,
                              MachineBasicBlock::iterator To) -> bool {
          assert(MDT.dominates(&*To, &*From));

          auto interferes = [&MDT, From, To](MachineInstr *&Clobber) -> bool {
            const MachineBasicBlock *MBBFrom = From->getParent();
            const MachineBasicBlock *MBBTo = To->getParent();
            bool MayClobberFrom = isReachable(Clobber, &*From, MBBTo, MDT);
            bool MayClobberTo = isReachable(Clobber, &*To, MBBTo, MDT);
            if (!MayClobberFrom && !MayClobberTo)
              return false;
            if ((MayClobberFrom && !MayClobberTo) ||
                (!MayClobberFrom && MayClobberTo))
              return true;
            // Both can reach the clobber: this is not an interference only
            // if both are dominated by the clobber within the same block, or
            // if the clobber properly dominates To's block.
            return !((MBBFrom == MBBTo && MDT.dominates(Clobber, &*From) &&
                      MDT.dominates(Clobber, &*To)) ||
                     MDT.properlyDominates(Clobber->getParent(), MBBTo));
          };

          return (llvm::any_of(Clobbers, interferes)) ||
                 (llvm::any_of(Inits, [&](InitListMap::value_type &C) {
                   return C.first != Init.first &&
                          llvm::any_of(C.second, interferes);
                 }));
        };

        if (MDT.dominates(MI1, MI2)) {
          if (!interferes(MI2, MI1)) {
            // MI2 is dominated by the identical MI1 and is redundant.
            LLVM_DEBUG(dbgs() << "Erasing from "
                              << printMBBReference(*MI2->getParent()) << " "
                              << *MI2);
            MergedInstrs.insert(MI2);
            Changed = true;
          }
        } else if (MDT.dominates(MI2, MI1)) {
          if (!interferes(MI1, MI2)) {
            LLVM_DEBUG(dbgs() << "Erasing from "
                              << printMBBReference(*MI1->getParent()) << " "
                              << *MI1);
            MergedInstrs.insert(MI1);
            Changed = true;
            break;
          }
        } else {
          // Neither init dominates the other: hoist MI1 into the nearest
          // common dominator and retire MI2.
          auto *MBB = MDT.findNearestCommonDominator(MI1->getParent(),
                                                     MI2->getParent());
          MachineBasicBlock::iterator I = getFirstNonPrologue(MBB, TII);
          if (!interferes(MI1, I) && !interferes(MI2, I)) {
            LLVM_DEBUG(dbgs() << "Merging " << *MI2 << " and moving from "
                              << printMBBReference(*MI2->getParent()) << " to "
                              << printMBBReference(*MBB) << " " << *MI1);
            MBB->splice(I, MI1->getParent(), MI1);
            MergedInstrs.insert(MI2);
            Changed = true;
          }
        }
        ++I2;
      }
      ++I1;
    }
  }

  // Remove initializations that were merged into another.
  for (auto &Init : Inits) {
    auto &Defs = Init.second;
    auto I = Defs.begin();
    while (I != Defs.end()) {
      if (MergedInstrs.count(*I)) {
        (*I)->eraseFromParent();
        I = Defs.erase(I);
      } else
        ++I;
    }
  }

  // Try to schedule the remaining SGPR initializations as early as possible
  // in the basic block.
  for (auto &Init : Inits) {
    auto &Defs = Init.second;
    for (auto *MI : Defs) {
      auto MBB = MI->getParent();
      MachineInstr &BoundaryMI = *getFirstNonPrologue(MBB, TII);
      MachineBasicBlock::reverse_iterator B(BoundaryMI);
      // Check if B should actually be a boundary. If not, set the previous
      // instruction as the boundary instead.
      if (!TII->isBasicBlockPrologue(*B))
        B++;

      auto R = std::next(MI->getReverseIterator());
      const unsigned Threshold = 50;
      // Search until B or Threshold for a place to insert the initialization.
      for (unsigned I = 0; R != B && I < Threshold; ++R, ++I)
        if (R->readsRegister(Reg, TRI) || R->definesRegister(Reg, TRI) ||
            TII->isSchedulingBoundary(*R, MBB, *MBB->getParent()))
          break;

      // Move to directly after R.
      if (&*--R != MI)
        MBB->splice(*R, MBB, MI);
    }
  }

  if (Changed)
    MRI.clearKillFlags(Reg);

  return Changed;
}
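// Illustrative example (added commentary, not in the original source). Two
// sibling blocks that both set up M0 the same way:
//
//   bb.1:                        bb.2:
//     $m0 = S_MOV_B32 -1           $m0 = S_MOV_B32 -1
//     ... uses of m0 ...           ... uses of m0 ...
//
// When bb.0 is their nearest common dominator and nothing in between
// clobbers M0 or initializes it to a different value, the two inits are
// merged into a single S_MOV_B32 hoisted into bb.0.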
bool SIFixSGPRCopies::run(MachineFunction &MF) {
  // Only need to run this in the SelectionDAG path.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::Selected))
    return false;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  MRI = &MF.getRegInfo();
  TRI = ST.getRegisterInfo();
  TII = ST.getInstrInfo();

  for (MachineBasicBlock &MBB : MF) {
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;
         ++I) {
      MachineInstr &MI = *I;

      switch (MI.getOpcode()) {
      default:
        continue;
      case AMDGPU::COPY:
      case AMDGPU::WQM:
      case AMDGPU::STRICT_WQM:
      case AMDGPU::SOFT_WQM:
      case AMDGPU::STRICT_WWM: {
        const TargetRegisterClass *SrcRC, *DstRC;
        std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, *MRI);

        if (isSGPRToVGPRCopy(SrcRC, DstRC, *TRI)) {
          // Since VGPR-to-SGPR copies affect the VGPR-to-SGPR copy score,
          // and hence the lowering decision, try to get rid of them as early
          // as possible.
          if (tryChangeVGPRtoSGPRinCopy(MI, TRI, TII))
            continue;

          // Collect those not changed to try again after VGPR-to-SGPR copy
          // lowering, as there will be more opportunities.
          S2VCopies.push_back(&MI);
        }
        if (!isVGPRToSGPRCopy(SrcRC, DstRC, *TRI))
          continue;
        if (lowerSpecialCase(MI, I))
          continue;

        analyzeVGPRToSGPRCopy(&MI);
        break;
      }
      case AMDGPU::INSERT_SUBREG:
      case AMDGPU::PHI:
      case AMDGPU::REG_SEQUENCE: {
        if (TRI->isSGPRClass(TII->getOpRegClass(MI, 0))) {
          for (MachineOperand &MO : MI.operands()) {
            if (!MO.isReg() || !MO.getReg().isVirtual())
              continue;
            const TargetRegisterClass *SrcRC = MRI->getRegClass(MO.getReg());
            if (TRI->hasVectorRegisters(SrcRC)) {
              const TargetRegisterClass *DestRC =
                  TRI->getEquivalentSGPRClass(SrcRC);
              Register NewDst = MRI->createVirtualRegister(DestRC);
              MachineBasicBlock *BlockToInsertCopy =
                  MI.isPHI() ? MI.getOperand(MO.getOperandNo() + 1).getMBB()
                             : &MBB;
              MachineBasicBlock::iterator PointToInsertCopy =
                  MI.isPHI() ? BlockToInsertCopy->getFirstInstrTerminator() : I;

              if (!tryMoveVGPRConstToSGPR(MO, NewDst, BlockToInsertCopy,
                                          PointToInsertCopy)) {
                MachineInstr *NewCopy =
                    BuildMI(*BlockToInsertCopy, PointToInsertCopy,
                            PointToInsertCopy->getDebugLoc(),
                            TII->get(AMDGPU::COPY), NewDst)
                        .addReg(MO.getReg());
                MO.setReg(NewDst);
                analyzeVGPRToSGPRCopy(NewCopy);
              }
            }
          }
        }

        if (MI.isPHI())
          PHINodes.push_back(&MI);
        else if (MI.isRegSequence())
          RegSequences.push_back(&MI);
        break;
      }
      case AMDGPU::V_WRITELANE_B32: {
        // Some architectures allow more than one constant-bus access without
        // an SGPR restriction.
        if (ST.getConstantBusLimit(MI.getOpcode()) != 1)
          break;

        // V_WRITELANE may use two SGPRs only if one of them is M0; enforce
        // the one-SGPR rule now, since legalizing VGPRs to SGPRs may have
        // introduced multiple SGPR operands.
        MachineOperand &Src0 = *TII->getNamedOperand(MI, AMDGPU::OpName::src0);
        MachineOperand &Src1 = *TII->getNamedOperand(MI, AMDGPU::OpName::src1);
        if ((Src0.isReg() && TRI->isSGPRReg(*MRI, Src0.getReg()) &&
             Src0.getReg() != AMDGPU::M0) &&
            (Src1.isReg() && TRI->isSGPRReg(*MRI, Src1.getReg()) &&
             Src1.getReg() != AMDGPU::M0)) {
          // First check for trivially easy constant propagation into one of
          // the operands; otherwise a mov to M0 would be emitted that later
          // operand folding cannot resolve.
          bool Resolved = false;
          for (MachineOperand *MO : {&Src0, &Src1}) {
            if (MO->getReg().isVirtual()) {
              MachineInstr *DefMI = MRI->getVRegDef(MO->getReg());
              if (DefMI && TII->isFoldableCopy(*DefMI)) {
                const MachineOperand &Def = DefMI->getOperand(0);
                if (Def.isReg() && MO->getReg() == Def.getReg() &&
                    MO->getSubReg() == Def.getSubReg()) {
                  const MachineOperand &Copied = DefMI->getOperand(1);
                  if (Copied.isImm() &&
                      TII->isInlineConstant(APInt(64, Copied.getImm(), true))) {
                    MO->ChangeToImmediate(Copied.getImm());
                    Resolved = true;
                    break;
                  }
                }
              }
            }
          }

          if (!Resolved) {
            // Haven't managed to resolve by replacing an SGPR with an
            // immediate; move src1 into M0.
            BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                    TII->get(AMDGPU::COPY), AMDGPU::M0)
                .add(Src1);
            Src1.ChangeToRegister(AMDGPU::M0, false);
          }
        }
        break;
      }
      }
    }
  }

  lowerVGPR2SGPRCopies(MF);
  // Postprocessing.
  fixSCCCopies(MF);
  for (auto MI : S2VCopies) {
    // Check if it is still valid.
    if (MI->isCopy()) {
      const TargetRegisterClass *SrcRC, *DstRC;
      std::tie(SrcRC, DstRC) = getCopyRegClasses(*MI, *TRI, *MRI);
      if (isSGPRToVGPRCopy(SrcRC, DstRC, *TRI))
        tryChangeVGPRtoSGPRinCopy(*MI, TRI, TII);
    }
  }
  for (auto MI : RegSequences) {
    // Check if it is still valid.
    if (MI->isRegSequence())
      foldVGPRCopyIntoRegSequence(*MI, TRI, TII, *MRI);
  }
  for (auto MI : PHINodes)
    processPHINode(*MI);

  if (MF.getTarget().getOptLevel() > CodeGenOptLevel::None && EnableM0Merge)
    hoistAndMergeSGPRInits(AMDGPU::M0, *MRI, TRI, *MDT, TII);

  SiblingPenalty.clear();
  V2SCopies.clear();
  PHINodes.clear();
  RegSequences.clear();

  return true;
}
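// Note (added commentary, not in the original source): the S2VCopies
// collected during the scan get a second tryChangeVGPRtoSGPRinCopy attempt
// here because lowerVGPR2SGPRCopies may have rewritten their users, turning
// a previously illegal SGPR destination into a legal one.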
void SIFixSGPRCopies::processPHINode(MachineInstr &MI) {
  bool AllAGPRUses = true;
  SetVector<const MachineInstr *> worklist;
  SmallSet<const MachineInstr *, 4> Visited;
  SetVector<MachineInstr *> PHIOperands;
  worklist.insert(&MI);
  Visited.insert(&MI);
  bool HasUses = false;
  // Walk all uses transitively through copies and REG_SEQUENCEs to see
  // whether every use ultimately lands in an AGPR.
  while (!worklist.empty()) {
    const MachineInstr *Instr = worklist.pop_back_val();
    Register Reg = Instr->getOperand(0).getReg();
    for (const auto &Use : MRI->use_operands(Reg)) {
      HasUses = true;
      const MachineInstr *UseMI = Use.getParent();
      AllAGPRUses &= (UseMI->isCopy() &&
                      TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg())) ||
                     TRI->isAGPR(*MRI, Use.getReg());
      if (UseMI->isCopy() || UseMI->isRegSequence()) {
        if (Visited.insert(UseMI).second)
          worklist.insert(UseMI);
      }
    }
  }

  Register PHIRes = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC0 = MRI->getRegClass(PHIRes);
  if (HasUses && AllAGPRUses && !TRI->isAGPRClass(RC0)) {
    LLVM_DEBUG(dbgs() << "Moving PHI to AGPR: " << MI);
    MRI->setRegClass(PHIRes, TRI->getEquivalentAGPRClass(RC0));
    for (unsigned I = 1, N = MI.getNumOperands(); I != N; I += 2) {
      MachineInstr *DefMI = MRI->getVRegDef(MI.getOperand(I).getReg());
      if (DefMI && DefMI->isPHI())
        PHIOperands.insert(DefMI);
    }
  }

  if (TRI->isVectorRegister(*MRI, PHIRes) ||
      RC0 == &AMDGPU::VReg_1RegClass) {
    LLVM_DEBUG(dbgs() << "Legalizing " << MI);
    TII->legalizeOperands(MI, MDT);
  }

  // Propagate the register class back to PHI operands that are themselves
  // PHIs.
  while (!PHIOperands.empty())
    processPHINode(*PHIOperands.pop_back_val());
}
bool SIFixSGPRCopies::tryMoveVGPRConstToSGPR(
    MachineOperand &MaybeVGPRConstMO, Register DstReg,
    MachineBasicBlock *BlockToInsertTo,
    MachineBasicBlock::iterator PointToInsertTo) {

  MachineInstr *DefMI = MRI->getVRegDef(MaybeVGPRConstMO.getReg());
  if (!DefMI || !DefMI->isMoveImmediate())
    return false;

  MachineOperand *SrcConst = TII->getNamedOperand(*DefMI, AMDGPU::OpName::src0);
  if (SrcConst->isReg())
    return false;

  const TargetRegisterClass *SrcRC =
      MRI->getRegClass(MaybeVGPRConstMO.getReg());
  unsigned MoveSize = TRI->getRegSizeInBits(*SrcRC);
  unsigned MoveOp = MoveSize == 64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
  BuildMI(*BlockToInsertTo, PointToInsertTo, PointToInsertTo->getDebugLoc(),
          TII->get(MoveOp), DstReg)
      .add(*SrcConst);
  if (MRI->hasOneUse(MaybeVGPRConstMO.getReg()))
    DefMI->eraseFromParent();
  MaybeVGPRConstMO.setReg(DstReg);
  return true;
}
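// Illustrative example (added commentary, not in the original source):
//
//   %0:vgpr_32 = V_MOV_B32_e32 7
//   %1:sgpr_32 = COPY %0          ; operand being fixed
// ==>
//   %1:sgpr_32 = S_MOV_B32 7      ; inserted at PointToInsertTo
//
// The V_MOV_B32 is deleted as well when the copy was its only use.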
bool SIFixSGPRCopies::lowerSpecialCase(MachineInstr &MI,
                                       MachineBasicBlock::iterator &I) {
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  if (!DstReg.isVirtual()) {
    // If the destination register is physical there isn't much we can do to
    // fix this. Some special instructions use M0 as an input, and some of
    // them only read the first lane: insert a readfirstlane and hope for the
    // best.
    if (DstReg == AMDGPU::M0 &&
        TRI->hasVectorRegisters(MRI->getRegClass(SrcReg))) {
      Register TmpReg =
          MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
              TII->get(AMDGPU::V_READFIRSTLANE_B32), TmpReg)
          .add(MI.getOperand(1));
      MI.getOperand(1).setReg(TmpReg);
    } else if (tryMoveVGPRConstToSGPR(MI.getOperand(1), DstReg, MI.getParent(),
                                      MI)) {
      I = std::next(I);
      MI.eraseFromParent();
    }
    return true;
  }
  if (!SrcReg.isVirtual() || TRI->isAGPR(*MRI, SrcReg)) {
    SIInstrWorklist worklist;
    worklist.insert(&MI);
    TII->moveToVALU(worklist, MDT);
    return true;
  }

  unsigned SMovOp;
  int64_t Imm;
  // If we are just copying an immediate, we can replace the copy with
  // s_mov_b32.
  if (isSafeToFoldImmIntoCopy(&MI, MRI->getVRegDef(SrcReg), TII, SMovOp, Imm)) {
    MI.getOperand(1).ChangeToImmediate(Imm);
    MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
    MI.setDesc(TII->get(SMovOp));
    return true;
  }
  return false;
}
void SIFixSGPRCopies::analyzeVGPRToSGPRCopy(MachineInstr *MI) {
  Register DstReg = MI->getOperand(0).getReg();
  const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);

  V2SCopyInfo Info(getNextVGPRToSGPRCopyId(), MI,
                   TRI->getRegSizeInBits(*DstRC));
  SmallVector<MachineInstr *, 8> AnalysisWorklist;
  // Needed because the SSA is not a tree but a graph and may have forks and
  // joins: we should not walk the same way twice.
  DenseSet<MachineInstr *> Visited;
  AnalysisWorklist.push_back(Info.Copy);
  while (!AnalysisWorklist.empty()) {
    MachineInstr *Inst = AnalysisWorklist.pop_back_val();

    if (!Visited.insert(Inst).second)
      continue;

    // Copies and REG_SEQUENCE do not contribute to the final assembly, so
    // skip them, but take care of the SGPR-to-VGPR copy bookkeeping.
    if (Inst->isCopy() || Inst->isRegSequence()) {
      if (TRI->isVGPR(*MRI, Inst->getOperand(0).getReg())) {
        if (!Inst->isCopy() || !tryChangeVGPRtoSGPRinCopy(*Inst, TRI, TII)) {
          Info.NumSVCopies++;
          continue;
        }
      }
    }

    SiblingPenalty[Inst].insert(Info.ID);

    SmallVector<MachineInstr *, 4> Users;
    if ((TII->isSALU(*Inst) && Inst->isCompare()) ||
        (Inst->isCopy() && Inst->getOperand(0).getReg() == AMDGPU::SCC)) {
      auto I = Inst->getIterator();
      auto E = Inst->getParent()->end();
      while (++I != E &&
             !I->findRegisterDefOperand(AMDGPU::SCC, /*TRI=*/nullptr)) {
        if (I->readsRegister(AMDGPU::SCC, /*TRI=*/nullptr))
          Users.push_back(&*I);
      }
    } else if (Inst->getNumExplicitDefs() != 0) {
      Register Reg = Inst->getOperand(0).getReg();
      if (TRI->isSGPRReg(*MRI, Reg) && !TII->isVALU(*Inst))
        for (auto &U : MRI->use_instructions(Reg))
          Users.push_back(&U);
    }
    for (auto U : Users) {
      if (TII->isSALU(*U))
        Info.SChain.insert(U);
      AnalysisWorklist.push_back(U);
    }
  }
  V2SCopies[Info.ID] = Info;
}
bool SIFixSGPRCopies::needToBeConvertedToVALU(V2SCopyInfo *Info) {
  if (Info->SChain.empty()) {
    Info->Score = 0;
    return true;
  }
  Info->Siblings = SiblingPenalty[*llvm::max_element(
      Info->SChain, [&](MachineInstr *A, MachineInstr *B) -> bool {
        return SiblingPenalty[A].size() < SiblingPenalty[B].size();
      })];
  Info->Siblings.remove_if([&](unsigned ID) { return ID == Info->ID; });

  // The loop below computes the number of other VGPR-to-SGPR copies that
  // contribute to the current copy's SALU chain. We assume that all the
  // VGPR-to-SGPR copies with the same source virtual register will be
  // squashed into one by regalloc. We also take care of the copies of
  // different subregs of the same register.
  SmallSet<std::pair<Register, unsigned>, 4> SrcRegs;
  for (auto J : Info->Siblings) {
    auto InfoIt = V2SCopies.find(J);
    if (InfoIt != V2SCopies.end()) {
      MachineInstr *SiblingCopy = InfoIt->second.Copy;
      if (SiblingCopy->isImplicitDef())
        // The COPY has already been moved to VALU.
        continue;

      SrcRegs.insert(std::pair(SiblingCopy->getOperand(1).getReg(),
                               SiblingCopy->getOperand(1).getSubReg()));
    }
  }
  Info->SiblingPenalty = SrcRegs.size();

  unsigned Penalty =
      Info->NumSVCopies + Info->SiblingPenalty + Info->NumReadfirstlanes;
  unsigned Profit = Info->SChain.size();
  Info->Score = Penalty > Profit ? 0 : Profit - Penalty;
  Info->NeedToBeConvertedToVALU = Info->Score < 3;
  return Info->NeedToBeConvertedToVALU;
}
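// Worked example (added commentary, not in the original source): for a
// 64-bit V2S copy (NumReadfirstlanes = 2) feeding a chain of 6 SALU
// instructions, with one SGPR-to-VGPR copy out of the chain
// (NumSVCopies = 1) and one sibling source register (SiblingPenalty = 1):
//
//   Penalty = 1 + 1 + 2 = 4,  Profit = 6,  Score = 6 - 4 = 2
//
// Score < 3, so the copy and its SALU chain are moved to VALU. A score of 3
// or more would keep the chain scalar behind v_readfirstlane_b32 instead.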
void SIFixSGPRCopies::lowerVGPR2SGPRCopies(MachineFunction &MF) {
  SmallVector<unsigned, 8> LoweringWorklist;
  for (auto &C : V2SCopies) {
    if (needToBeConvertedToVALU(&C.second))
      LoweringWorklist.push_back(C.second.ID);
  }

  // All the V2S copy instructions that need to be moved to VALU.
  SIInstrWorklist Copies;

  while (!LoweringWorklist.empty()) {
    unsigned CurID = LoweringWorklist.pop_back_val();
    auto CurInfoIt = V2SCopies.find(CurID);
    if (CurInfoIt != V2SCopies.end()) {
      V2SCopyInfo C = CurInfoIt->second;
      for (auto S : C.Siblings) {
        auto SibInfoIt = V2SCopies.find(S);
        if (SibInfoIt != V2SCopies.end()) {
          V2SCopyInfo &SI = SibInfoIt->second;
          if (!SI.NeedToBeConvertedToVALU) {
            // Converting C invalidates the part of the sibling's chain that
            // was shared with C; re-evaluate the sibling without it.
            SI.SChain.set_subtract(C.SChain);
            if (needToBeConvertedToVALU(&SI))
              LoweringWorklist.push_back(SI.ID);
          }
          SI.Siblings.remove_if([&](unsigned ID) { return ID == C.ID; });
        }
      }
      LLVM_DEBUG(dbgs() << "V2S copy " << *C.Copy
                        << " is being turned to VALU\n");
      V2SCopies.erase(C.ID);
      Copies.insert(C.Copy);
    }
  }

  TII->moveToVALU(Copies, MDT);
  Copies.clear();

  // Now do the actual lowering of the copies that survived.
  for (auto C : V2SCopies) {
    MachineInstr *MI = C.second.Copy;
    MachineBasicBlock *MBB = MI->getParent();
    LLVM_DEBUG(dbgs() << "V2S copy " << *MI
                      << " is being turned to v_readfirstlane_b32"
                      << " Score: " << C.second.Score << "\n");
    Register DstReg = MI->getOperand(0).getReg();
    Register SrcReg = MI->getOperand(1).getReg();
    unsigned SubReg = MI->getOperand(1).getSubReg();
    const TargetRegisterClass *SrcRC =
        TRI->getRegClassForOperandReg(*MRI, MI->getOperand(1));
    size_t SrcSize = TRI->getRegSizeInBits(*SrcRC);
    if (SrcSize == 16) {
      // HACK to handle possible 16-bit VGPR source.
      auto MIB = BuildMI(*MBB, MI, MI->getDebugLoc(),
                         TII->get(AMDGPU::V_READFIRSTLANE_B32), DstReg);
      MIB.addReg(SrcReg, 0, AMDGPU::NoSubRegister);
    } else if (SrcSize == 32) {
      auto MIB = BuildMI(*MBB, MI, MI->getDebugLoc(),
                         TII->get(AMDGPU::V_READFIRSTLANE_B32), DstReg);
      MIB.addReg(SrcReg, 0, SubReg);
    } else {
      // Wide copies are split into 32-bit pieces: one v_readfirstlane_b32
      // per channel, reassembled with a REG_SEQUENCE.
      auto Result = BuildMI(*MBB, MI, MI->getDebugLoc(),
                            TII->get(AMDGPU::REG_SEQUENCE), DstReg);
      int N = TRI->getRegSizeInBits(*SrcRC) / 32;
      for (int i = 0; i < N; i++) {
        Register PartialSrc = TII->buildExtractSubReg(
            Result, *MRI, MI->getOperand(1), SrcRC,
            TRI->getSubRegFromChannel(i), &AMDGPU::VGPR_32RegClass);
        Register PartialDst =
            MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
        BuildMI(*MBB, *Result, Result->getDebugLoc(),
                TII->get(AMDGPU::V_READFIRSTLANE_B32), PartialDst)
            .addReg(PartialSrc);
        Result.addReg(PartialDst).addImm(TRI->getSubRegFromChannel(i));
      }
    }
    MI->eraseFromParent();
  }
}
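// Illustrative example (added commentary, not in the original source): a
// 64-bit copy
//
//   %1:sreg_64 = COPY %0:vreg_64
//
// becomes
//
//   %2:sgpr_32 = V_READFIRSTLANE_B32 %0.sub0
//   %3:sgpr_32 = V_READFIRSTLANE_B32 %0.sub1
//   %1:sreg_64 = REG_SEQUENCE %2, %subreg.sub0, %3, %subreg.sub1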
void SIFixSGPRCopies::fixSCCCopies(MachineFunction &MF) {
  bool IsWave32 = MF.getSubtarget<GCNSubtarget>().isWave32();
  for (MachineBasicBlock &MBB : MF) {
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;
         ++I) {
      MachineInstr &MI = *I;
      // May already have been lowered.
      if (!MI.isCopy())
        continue;
      Register SrcReg = MI.getOperand(1).getReg();
      Register DstReg = MI.getOperand(0).getReg();
      if (SrcReg == AMDGPU::SCC) {
        Register SCCCopy = MRI->createVirtualRegister(
            TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID));
        I = BuildMI(*MI.getParent(),
                    std::next(MachineBasicBlock::iterator(MI)),
                    MI.getDebugLoc(),
                    TII->get(IsWave32 ? AMDGPU::S_CSELECT_B32
                                      : AMDGPU::S_CSELECT_B64),
                    SCCCopy)
                .addImm(-1)
                .addImm(0);
        I = BuildMI(*MI.getParent(), std::next(I), I->getDebugLoc(),
                    TII->get(AMDGPU::COPY), DstReg)
                .addReg(SCCCopy);
        MI.eraseFromParent();
        continue;
      }
      if (DstReg == AMDGPU::SCC) {
        unsigned Opcode = IsWave32 ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
        Register Exec = IsWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
        Register Tmp = MRI->createVirtualRegister(TRI->getBoolRC());
        I = BuildMI(*MI.getParent(),
                    std::next(MachineBasicBlock::iterator(MI)),
                    MI.getDebugLoc(), TII->get(Opcode))
                .addReg(Tmp, getDefRegState(true))
                .addReg(SrcReg)
                .addReg(Exec);
        MI.eraseFromParent();
      }
    }
  }
}
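// Illustrative example (added commentary, not in the original source):
// copying out of SCC materializes the bit as a full mask, and copying into
// SCC regenerates it by ANDing the value with exec:
//
//   %1:sreg_64_xexec = S_CSELECT_B64 -1, 0      ; %1 = SCC ? all-ones : 0
//   ...
//   dead %2:sreg_64 = S_AND_B64 %1, $exec, implicit-def $scc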
PreservedAnalyses
SIFixSGPRCopiesPass::run(MachineFunction &MF,
                         MachineFunctionAnalysisManager &MFAM) {
  MachineDominatorTree &MDT = MFAM.getResult<MachineDominatorTreeAnalysis>(MF);
  SIFixSGPRCopies Impl(&MDT);
  bool Changed = Impl.run(MF);
  if (!Changed)
    return PreservedAnalyses::all();

  auto PA = getMachineFunctionPassPreservedAnalyses();
  PA.preserveSet<CFGAnalyses>();
  return PA;
}