// The pass tries to use the 32-bit encoding for instructions when possible.
#define DEBUG_TYPE "si-shrink-instructions"

STATISTIC(NumInstructionsShrunk,
          "Number of 64-bit instructions reduced to 32-bit.");
STATISTIC(NumLiteralConstantsFolded,
          "Number of literal constants folded into 32-bit instructions.");

class SIShrinkInstructions {
  bool foldImmediates(MachineInstr &MI, bool TryToCommute = true) const;
  bool isKImmOrKUImmOperand(const MachineOperand &Src, bool &IsUnsigned) const;

  SIShrinkInstructions() = default;
84 "SI Shrink Instructions",
false,
false)
86char SIShrinkInstructionsLegacy::
ID = 0;
89 return new SIShrinkInstructionsLegacy();
bool SIShrinkInstructions::foldImmediates(MachineInstr &MI,
                                          bool TryToCommute) const {
  if (Reg.isVirtual()) {
    if (Def && Def->isMoveImmediate()) {
      bool ConstantFolded = false;
      if (TII->isOperandLegal(MI, Src0Idx, &MovSrc)) {
        if (MovSrc.isImm()) {
          ConstantFolded = true;
        } else if (MovSrc.isFI()) {
          ConstantFolded = true;
        } else if (MovSrc.isGlobal()) {
          ConstantFolded = true;
        }
      }

      if (ConstantFolded) {
        if (MRI->use_nodbg_empty(Reg))
          Def->eraseFromParent();
        ++NumLiteralConstantsFolded;
        return true;
      }
    }
  }

  // We have failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable()) {
    if (TII->commuteInstruction(MI)) {
      if (foldImmediates(MI, false))
        return true;

      // Commute back.
      TII->commuteInstruction(MI);
    }
  }

  return false;
}
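// Illustrative example (not taken from this file): if the mov below is the
// only definition of %1, the literal is folded into the user and the mov is
// deleted, bumping NumLiteralConstantsFolded:
//   %1 = V_MOV_B32_e32 0x1234
//   %2 = V_ADD_F32_e32 %1, %0   -->   %2 = V_ADD_F32_e32 0x1234, %0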
bool SIShrinkInstructions::shouldShrinkTrue16(MachineInstr &MI) const {
  for (unsigned I = 0, E = MI.getNumExplicitOperands(); I != E; ++I) {
    assert(!Reg.isVirtual() && "Prior checks should ensure we only shrink "
                               "True16 Instructions post-RA");
    if (AMDGPU::VGPR_32RegClass.contains(Reg) &&
        !AMDGPU::VGPR_32_Lo128RegClass.contains(Reg))
      return false;
    if (AMDGPU::VGPR_16RegClass.contains(Reg) &&
        !AMDGPU::VGPR_16_Lo128RegClass.contains(Reg))
      return false;
  }
  return true;
}
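// The 16-bit (True16) encodings can only address the low half of the VGPR
// file, so shrinking only pays off when every VGPR operand already lives in
// v0-v127, i.e. the *_Lo128 register classes checked above.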
bool SIShrinkInstructions::isKImmOperand(const MachineOperand &Src) const {
  return isInt<16>(SignExtend64(Src.getImm(), 32)) &&
         !TII->isInlineConstant(*Src.getParent(), Src.getOperandNo());
}

bool SIShrinkInstructions::isKUImmOperand(const MachineOperand &Src) const {
  return isUInt<16>(Src.getImm()) &&
         !TII->isInlineConstant(*Src.getParent(), Src.getOperandNo());
}

bool SIShrinkInstructions::isKImmOrKUImmOperand(const MachineOperand &Src,
                                                bool &IsUnsigned) const {
  if (isInt<16>(SignExtend64(Src.getImm(), 32))) {
    IsUnsigned = false;
    return !TII->isInlineConstant(Src);
  }

  if (isUInt<16>(Src.getImm())) {
    IsUnsigned = true;
    return !TII->isInlineConstant(Src);
  }

  return false;
}
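// These predicates pick out immediates that need the 16-bit literal slot of a
// SOPK/VOP2-style encoding: e.g. 0x1234 qualifies, while 64 or -16 do not,
// because those are already free inline constants.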
static unsigned canModifyToInlineImmOp32(const SIInstrInfo *TII,
                                         const MachineOperand &Src,
                                         int32_t &ModifiedImm, bool Scalar) {
  if (TII->isInlineConstant(Src))
    return 0;
  int32_t SrcImm = static_cast<int32_t>(Src.getImm());

  ModifiedImm = ~SrcImm;
  if (TII->isInlineConstant(APInt(32, ModifiedImm, true)))
    return AMDGPU::V_NOT_B32_e32;

  ModifiedImm = reverseBits<int32_t>(SrcImm);
  if (TII->isInlineConstant(APInt(32, ModifiedImm, true)))
    return Scalar ? AMDGPU::S_BREV_B32 : AMDGPU::V_BFREV_B32_e32;

  return 0;
}
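// Worked example: the literal 0xFFFFFFC0 is not an inline constant, but its
// bitwise NOT is 63, which is, so a mov of 0xFFFFFFC0 can become
// V_NOT_B32_e32 63. Likewise 0x80000000 is the bit-reverse of 1, so it can be
// materialized as S_BREV_B32 / V_BFREV_B32_e32 of the inline constant 1.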
void SIShrinkInstructions::copyExtraImplicitOps(MachineInstr &NewMI,
                                                MachineInstr &MI) const {
  for (unsigned i = MI.getDesc().getNumOperands() +
                    MI.getDesc().implicit_uses().size() +
                    MI.getDesc().implicit_defs().size(),
                e = MI.getNumOperands();
       i != e; ++i) {
void SIShrinkInstructions::shrinkScalarCompare(MachineInstr &MI) const {
  if (!MI.getOperand(0).isReg())
    TII->commuteInstruction(MI, false, 0, 1);

  if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {
    if (isKImmOrKUImmOperand(Src1, HasUImm)) {
      SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ? AMDGPU::S_CMPK_EQ_I32
                                                   : AMDGPU::S_CMPK_LG_I32;

      MI.setDesc(TII->get(SOPKOpc));
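// Illustrative effect: an equality compare against a 16-bit literal, e.g.
//   s_cmp_eq_u32 s0, 0x1234   -->   s_cmpk_eq_i32 s0, 0x1234
// The SOPK form carries the constant in the instruction word instead of a
// separate literal dword.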
  switch (Info->MIMGEncoding) {
  case AMDGPU::MIMGEncGfx10NSA:
    NewEncoding = AMDGPU::MIMGEncGfx10Default;
    break;
  case AMDGPU::MIMGEncGfx11NSA:
    NewEncoding = AMDGPU::MIMGEncGfx11Default;
    break;

  unsigned NewAddrDwords = Info->VAddrDwords;

  if (Info->VAddrDwords == 2) {
    RC = &AMDGPU::VReg_64RegClass;
  } else if (Info->VAddrDwords == 3) {
    RC = &AMDGPU::VReg_96RegClass;
  } else if (Info->VAddrDwords == 4) {
    RC = &AMDGPU::VReg_128RegClass;
  } else if (Info->VAddrDwords == 5) {
    RC = &AMDGPU::VReg_160RegClass;
  } else if (Info->VAddrDwords == 6) {
    RC = &AMDGPU::VReg_192RegClass;
  } else if (Info->VAddrDwords == 7) {
    RC = &AMDGPU::VReg_224RegClass;
  } else if (Info->VAddrDwords == 8) {
    RC = &AMDGPU::VReg_256RegClass;
  } else if (Info->VAddrDwords == 9) {
    RC = &AMDGPU::VReg_288RegClass;
  } else if (Info->VAddrDwords == 10) {
    RC = &AMDGPU::VReg_320RegClass;
  } else if (Info->VAddrDwords == 11) {
    RC = &AMDGPU::VReg_352RegClass;
  } else if (Info->VAddrDwords == 12) {
    RC = &AMDGPU::VReg_384RegClass;
  } else {
    RC = &AMDGPU::VReg_512RegClass;
  }
  unsigned VgprBase = 0;
  unsigned NextVgpr = 0;
  bool IsKill = NewAddrDwords == Info->VAddrDwords;
  const unsigned NSAMaxSize = ST->getNSAMaxSize();
  const bool IsPartialNSA = NewAddrDwords > NSAMaxSize;
  const unsigned EndVAddr = IsPartialNSA ? NSAMaxSize : Info->VAddrOperands;
  for (unsigned Idx = 0; Idx < EndVAddr; ++Idx) {
    unsigned Vgpr = TRI->getHWRegIndex(Op.getReg());
    unsigned Dwords = TRI->getRegSizeInBits(Op.getReg(), *MRI) / 32;
    assert(Dwords > 0 && "Un-implemented for less than 32 bit regs");

    if (Idx == 0) {
      VgprBase = Vgpr;
      NextVgpr = Vgpr + Dwords;
    } else if (Vgpr == NextVgpr) {
      NextVgpr = Vgpr + Dwords;
    }
  }

  if (VgprBase + NewAddrDwords > 256)
    return;

  unsigned TFEVal = (TFEIdx == -1) ? 0 : MI.getOperand(TFEIdx).getImm();
  unsigned LWEVal = (LWEIdx == -1) ? 0 : MI.getOperand(LWEIdx).getImm();

  if (TFEVal || LWEVal) {
    // TFE/LWE is enabled, so there is an implicit tied operand to deal with.
    for (unsigned i = LWEIdx + 1, e = MI.getNumOperands(); i != e; ++i) {
      if (MI.getOperand(i).isReg() && MI.getOperand(i).isTied() &&
          MI.getOperand(i).isImplicit()) {
        assert(ToUntie == -1 &&
               "found more than one tied implicit operand when expecting only 1");
        ToUntie = i;
        MI.untieRegOperand(ToUntie);
      }
    }
  }

  unsigned NewOpcode = AMDGPU::getMIMGOpcode(Info->BaseOpcode, NewEncoding,
                                             Info->VDataDwords, NewAddrDwords);
  MI.setDesc(TII->get(NewOpcode));
  MI.getOperand(VAddr0Idx).setIsUndef(IsUndef);
  MI.getOperand(VAddr0Idx).setIsKill(IsKill);

  for (unsigned i = 1; i < EndVAddr; ++i)
    MI.removeOperand(VAddr0Idx + 1);

  if (ToUntie >= 0)
    MI.tieOperands(
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata),
        ToUntie - (EndVAddr - 1));
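// Summary of the transform above: when a GFX10/GFX11 NSA (non-sequential
// address) image instruction happens to have all of its address registers in
// consecutive VGPRs, it is rewritten to the contiguous-address encoding with a
// single wide VAddr operand, e.g. a 4-address NSA form using v4,v5,v6,v7
// collapses to one VReg_128 operand starting at v4.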
  // Pre-GFX10 VOP3 instructions like MAD/FMA cannot take a literal operand, so
  // there is no reason to try to shrink them.
  if (!ST->hasVOP3Literal())
    return;

  // There is no advantage to doing this before register allocation.
  if (!MF->getProperties().hasProperty(
          MachineFunctionProperties::Property::NoVRegs))
    return;

  if (TII->hasAnyModifiersSet(MI))
    return;

  const unsigned Opcode = MI.getOpcode();

  unsigned NewOpcode = AMDGPU::INSTRUCTION_LIST_END;

  // If src2 holds the literal, try the madak/fmaak forms.
  if (Src2.isImm() && !TII->isInlineConstant(Src2)) {
    switch (Opcode) {
    default:
      break;
    case AMDGPU::V_MAD_F32_e64:
      NewOpcode = AMDGPU::V_MADAK_F32;
      break;
    case AMDGPU::V_FMA_F32_e64:
      NewOpcode = AMDGPU::V_FMAAK_F32;
      break;
    case AMDGPU::V_MAD_F16_e64:
      NewOpcode = AMDGPU::V_MADAK_F16;
      break;
    case AMDGPU::V_FMA_F16_e64:
    case AMDGPU::V_FMA_F16_gfx9_e64:
      NewOpcode = ST->hasTrue16BitInsts() ? AMDGPU::V_FMAAK_F16_fake16
                                          : AMDGPU::V_FMAAK_F16;
      break;
    }
  }

  // If the literal is in src0 or src1 instead, try the madmk/fmamk forms.
  if (Src1.isImm() && !TII->isInlineConstant(Src1))
  else if (Src0.isImm() && !TII->isInlineConstant(Src0))

    switch (Opcode) {
    default:
      break;
    case AMDGPU::V_MAD_F32_e64:
      NewOpcode = AMDGPU::V_MADMK_F32;
      break;
    case AMDGPU::V_FMA_F32_e64:
      NewOpcode = AMDGPU::V_FMAMK_F32;
      break;
    case AMDGPU::V_MAD_F16_e64:
      NewOpcode = AMDGPU::V_MADMK_F16;
      break;
    case AMDGPU::V_FMA_F16_e64:
    case AMDGPU::V_FMA_F16_gfx9_e64:
      NewOpcode = ST->hasTrue16BitInsts() ? AMDGPU::V_FMAMK_F16_fake16
                                          : AMDGPU::V_FMAMK_F16;
      break;
    }

  if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END)
    return;

          MI.getOperand(0).getReg())

    MI.eraseFromParent();

  TII->removeModOperands(MI);
  MI.setDesc(TII->get(NewOpcode));
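// Net effect, illustrated: a VOP3 mad/fma whose only literal sits in src2
//   v_fma_f32 v0, v1, v2, 0x3e800000   -->   v_fmaak_f32 v0, v1, v2, 0x3e800000
// and one whose literal sits in src0/src1
//   v_fma_f32 v0, 0x3e800000, v1, v2   -->   v_fmamk_f32 v0, v1, 0x3e800000, v2
// both move to 32-bit encodings with a trailing literal, saving 4 bytes per
// instruction.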
bool SIShrinkInstructions::shrinkScalarLogicOp(MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();

  if (!SrcImm->isImm() ||
      AMDGPU::isInlinableLiteral32(SrcImm->getImm(), ST->hasInv2PiInlineImm()))
    return false;

  uint32_t Imm = static_cast<uint32_t>(SrcImm->getImm());
  uint32_t NewImm = 0;

  if (Opc == AMDGPU::S_AND_B32) {
    if (isPowerOf2_32(~Imm)) {
      NewImm = countr_zero(~Imm);
      Opc = AMDGPU::S_BITSET0_B32;
    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST->hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_ANDN2_B32;
    }
  } else if (Opc == AMDGPU::S_OR_B32) {
    if (isPowerOf2_32(Imm)) {
      NewImm = countr_zero(Imm);
      Opc = AMDGPU::S_BITSET1_B32;
    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST->hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_ORN2_B32;
    }
  } else if (Opc == AMDGPU::S_XOR_B32) {
    if (AMDGPU::isInlinableLiteral32(~Imm, ST->hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_XNOR_B32;
    }
  }

  const bool IsUndef = SrcReg->isUndef();
  const bool IsKill = SrcReg->isKill();
  MI.setDesc(TII->get(Opc));
  if (Opc == AMDGPU::S_BITSET0_B32 ||
      Opc == AMDGPU::S_BITSET1_B32) {
    MI.getOperand(2).ChangeToRegister(Dest->getReg(), /*IsDef*/ false,
                                      /*isImp*/ false, IsKill,
                                      /*isDead*/ false, IsUndef);
    MI.tieOperands(0, 2);
  }
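// Concrete cases this enables (illustrative):
//   s_and_b32 s0, s0, 0xFFFFFFFB  -->  s_bitset0_b32 s0, 2   (clear one bit)
//   s_or_b32  s0, s0, 0x00000400  -->  s_bitset1_b32 s0, 10  (set one bit)
//   s_and_b32 s0, s0, 0xFFFFFFC0  -->  s_andn2_b32 s0, s0, 63
// The last form works because ~0xFFFFFFC0 == 63 is an inline constant, so no
// literal dword is needed.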
bool SIShrinkInstructions::instAccessReg(
    } else if (MO.getReg() == Reg && Reg.isVirtual()) {

bool SIShrinkInstructions::instReadsReg(const MachineInstr *MI, unsigned Reg,
                                        unsigned SubReg) const {
  return instAccessReg(MI->uses(), Reg, SubReg);
}

bool SIShrinkInstructions::instModifiesReg(const MachineInstr *MI, unsigned Reg,
                                           unsigned SubReg) const {
  return instAccessReg(MI->defs(), Reg, SubReg);
}
SIShrinkInstructions::getSubRegForIndex(Register Reg, unsigned Sub,
                                        unsigned I) const {
  if (TRI->getRegSizeInBits(Reg, *MRI) != 32) {
    if (Reg.isPhysical()) {
      Reg = TRI->getSubReg(Reg, TRI->getSubRegFromChannel(I));
    } else {
      Sub = TRI->getSubRegFromChannel(I + TRI->getChannelFromSubReg(Sub));
    }
  }
void SIShrinkInstructions::dropInstructionKeepingImpDefs(
    MachineInstr &MI) const {
  for (unsigned i = MI.getDesc().getNumOperands() +
                    MI.getDesc().implicit_uses().size() +
                    MI.getDesc().implicit_defs().size(),
                e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &Op = MI.getOperand(i);
    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
            TII->get(AMDGPU::IMPLICIT_DEF), Op.getReg());
  }

  MI.eraseFromParent();
}
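// The next routine looks for the classic three-mov exchange so it can be
// collapsed into a single swap, roughly:
//   v_mov_b32 t, x
//   v_mov_b32 x, y
//   v_mov_b32 y, t      -->   v_swap_b32 x, y   (the mov of t may become dead)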
MachineInstr *SIShrinkInstructions::matchSwap(MachineInstr &MovT) const {
  assert(MovT.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
         MovT.getOpcode() == AMDGPU::V_MOV_B16_t16_e32 ||
         MovT.getOpcode() == AMDGPU::COPY);

  unsigned Size = TII->getOpSize(MovT, 0);

  if (Size == 2 && X.isVirtual())
    return nullptr;

  const unsigned SearchLimit = 16;
  unsigned Count = 0;
  bool KilledT = false;
  for (auto Iter = std::next(MovT.getIterator()),
            E = MovT.getParent()->instr_end();
       Iter != E && Count < SearchLimit && !KilledT; ++Iter, ++Count) {

    if ((MovY->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
         MovY->getOpcode() != AMDGPU::V_MOV_B16_t16_e32 &&
         MovY->getOpcode() != AMDGPU::COPY) ||

      if (instReadsReg(&*I, X, Xsub) || instModifiesReg(&*I, Y, Ysub) ||
          instModifiesReg(&*I, T, Tsub) ||
          (MovX && instModifiesReg(&*I, X, Xsub))) {

      if (!instReadsReg(&*I, Y, Ysub)) {
        if (!MovX && instModifiesReg(&*I, X, Xsub)) {

        if ((I->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
             I->getOpcode() != AMDGPU::V_MOV_B16_t16_e32 &&
             I->getOpcode() != AMDGPU::COPY) ||
            I->getOperand(0).getReg() != X ||
            I->getOperand(0).getSubReg() != Xsub) {

        if (Size > 4 && (I->getNumImplicitOperands() > (I->isCopy() ? 0U : 1U)))

    LLVM_DEBUG(dbgs() << "Matched v_swap:\n" << MovT << *MovX << *MovY);

        TII->get(AMDGPU::V_SWAP_B16))

    for (unsigned I = 0; I < Size / 4; ++I) {
      X1 = getSubRegForIndex(X, Xsub, I);
      Y1 = getSubRegForIndex(Y, Ysub, I);
          TII->get(AMDGPU::V_SWAP_B32))

      Swap->removeOperand(Swap->getNumExplicitOperands());
    }

    dropInstructionKeepingImpDefs(*MovY);

    if (T.isVirtual() && MRI->use_nodbg_empty(T)) {
      dropInstructionKeepingImpDefs(MovT);
    }

    if (Op.isKill() && TRI->regsOverlap(X, Op.getReg()))
bool SIShrinkInstructions::tryReplaceDeadSDST(MachineInstr &MI) const {
  if (!ST->hasGFX10_3Insts())
    return false;

  Op->setReg(ST->isWave32() ? AMDGPU::SGPR_NULL : AMDGPU::SGPR_NULL64);
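// On GFX10.3+ a carry-out / compare SGPR destination that is never read can be
// redirected to the null register (SGPR_NULL, or SGPR_NULL64 in wave64), so
// the instruction no longer ties up a real SGPR.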
bool SIShrinkInstructions::run(MachineFunction &MF) {
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  unsigned VCCReg = ST->isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;

  std::vector<unsigned> I1Defs;

  for (MachineBasicBlock &MBB : MF) {
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (MI.getOpcode() == AMDGPU::V_MOV_B32_e32) {
        // Try rewriting a non-inline literal as NOT/BREV of an inline value.
        if (Src.isImm() && MI.getOperand(0).getReg().isPhysical()) {
          if (ModOpcode != 0) {
            MI.setDesc(TII->get(ModOpcode));
            Src.setImm(static_cast<int64_t>(ModImm));
          }
        }
      }

      if (ST->hasSwap() && (MI.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
                            MI.getOpcode() == AMDGPU::V_MOV_B16_t16_e32 ||
                            MI.getOpcode() == AMDGPU::COPY)) {
        if (auto *NextMI = matchSwap(MI)) {
          Next = NextMI->getIterator();
        }
      }
      if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
          MI.getOpcode() == AMDGPU::S_MUL_I32) {

        if (TII->commuteInstruction(MI, false, 1, 2))

        unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32)
                           ? AMDGPU::S_ADDK_I32
                           : AMDGPU::S_MULK_I32;

        MI.setDesc(TII->get(Opc));
        MI.tieOperands(0, 1);
      }
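// Example of this SOPK rewrite (illustrative): when the destination also reads
// the same register,
//   s_add_i32 s0, s0, 0x1234   -->   s_addk_i32 s0, 0x1234
// The 16-bit constant is carried in the SOPK instruction word, and operand 0
// is tied to operand 1 because s_addk reads and writes the same SGPR.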
      // Try to use s_cmpk_*.
      if (MI.isCompare() && TII->isSOPC(MI)) {
        shrinkScalarCompare(MI);
      }

      // Try to use S_MOVK_I32, which saves 4 bytes for small immediates.
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
        if (Src.isImm() && Dst.getReg().isPhysical()) {
          // Either rewrite to S_MOVK_I32 for a 16-bit immediate ...
          MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
          // ... or to a NOT/BREV-style opcode with an inline constant.
          MI.setDesc(TII->get(ModOpc));
          Src.setImm(static_cast<int64_t>(ModImm));
        }
      }
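// Illustrative S_MOV_B32 shrinks:
//   s_mov_b32 s0, 0x1234      -->  s_movk_i32 s0, 0x1234     (16-bit SOPK imm)
//   s_mov_b32 s0, 0x80000000  -->  s_brev_b32 s0, 1          (inline constant)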
      // Shrink scalar logic operations.
      if (MI.getOpcode() == AMDGPU::S_AND_B32 ||
          MI.getOpcode() == AMDGPU::S_OR_B32 ||
          MI.getOpcode() == AMDGPU::S_XOR_B32) {
        if (shrinkScalarLogicOp(MI))
          continue;
      }

      if (TII->isMIMG(MI.getOpcode()) &&
          MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::NoVRegs)) {

      if (!TII->isVOP3(MI))
        continue;
      if (MI.getOpcode() == AMDGPU::V_MAD_F32_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F32_e64 ||
          MI.getOpcode() == AMDGPU::V_MAD_F16_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F16_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F16_gfx9_e64) {

      // If the instruction has no 32-bit VALU encoding at all, at least try to
      // replace a dead scalar destination with the null register.
      if (!TII->hasVALU32BitEncoding(MI.getOpcode())) {
        tryReplaceDeadSDST(MI);
        continue;
      }

      // Try commuting the instruction to see whether that enables shrinking.
      if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||

        tryReplaceDeadSDST(MI);
      // VOPC instructions can only write their result to VCC in the 32-bit
      // encoding, so hint the destination towards VCC.
      if (TII->isVOPC(Op32)) {
        MRI->setRegAllocationHint(DstReg, 0, VCCReg);
        if (DstReg != VCCReg)
          continue;
      }

      // V_CNDMASK_B32_e32 requires the carry-in (src2) to be VCC.
      if (Op32 == AMDGPU::V_CNDMASK_B32_e32) {
        TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        MRI->setRegAllocationHint(SReg, 0, VCCReg);
      }

      const MachineOperand *SDst =
          TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
      if (SDst->getReg() != VCCReg) {
        MRI->setRegAllocationHint(SDst->getReg(), 0, VCCReg);
      }

      const MachineOperand *Src2 =
          TII->getNamedOperand(MI, AMDGPU::OpName::src2);
      if (Src2 && Src2->getReg() != VCCReg) {
        MRI->setRegAllocationHint(Src2->getReg(), 0, VCCReg);
      }
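// Rationale: the e32 forms of compares and carry-using/carry-producing VALU
// ops have an implicit VCC operand, so the shrink only pays off if the
// register allocator actually assigns VCC (VCC_LO in wave32); the hints above
// steer it that way.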
      // Pre-RA, a VOP3 literal is still an option, so defer shrinking until
      // after register allocation.
      if (ST->hasVOP3Literal() &&
          !MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::NoVRegs))
        continue;

      if (AMDGPU::isTrue16Inst(MI.getOpcode()) &&
          !shouldShrinkTrue16(MI))
        continue;

      ++NumInstructionsShrunk;

      // Copy extra operands not present in the instruction definition.
      copyExtraImplicitOps(*Inst32, MI);

      // Copy deadness from the old explicit vcc def to the new implicit def.
      if (SDst && SDst->isDead())
        Inst32->findRegisterDefOperand(VCCReg, TRI)->setIsDead();

      MI.eraseFromParent();
      foldImmediates(*Inst32);
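// End-to-end, a successful shrink therefore: builds the 32-bit replacement,
// bumps NumInstructionsShrunk, carries over extra implicit operands and the
// dead flag on VCC, deletes the 64-bit original, and finally tries to fold a
// literal into the new 32-bit instruction (NumLiteralConstantsFolded).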
bool SIShrinkInstructionsLegacy::runOnMachineFunction(MachineFunction &MF) {
  return SIShrinkInstructions().run(MF);
}