#define DEBUG_TYPE "si-shrink-instructions"

22 "Number of 64-bit instruction reduced to 32-bit.");
24 "Number of literal constants folded into 32-bit instructions.");
class SIShrinkInstructions {
  MachineFunction *MF = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  const GCNSubtarget *ST = nullptr;
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;

  bool foldImmediates(MachineInstr &MI, bool TryToCommute = true) const;
  bool isKImmOrKUImmOperand(const MachineOperand &Src, bool &IsUnsigned) const;

public:
  SIShrinkInstructions() = default;

  bool run(MachineFunction &MF);
};

84 "SI Shrink Instructions",
false,
false)
char SIShrinkInstructionsLegacy::ID = 0;

FunctionPass *llvm::createSIShrinkInstructionsLegacyPass() {
  return new SIShrinkInstructionsLegacy();
}

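/// Check whether src0 of \p MI is defined by a move-immediate instruction and
/// fold that literal (or frame index / global address) directly into \p MI
/// when the operand encoding allows it. If that fails and \p TryToCommute is
/// set, commute \p MI and retry once.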
bool SIShrinkInstructions::foldImmediates(MachineInstr &MI,
                                          bool TryToCommute) const {
  int Src0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);

  // Try to fold Src0.
  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  if (Src0.isReg()) {
    Register Reg = Src0.getReg();
    if (Reg.isVirtual()) {
      MachineInstr *Def = MRI->getUniqueVRegDef(Reg);
      if (Def && Def->isMoveImmediate()) {
        MachineOperand &MovSrc = Def->getOperand(1);
        bool ConstantFolded = false;

        if (TII->isOperandLegal(MI, Src0Idx, &MovSrc)) {
          if (MovSrc.isImm()) {
            Src0.ChangeToImmediate(MovSrc.getImm());
            ConstantFolded = true;
          } else if (MovSrc.isFI()) {
            Src0.ChangeToFrameIndex(MovSrc.getIndex());
            ConstantFolded = true;
          } else if (MovSrc.isGlobal()) {
            Src0.ChangeToGA(MovSrc.getGlobal(), MovSrc.getOffset(),
                            MovSrc.getTargetFlags());
            ConstantFolded = true;
          }
        }

        if (ConstantFolded) {
          // The defining move is now dead; delete it if nothing else uses it.
          if (MRI->use_nodbg_empty(Reg))
            Def->eraseFromParent();
          ++NumLiteralConstantsFolded;
          return true;
        }
      }
    }
  }
  // We failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable()) {
    if (TII->commuteInstruction(MI)) {
      if (foldImmediates(MI, false))
        return true;

      // The commuted form did not help either; commute back.
      TII->commuteInstruction(MI);
    }
  }

  return false;
}

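// A true16 instruction can only be shrunk once its VGPR operands have been
// allocated to the low-128 register classes that the 16-bit encodings can
// address.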
bool SIShrinkInstructions::shouldShrinkTrue16(MachineInstr &MI) const {
  for (unsigned I = 0, E = MI.getNumExplicitOperands(); I != E; ++I) {
    const MachineOperand &MO = MI.getOperand(I);
    if (MO.isReg()) {
      Register Reg = MO.getReg();
      assert(!Reg.isVirtual() && "Prior checks should ensure we only shrink "
                                 "True16 Instructions post-RA");
      if (AMDGPU::VGPR_32RegClass.contains(Reg) &&
          !AMDGPU::VGPR_32_Lo128RegClass.contains(Reg))
        return false;
      if (AMDGPU::VGPR_16RegClass.contains(Reg) &&
          !AMDGPU::VGPR_16_Lo128RegClass.contains(Reg))
        return false;
    }
  }
  return true;
}

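// The KImm helpers report whether an immediate fits the 16-bit literal field
// of the SOPK-style encodings and is not already representable as an inline
// constant (in which case shrinking buys nothing).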
bool SIShrinkInstructions::isKImmOperand(const MachineOperand &Src) const {
  return isInt<16>(SignExtend64(Src.getImm(), 32)) &&
         !TII->isInlineConstant(*Src.getParent(), Src.getOperandNo());
}

bool SIShrinkInstructions::isKUImmOperand(const MachineOperand &Src) const {
  return isUInt<16>(Src.getImm()) &&
         !TII->isInlineConstant(*Src.getParent(), Src.getOperandNo());
}

bool SIShrinkInstructions::isKImmOrKUImmOperand(const MachineOperand &Src,
                                                bool &IsUnsigned) const {
  if (isInt<16>(SignExtend64(Src.getImm(), 32))) {
    IsUnsigned = false;
    return !TII->isInlineConstant(Src);
  }

  if (isUInt<16>(Src.getImm())) {
    IsUnsigned = true;
    return !TII->isInlineConstant(Src);
  }

  return false;
}

206 int32_t &ModifiedImm,
bool Scalar) {
207 if (
TII->isInlineConstant(Src))
209 int32_t SrcImm =
static_cast<int32_t
>(Src.getImm());
215 ModifiedImm = ~SrcImm;
216 if (
TII->isInlineConstant(
APInt(32, ModifiedImm,
true)))
217 return AMDGPU::V_NOT_B32_e32;
220 ModifiedImm = reverseBits<int32_t>(SrcImm);
221 if (
TII->isInlineConstant(
APInt(32, ModifiedImm,
true)))
222 return Scalar ? AMDGPU::S_BREV_B32 : AMDGPU::V_BFREV_B32_e32;
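// Copy the implicit operands that were appended to MI beyond what its
// MCInstrDesc declares (extra uses/defs added by earlier passes) onto NewMI.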
void SIShrinkInstructions::copyExtraImplicitOps(MachineInstr &NewMI,
                                                MachineInstr &MI) const {
  MachineFunction &MF = *MI.getMF();
  for (unsigned i = MI.getDesc().getNumOperands() +
                    MI.getDesc().implicit_uses().size() +
                    MI.getDesc().implicit_defs().size(),
                e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
      NewMI.addOperand(MF, MO);
  }
}

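// Rewrite an S_CMP_* against a 16-bit immediate into the corresponding SOPK
// compare (S_CMPK_*), commuting first so the constant ends up on the RHS.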
void SIShrinkInstructions::shrinkScalarCompare(MachineInstr &MI) const {
  // SOPK compares take the immediate on the RHS, so commute a constant in
  // operand 0 out of the way first.
  if (!MI.getOperand(0).isReg())
    TII->commuteInstruction(MI, false, 0, 1);

  const MachineOperand &Src0 = MI.getOperand(0);
  if (!Src0.isReg())
    return;

  MachineOperand &Src1 = MI.getOperand(1);
  if (!Src1.isImm())
    return;

  int SOPKOpc = AMDGPU::getSOPKOp(MI.getOpcode());
  if (SOPKOpc == -1)
    return;

  // eq/ne is special because the imm16 can be treated as signed or unsigned,
  // and is initially selected to the unsigned form.
  if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {
    bool HasUImm;
    if (isKImmOrKUImmOperand(Src1, HasUImm)) {
      if (!HasUImm)
        SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ? AMDGPU::S_CMPK_EQ_I32
                                                     : AMDGPU::S_CMPK_LG_I32;
      MI.setDesc(TII->get(SOPKOpc));
    }
  }
}

void SIShrinkInstructions::shrinkMIMG(MachineInstr &MI) const {
  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
  if (!Info)
    return;

  unsigned NewEncoding;
  switch (Info->MIMGEncoding) {
  case AMDGPU::MIMGEncGfx10NSA:
    NewEncoding = AMDGPU::MIMGEncGfx10Default;
    break;
  case AMDGPU::MIMGEncGfx11NSA:
    NewEncoding = AMDGPU::MIMGEncGfx11Default;
    break;
  default:
    return;
  }

  int VAddr0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
  unsigned NewAddrDwords = Info->VAddrDwords;
  const TargetRegisterClass *RC;

  if (Info->VAddrDwords == 2) {
    RC = &AMDGPU::VReg_64RegClass;
  } else if (Info->VAddrDwords == 3) {
    RC = &AMDGPU::VReg_96RegClass;
  } else if (Info->VAddrDwords == 4) {
    RC = &AMDGPU::VReg_128RegClass;
  } else if (Info->VAddrDwords == 5) {
    RC = &AMDGPU::VReg_160RegClass;
  } else if (Info->VAddrDwords == 6) {
    RC = &AMDGPU::VReg_192RegClass;
  } else if (Info->VAddrDwords == 7) {
    RC = &AMDGPU::VReg_224RegClass;
  } else if (Info->VAddrDwords == 8) {
    RC = &AMDGPU::VReg_256RegClass;
  } else if (Info->VAddrDwords == 9) {
    RC = &AMDGPU::VReg_288RegClass;
  } else if (Info->VAddrDwords == 10) {
    RC = &AMDGPU::VReg_320RegClass;
  } else if (Info->VAddrDwords == 11) {
    RC = &AMDGPU::VReg_352RegClass;
  } else if (Info->VAddrDwords == 12) {
    RC = &AMDGPU::VReg_384RegClass;
  } else {
    RC = &AMDGPU::VReg_512RegClass;
    NewAddrDwords = 16;
  }
  unsigned VgprBase = 0;
  unsigned NextVgpr = 0;
  bool IsUndef = true;
  bool IsKill = NewAddrDwords == Info->VAddrDwords;
  const unsigned NSAMaxSize = ST->getNSAMaxSize();
  const bool IsPartialNSA = NewAddrDwords > NSAMaxSize;
  const unsigned EndVAddr = IsPartialNSA ? NSAMaxSize : Info->VAddrOperands;
  for (unsigned Idx = 0; Idx < EndVAddr; ++Idx) {
    const MachineOperand &Op = MI.getOperand(VAddr0Idx + Idx);
    unsigned Vgpr = TRI->getHWRegIndex(Op.getReg());
    unsigned Dwords = TRI->getRegSizeInBits(Op.getReg(), *MRI) / 32;
    assert(Dwords > 0 && "Un-implemented for less than 32 bit regs");

    // The address registers must form a single contiguous run of VGPRs.
    if (Idx == 0) {
      VgprBase = Vgpr;
      NextVgpr = Vgpr + Dwords;
    } else if (Vgpr == NextVgpr) {
      NextVgpr = Vgpr + Dwords;
    } else {
      return;
    }

    if (!Op.isUndef())
      IsUndef = false;
    if (!Op.isKill())
      IsKill = false;
  }

  if (VgprBase + NewAddrDwords > 256)
    return;

  int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
  int LWEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::lwe);
  unsigned TFEVal = (TFEIdx == -1) ? 0 : MI.getOperand(TFEIdx).getImm();
  unsigned LWEVal = (LWEIdx == -1) ? 0 : MI.getOperand(LWEIdx).getImm();
  int ToUntie = -1;
  if (TFEVal || LWEVal) {
    // With TFE/LWE set there is a tied implicit operand; untie it before the
    // operand list is rewritten.
    for (unsigned i = LWEIdx + 1, e = MI.getNumOperands(); i != e; ++i) {
      if (MI.getOperand(i).isReg() && MI.getOperand(i).isTied() &&
          MI.getOperand(i).isImplicit()) {
        // There can be only one tied implicit operand.
        assert(ToUntie == -1 &&
               "found more than one tied implicit operand when expecting only 1");
        ToUntie = i;
        MI.untieRegOperand(ToUntie);
      }
    }
  }

  int NewOpcode = AMDGPU::getMIMGOpcode(Info->BaseOpcode, NewEncoding,
                                        Info->VDataDwords, NewAddrDwords);
  MI.setDesc(TII->get(NewOpcode));
  MI.getOperand(VAddr0Idx).setReg(RC->getRegister(VgprBase));
  MI.getOperand(VAddr0Idx).setIsUndef(IsUndef);
  MI.getOperand(VAddr0Idx).setIsKill(IsKill);

  // All vaddr operands but the first are now covered by the base register.
  for (unsigned i = 1; i < EndVAddr; ++i)
    MI.removeOperand(VAddr0Idx + 1);

  if (ToUntie >= 0)
    MI.tieOperands(
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata),
        ToUntie - (EndVAddr - 1));
}

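// When one source of a V_MAD/V_FMA is a literal that cannot be inlined,
// rewrite the instruction to the fixed two-operand-plus-literal forms
// V_MADAK/V_FMAAK (literal replaces src2) or V_MADMK/V_FMAMK (literal replaces
// a multiplicand), which have 32-bit encodings.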
void SIShrinkInstructions::shrinkMadFma(MachineInstr &MI) const {
  // Pre-GFX10 VOP3 instructions cannot take a literal operand, so a VOP3
  // mad/fma with a literal source cannot occur and there is nothing to do.
  if (!ST->hasVOP3Literal())
    return;

  // There is no advantage to doing this before register allocation.
  if (!MF->getProperties().hasProperty(
          MachineFunctionProperties::Property::NoVRegs))
    return;

  if (TII->hasAnyModifiersSet(MI))
    return;

  const unsigned Opcode = MI.getOpcode();
  MachineOperand &Src0 = *TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  MachineOperand &Src1 = *TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  MachineOperand &Src2 = *TII->getNamedOperand(MI, AMDGPU::OpName::src2);
  unsigned NewOpcode = AMDGPU::INSTRUCTION_LIST_END;
  bool Swap = false;

  // Detect "Dst = VSrc * VGPR + Imm" and convert to the AK form.
  if (Src2.isImm() && !TII->isInlineConstant(Src2)) {
    switch (Opcode) {
    default:
      llvm_unreachable("Unknown mad/fma opcode");
    case AMDGPU::V_MAD_F32_e64:
      NewOpcode = AMDGPU::V_MADAK_F32;
      break;
    case AMDGPU::V_FMA_F32_e64:
      NewOpcode = AMDGPU::V_FMAAK_F32;
      break;
    case AMDGPU::V_MAD_F16_e64:
      NewOpcode = AMDGPU::V_MADAK_F16;
      break;
    case AMDGPU::V_FMA_F16_e64:
    case AMDGPU::V_FMA_F16_gfx9_e64:
    case AMDGPU::V_FMA_F16_gfx9_fake16_e64:
      NewOpcode = ST->hasTrue16BitInsts() ? AMDGPU::V_FMAAK_F16_fake16
                                          : AMDGPU::V_FMAAK_F16;
      break;
    }
  }
  // Detect "Dst = VGPR * Imm + VSrc" and convert to the MK form.
  if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
    if (Src1.isImm() && !TII->isInlineConstant(Src1))
      Swap = false;
    else if (Src0.isImm() && !TII->isInlineConstant(Src0))
      Swap = true;
    else
      return;

    switch (Opcode) {
    default:
      llvm_unreachable("Unknown mad/fma opcode");
    case AMDGPU::V_MAD_F32_e64:
      NewOpcode = AMDGPU::V_MADMK_F32;
      break;
    case AMDGPU::V_FMA_F32_e64:
      NewOpcode = AMDGPU::V_FMAMK_F32;
      break;
    case AMDGPU::V_MAD_F16_e64:
      NewOpcode = AMDGPU::V_MADMK_F16;
      break;
    case AMDGPU::V_FMA_F16_e64:
    case AMDGPU::V_FMA_F16_gfx9_e64:
    case AMDGPU::V_FMA_F16_gfx9_fake16_e64:
      NewOpcode = ST->hasTrue16BitInsts() ? AMDGPU::V_FMAMK_F16_fake16
                                          : AMDGPU::V_FMAMK_F16;
      break;
    }
  }

  if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END)
    return;

  if (Swap) {
    // The new opcode fixes which slot carries the literal, so rebuild the
    // instruction with src0 and src1 exchanged.
    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(NewOpcode),
            MI.getOperand(0).getReg())
        .add(Src1)
        .add(Src0)
        .add(Src2)
        .setMIFlags(MI.getFlags());
    MI.eraseFromParent();
  } else {
    TII->removeModOperands(MI);
    MI.setDesc(TII->get(NewOpcode));
  }
}

bool SIShrinkInstructions::shrinkScalarLogicOp(MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();
  const MachineOperand *Dest = &MI.getOperand(0);
  MachineOperand *SrcReg = &MI.getOperand(1);
  MachineOperand *SrcImm = &MI.getOperand(2);

  if (!SrcImm->isImm() ||
      AMDGPU::isInlinableLiteral32(SrcImm->getImm(), ST->hasInv2PiInlineImm()))
    return false;

  uint32_t Imm = static_cast<uint32_t>(SrcImm->getImm());
  uint32_t NewImm = 0;

  if (Opc == AMDGPU::S_AND_B32) {
    if (isPowerOf2_32(~Imm)) {
      NewImm = countr_one(Imm);
      Opc = AMDGPU::S_BITSET0_B32;
    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST->hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_ANDN2_B32;
    }
  } else if (Opc == AMDGPU::S_OR_B32) {
    if (isPowerOf2_32(Imm)) {
      NewImm = countr_zero(Imm);
      Opc = AMDGPU::S_BITSET1_B32;
    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST->hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_ORN2_B32;
    }
  } else if (Opc == AMDGPU::S_XOR_B32) {
    if (AMDGPU::isInlinableLiteral32(~Imm, ST->hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_XNOR_B32;
    }
  }

  if (NewImm != 0 && SrcReg->isReg() && SrcReg->getReg() == Dest->getReg()) {
    const bool IsUndef = SrcReg->isUndef();
    const bool IsKill = SrcReg->isKill();
    MI.setDesc(TII->get(Opc));
    if (Opc == AMDGPU::S_BITSET0_B32 ||
        Opc == AMDGPU::S_BITSET1_B32) {
      // The bitset form takes the bit index as its source and reads/writes
      // the destination, so re-add the destination as a tied use.
      SrcReg->ChangeToImmediate(NewImm);
      MI.getOperand(2).ChangeToRegister(Dest->getReg(), /*IsDef*/ false,
                                        /*IsImp*/ false, IsKill,
                                        /*IsDead*/ false, IsUndef);
      MI.tieOperands(0, 2);
    } else {
      SrcImm->setImm(NewImm);
    }
  }

  return false;
}

bool SIShrinkInstructions::instAccessReg(
    iterator_range<MachineInstr::const_mop_iterator> &&R, Register Reg,
    unsigned SubReg) const {
  for (const MachineOperand &MO : R) {
    if (!MO.isReg())
      continue;

    if (Reg.isPhysical() && MO.getReg().isPhysical()) {
      if (TRI->regsOverlap(Reg, MO.getReg()))
        return true;
    } else if (MO.getReg() == Reg && Reg.isVirtual()) {
      LaneBitmask Overlap = TRI->getSubRegIndexLaneMask(SubReg) &
                            TRI->getSubRegIndexLaneMask(MO.getSubReg());
      if (Overlap.any())
        return true;
    }
  }
  return false;
}

bool SIShrinkInstructions::instReadsReg(const MachineInstr *MI, unsigned Reg,
                                        unsigned SubReg) const {
  return instAccessReg(MI->uses(), Reg, SubReg);
}

bool SIShrinkInstructions::instModifiesReg(const MachineInstr *MI, unsigned Reg,
                                           unsigned SubReg) const {
  return instAccessReg(MI->defs(), Reg, SubReg);
}

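// Return the register/sub-register pair that addresses 32-bit channel I of
// Reg, handling both physical and virtual registers.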
TargetInstrInfo::RegSubRegPair
SIShrinkInstructions::getSubRegForIndex(Register Reg, unsigned Sub,
                                        unsigned I) const {
  if (TRI->getRegSizeInBits(Reg, *MRI) != 32) {
    if (Reg.isPhysical()) {
      Reg = TRI->getSubReg(Reg, TRI->getSubRegFromChannel(I));
    } else {
      Sub = TRI->getSubRegFromChannel(I + TRI->getChannelFromSubReg(Sub));
    }
  }
  return TargetInstrInfo::RegSubRegPair(Reg, Sub);
}

void SIShrinkInstructions::dropInstructionKeepingImpDefs(
    MachineInstr &MI) const {
  for (unsigned i = MI.getDesc().getNumOperands() +
                    MI.getDesc().implicit_uses().size() +
                    MI.getDesc().implicit_defs().size(),
                e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &Op = MI.getOperand(i);
    if (!Op.isDef())
      continue;
    BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
            TII->get(AMDGPU::IMPLICIT_DEF), Op.getReg());
  }

  MI.eraseFromParent();
}

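// matchSwap: given a move "t = x", look ahead for the matching "x = y" and
// "y = t" moves and replace the triple with V_SWAP_B32 (or V_SWAP_B16)
// instructions. Returns the instruction to continue scanning from, or null if
// no swap was matched.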
MachineInstr *SIShrinkInstructions::matchSwap(MachineInstr &MovT) const {
  assert(MovT.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
         MovT.getOpcode() == AMDGPU::V_MOV_B16_t16_e32 ||
         MovT.getOpcode() == AMDGPU::COPY);

  Register T = MovT.getOperand(0).getReg();
  unsigned Tsub = MovT.getOperand(0).getSubReg();
  MachineOperand &Xop = MovT.getOperand(1);
  if (!Xop.isReg())
    return nullptr;
  Register X = Xop.getReg();
  unsigned Xsub = Xop.getSubReg();

  unsigned Size = TII->getOpSize(MovT, 0);
  if (Size == 2 && X.isVirtual())
    return nullptr;

  const unsigned SearchLimit = 16;
  unsigned Count = 0;
  bool KilledT = false;
  for (auto Iter = std::next(MovT.getIterator()),
            E = MovT.getParent()->instr_end();
       Iter != E && Count < SearchLimit && !KilledT; ++Iter, ++Count) {
704 if ((MovY->
getOpcode() != AMDGPU::V_MOV_B32_e32 &&
705 MovY->
getOpcode() != AMDGPU::V_MOV_B16_t16_e32 &&
720 if (instReadsReg(&*
I,
X, Xsub) || instModifiesReg(&*
I,
Y, Ysub) ||
721 instModifiesReg(&*
I,
T, Tsub) ||
722 (MovX && instModifiesReg(&*
I,
X, Xsub))) {
726 if (!instReadsReg(&*
I,
Y, Ysub)) {
727 if (!MovX && instModifiesReg(&*
I,
X, Xsub)) {
734 (
I->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
735 I->getOpcode() != AMDGPU::V_MOV_B16_t16_e32 &&
736 I->getOpcode() != AMDGPU::COPY) ||
737 I->getOperand(0).getReg() !=
X ||
738 I->getOperand(0).getSubReg() != Xsub) {
743 if (
Size > 4 && (
I->getNumImplicitOperands() > (
I->isCopy() ? 0U : 1U)))
    LLVM_DEBUG(dbgs() << "Matched v_swap:\n" << MovT << *MovX << *MovY);

    SmallVector<MachineInstr *, 4> Swaps;
    if (Size == 2) {
      // One V_SWAP_B16 replaces the three 16-bit moves.
      auto *MIB = BuildMI(*MovT.getParent(), MovX->getIterator(),
                          MovT.getDebugLoc(), TII->get(AMDGPU::V_SWAP_B16))
                      .addDef(X)
                      .addDef(Y)
                      .addReg(Y)
                      .addReg(X)
                      .getInstr();
      Swaps.push_back(MIB);
    } else {
      // Emit one V_SWAP_B32 per 32-bit channel of the swapped registers.
      assert(Size > 0 && Size % 4 == 0);
      for (unsigned I = 0; I < Size / 4; ++I) {
        TargetInstrInfo::RegSubRegPair X1, Y1;
        X1 = getSubRegForIndex(X, Xsub, I);
        Y1 = getSubRegForIndex(Y, Ysub, I);
        auto *MIB = BuildMI(*MovT.getParent(), MovX->getIterator(),
                            MovT.getDebugLoc(), TII->get(AMDGPU::V_SWAP_B32))
                        .addDef(X1.Reg, 0, X1.SubReg)
                        .addDef(Y1.Reg, 0, Y1.SubReg)
                        .addReg(Y1.Reg, 0, Y1.SubReg)
                        .addReg(X1.Reg, 0, X1.SubReg)
                        .getInstr();
        Swaps.push_back(MIB);
      }
    }

    if (MovX->hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
      for (MachineInstr *Swap : Swaps) {
        Swap->removeOperand(Swap->getNumExplicitOperands());
        Swap->copyImplicitOps(*MovT.getMF(), *MovX);
      }
    }
    MovX->eraseFromParent();
    dropInstructionKeepingImpDefs(*MovY);
    MachineInstr *Next = &*std::next(MovT.getIterator());

    if (T.isVirtual() && MRI->use_nodbg_empty(T)) {
      dropInstructionKeepingImpDefs(MovT);
    } else {
      Xop.setIsKill(false);
      for (int I = MovT.getNumImplicitOperands() - 1; I >= 0; --I) {
        unsigned OpNo = MovT.getNumExplicitOperands() + I;
        const MachineOperand &Op = MovT.getOperand(OpNo);
        if (Op.isKill() && TRI->regsOverlap(X, Op.getReg()))
          MovT.removeOperand(OpNo);
      }
    }

    return Next;
  }

  return nullptr;
}

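// If the SGPR carry/compare destination (sdst) of MI is dead, redirect it to
// the null register; this is only available on GFX10.3+.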
bool SIShrinkInstructions::tryReplaceDeadSDST(MachineInstr &MI) const {
  if (!ST->hasGFX10_3Insts())
    return false;

  MachineOperand *Op = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
  if (!Op)
    return false;
  Register SDstReg = Op->getReg();
  if (SDstReg.isPhysical() || !MRI->use_nodbg_empty(SDstReg))
    return false;

  Op->setReg(ST->isWave32() ? AMDGPU::SGPR_NULL : AMDGPU::SGPR_NULL64);
  return true;
}

bool SIShrinkInstructions::run(MachineFunction &MF) {
  this->MF = &MF;
  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  unsigned VCCReg = ST->isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;

  std::vector<unsigned> I1Defs;

  for (MachineBasicBlock &MBB : MF) {
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (MI.getOpcode() == AMDGPU::V_MOV_B32_e32) {
        // A literal mov into a physical register may instead be expressible
        // as a V_NOT_B32 or V_BFREV_B32 of an inline constant, dropping the
        // literal dword.
        MachineOperand &Src = MI.getOperand(1);
        if (Src.isImm() && MI.getOperand(0).getReg().isPhysical()) {
          int32_t ModImm;
          unsigned ModOpcode =
              canModifyToInlineImmOp32(TII, Src, ModImm, /*Scalar=*/false);
          if (ModOpcode != 0) {
            MI.setDesc(TII->get(ModOpcode));
            Src.setImm(static_cast<int64_t>(ModImm));
            continue;
          }
        }
      }
      if (ST->hasSwap() && (MI.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
                            MI.getOpcode() == AMDGPU::V_MOV_B16_t16_e32 ||
                            MI.getOpcode() == AMDGPU::COPY)) {
        if (auto *NextMI = matchSwap(MI)) {
          Next = NextMI->getIterator();
          continue;
        }
      }
      // Try to use the SOPK forms S_ADDK_I32 / S_MULK_I32, which carry the
      // constant as a 16-bit immediate and tie the destination to src0.
      if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
          MI.getOpcode() == AMDGPU::S_MUL_I32) {
        const MachineOperand *Dest = &MI.getOperand(0);
        MachineOperand *Src0 = &MI.getOperand(1);
        MachineOperand *Src1 = &MI.getOperand(2);

        // Canonicalize the constant onto src1.
        if (!Src0->isReg() && Src1->isReg()) {
          if (TII->commuteInstruction(MI, false, 1, 2))
            std::swap(Src0, Src1);
        }

        if (Src0->isReg() && Src0->getReg() == Dest->getReg() &&
            Src1->isImm() && isKImmOperand(*Src1)) {
          unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ?
            AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;

          MI.setDesc(TII->get(Opc));
          MI.tieOperands(0, 1);
        }
      }
      // Try to use s_cmpk_*.
      if (MI.isCompare() && TII->isSOPC(MI)) {
        shrinkScalarCompare(MI);
        continue;
      }
      // Try to use S_MOVK_I32, or a NOT/BREV of an inline constant, both of
      // which save the 32-bit literal dword.
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
        const MachineOperand &Dst = MI.getOperand(0);
        MachineOperand &Src = MI.getOperand(1);

        if (Src.isImm() && Dst.getReg().isPhysical()) {
          unsigned ModOpc;
          int32_t ModImm;
          if (isKImmOperand(Src)) {
            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
          } else if ((ModOpc = canModifyToInlineImmOp32(TII, Src, ModImm,
                                                        /*Scalar=*/true))) {
            MI.setDesc(TII->get(ModOpc));
            Src.setImm(static_cast<int64_t>(ModImm));
          }
        }
      }
      // Try to use s_andn2/s_orn2/s_xnor or the bitset forms.
      if (MI.getOpcode() == AMDGPU::S_AND_B32 ||
          MI.getOpcode() == AMDGPU::S_OR_B32 ||
          MI.getOpcode() == AMDGPU::S_XOR_B32) {
        if (shrinkScalarLogicOp(MI))
          continue;
      }
      // Shrink NSA-encoded MIMG instructions post-RA on GFX10+.
      if (TII->isMIMG(MI.getOpcode()) &&
          ST->getGeneration() >= AMDGPUSubtarget::GFX10 &&
          MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::NoVRegs)) {
        shrinkMIMG(MI);
        continue;
      }

      if (!TII->isVOP3(MI))
        continue;
      if (MI.getOpcode() == AMDGPU::V_MAD_F32_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F32_e64 ||
          MI.getOpcode() == AMDGPU::V_MAD_F16_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F16_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F16_gfx9_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F16_gfx9_fake16_e64) {
        shrinkMadFma(MI);
        continue;
      }
      if (!TII->hasVALU32BitEncoding(MI.getOpcode())) {
        // No 32-bit encoding exists; the best we can do is retire a dead sdst.
        tryReplaceDeadSDST(MI);
        continue;
      }

      if (!TII->canShrink(MI, *MRI)) {
        // Try commuting the instruction and see if that enables us to convert
        // to the dword encoding.
        if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
            !TII->canShrink(MI, *MRI)) {
          tryReplaceDeadSDST(MI);
          continue;
        }
      }

      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());

      if (TII->isVOPC(Op32)) {
        Register DstReg = MI.getOperand(0).getReg();
        if (DstReg.isVirtual()) {
          // VOPC in e32 form writes VCC; hint the virtual def toward VCC so a
          // post-RA run of this pass can do the shrink.
          MRI->setRegAllocationHint(DstReg, 0, VCCReg);
          continue;
        }
        if (DstReg != VCCReg)
          continue;
      }

      if (Op32 == AMDGPU::V_CNDMASK_B32_e32) {
        // The e32 form of V_CNDMASK reads its condition from VCC, so src2 is
        // handled the same way.
        const MachineOperand *Src2 =
            TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        Register SReg = Src2->getReg();
        if (SReg.isVirtual()) {
          MRI->setRegAllocationHint(SReg, 0, VCCReg);
          continue;
        }
        if (SReg != VCCReg)
          continue;
      }
      // Check the bool flag output of carry-writing VALU instructions: the
      // 32-bit forms read and write the carry through VCC.
      const MachineOperand *SDst =
          TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
      if (SDst) {
        bool NeedsVCC = false;

        if (SDst->getReg() != VCCReg) {
          if (SDst->getReg().isVirtual())
            MRI->setRegAllocationHint(SDst->getReg(), 0, VCCReg);
          NeedsVCC = true;
        }

        const MachineOperand *Src2 =
            TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        if (Src2 && Src2->getReg() != VCCReg) {
          if (Src2->getReg().isVirtual())
            MRI->setRegAllocationHint(Src2->getReg(), 0, VCCReg);
          NeedsVCC = true;
        }

        if (NeedsVCC)
          continue;
      }
      // On subtargets where VOP3 can encode a literal there is no benefit to
      // shrinking before register allocation.
      if (ST->hasVOP3Literal() &&
          !MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::NoVRegs))
        continue;

      if (ST->hasTrue16BitInsts() && AMDGPU::isTrue16Inst(MI.getOpcode()) &&
          !shouldShrinkTrue16(MI))
        continue;

      MachineInstr *Inst32 = TII->buildShrunkInst(MI, Op32);
      ++NumInstructionsShrunk;

      // Copy extra operands not present in the instruction definition.
      copyExtraImplicitOps(*Inst32, MI);

      // Copy deadness from the old explicit vcc def to the new implicit def.
      if (SDst && SDst->isDead())
        Inst32->findRegisterDefOperand(VCCReg, TRI)->setIsDead();

      MI.eraseFromParent();
      foldImmediates(*Inst32);
    }
  }

  return false;
}

bool SIShrinkInstructionsLegacy::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  return SIShrinkInstructions().run(MF);
}