#define DEBUG_TYPE "si-shrink-instructions"
21 "Number of 64-bit instruction reduced to 32-bit.");
23 "Number of literal constants folded into 32-bit instructions.");
class SIShrinkInstructions : public MachineFunctionPass {
public:
  static char ID;
  // ...
  bool foldImmediates(MachineInstr &MI, bool TryToCommute = true) const;
  bool isKImmOrKUImmOperand(const MachineOperand &Src, bool &IsUnsigned) const;
  // ...
};
78 "SI Shrink Instructions",
false,
false)
80char SIShrinkInstructions::
ID = 0;
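// External entry point for creating the pass.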
FunctionPass *llvm::createSIShrinkInstructionsPass() {
  return new SIShrinkInstructions();
}
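/// This function checks \p MI for operands defined by a move immediate
/// instruction and then folds the literal constant into the instruction if it
/// can. It assumes that \p MI is a VOP1, VOP2, or VOPC instruction.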
bool SIShrinkInstructions::foldImmediates(MachineInstr &MI,
                                          bool TryToCommute) const {
  assert(TII->isVOP1(MI) || TII->isVOP2(MI) || TII->isVOPC(MI));

  int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);

  // Try to fold Src0.
  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  if (Src0.isReg()) {
    Register Reg = Src0.getReg();
    if (Reg.isVirtual()) {
      MachineInstr *Def = MRI->getUniqueVRegDef(Reg);
      if (Def && Def->isMoveImmediate()) {
        MachineOperand &MovSrc = Def->getOperand(1);
        bool ConstantFolded = false;

        if (TII->isOperandLegal(MI, Src0Idx, &MovSrc)) {
          if (MovSrc.isImm()) {
            Src0.ChangeToImmediate(MovSrc.getImm());
            ConstantFolded = true;
          } else if (MovSrc.isFI()) {
            Src0.ChangeToFrameIndex(MovSrc.getIndex());
            ConstantFolded = true;
          } else if (MovSrc.isGlobal()) {
            Src0.ChangeToGA(MovSrc.getGlobal(), MovSrc.getOffset(),
                            MovSrc.getTargetFlags());
            ConstantFolded = true;
          }
        }

        if (ConstantFolded) {
          if (MRI->use_nodbg_empty(Reg))
            Def->eraseFromParent();
          ++NumLiteralConstantsFolded;
          return true;
        }
      }
    }
  }

  // We have failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable()) {
    if (TII->commuteInstruction(MI)) {
      if (foldImmediates(MI, false))
        return true;

      // Commute back.
      TII->commuteInstruction(MI);
    }
  }

  return false;
}
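// True16 instructions are shrunk post-RA, and their 16-bit forms can only
// address the low half of the VGPR file; reject any operand outside the
// Lo128 register classes.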
bool SIShrinkInstructions::shouldShrinkTrue16(MachineInstr &MI) const {
  for (unsigned I = 0, E = MI.getNumExplicitOperands(); I != E; ++I) {
    const MachineOperand &MO = MI.getOperand(I);
    if (MO.isReg()) {
      Register Reg = MO.getReg();
      assert(!Reg.isVirtual() && "Prior checks should ensure we only shrink "
                                 "True16 Instructions post-RA");
      if (AMDGPU::VGPR_32RegClass.contains(Reg) &&
          !AMDGPU::VGPR_32_Lo128RegClass.contains(Reg))
        return false;
      if (AMDGPU::VGPR_16RegClass.contains(Reg) &&
          !AMDGPU::VGPR_16_Lo128RegClass.contains(Reg))
        return false;
    }
  }
  return true;
}
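// Helpers: an operand qualifies for the 16-bit literal (simm16 / uimm16)
// field of the SOPK-style encodings only if it fits in 16 bits and is not
// already free as an inline constant.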
bool SIShrinkInstructions::isKImmOperand(const MachineOperand &Src) const {
  return isInt<16>(Src.getImm()) &&
         !TII->isInlineConstant(*Src.getParent(), Src.getOperandNo());
}
bool SIShrinkInstructions::isKUImmOperand(const MachineOperand &Src) const {
  return isUInt<16>(Src.getImm()) &&
         !TII->isInlineConstant(*Src.getParent(), Src.getOperandNo());
}
bool SIShrinkInstructions::isKImmOrKUImmOperand(const MachineOperand &Src,
                                                bool &IsUnsigned) const {
  if (isInt<16>(Src.getImm())) {
    IsUnsigned = false;
    return !TII->isInlineConstant(Src);
  }

  if (isUInt<16>(Src.getImm())) {
    IsUnsigned = true;
    return !TII->isInlineConstant(Src);
  }

  return false;
}
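/// \returns the opcode that a move-immediate of constant \p Src can be
/// rewritten to when the constant itself is not inline but its bitwise-not or
/// bit-reversal is; \p ModifiedImm receives the transformed immediate.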
static unsigned canModifyToInlineImmOp32(const SIInstrInfo *TII,
                                         const MachineOperand &Src,
                                         int32_t &ModifiedImm, bool Scalar) {
  if (TII->isInlineConstant(Src))
    return 0;
  int32_t SrcImm = static_cast<int32_t>(Src.getImm());

  if (!Scalar) {
    // The scalar case would need a check that SCC is not live, as S_NOT_B32
    // clobbers it.
    ModifiedImm = ~SrcImm;
    if (TII->isInlineConstant(APInt(32, ModifiedImm)))
      return AMDGPU::V_NOT_B32_e32;
  }

  ModifiedImm = reverseBits<int32_t>(SrcImm);
  if (TII->isInlineConstant(APInt(32, ModifiedImm)))
    return Scalar ? AMDGPU::S_BREV_B32 : AMDGPU::V_BFREV_B32_e32;

  return 0;
}
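// Copy any extra implicit register / regmask operands that were appended
// beyond the operands declared by the instruction's MCInstrDesc.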
void SIShrinkInstructions::copyExtraImplicitOps(MachineInstr &NewMI,
                                                MachineInstr &MI) const {
  MachineFunction &MF = *MI.getMF();
  for (unsigned i = MI.getDesc().getNumOperands() +
                    MI.getDesc().implicit_uses().size() +
                    MI.getDesc().implicit_defs().size(),
                e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
      NewMI.addOperand(MF, MO);
  }
}
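// Rewrite s_cmp_* with a 16-bit literal into the shorter s_cmpk_* form.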
void SIShrinkInstructions::shrinkScalarCompare(MachineInstr &MI) const {
  // cmpk instructions do scc = dst <cc op> imm16, so commute the instruction
  // to get constants on the RHS.
  if (!MI.getOperand(0).isReg())
    TII->commuteInstruction(MI, false, 0, 1);

  // cmpk requires src0 to be a register and src1 an immediate.
  MachineOperand &Src1 = MI.getOperand(1);
  if (!Src1.isImm())
    return;

  int SOPKOpc = AMDGPU::getSOPKOp(MI.getOpcode());
  if (SOPKOpc == -1)
    return;

  // eq/ne is special because the imm16 can be treated as signed or unsigned,
  // and initially selected to the unsigned versions.
  if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {
    bool HasUImm;
    if (isKImmOrKUImmOperand(Src1, HasUImm)) {
      if (!HasUImm) {
        SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ?
          AMDGPU::S_CMPK_EQ_I32 : AMDGPU::S_CMPK_LG_I32;
        Src1.setImm(SignExtend32(Src1.getImm(), 32));
      }

      MI.setDesc(TII->get(SOPKOpc));
    }
    return;
  }
  // ...
}
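// Shrink NSA encoded instructions with contiguous VGPRs to non-NSA encoding.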
bool SIShrinkInstructions::shrinkMIMG(MachineInstr &MI) const {
  const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(MI.getOpcode());
  if (!Info)
    return false;

  unsigned NewEncoding;
  switch (Info->MIMGEncoding) {
  case AMDGPU::MIMGEncGfx10NSA:
    NewEncoding = AMDGPU::MIMGEncGfx10Default;
    break;
  case AMDGPU::MIMGEncGfx11NSA:
    NewEncoding = AMDGPU::MIMGEncGfx11Default;
    break;
  default:
    return false;
  }

  int VAddr0Idx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
  unsigned NewAddrDwords = Info->VAddrDwords;
  const TargetRegisterClass *RC;

  if (Info->VAddrDwords == 2) {
    RC = &AMDGPU::VReg_64RegClass;
  } else if (Info->VAddrDwords == 3) {
    RC = &AMDGPU::VReg_96RegClass;
  } else if (Info->VAddrDwords == 4) {
    RC = &AMDGPU::VReg_128RegClass;
  } else if (Info->VAddrDwords == 5) {
    RC = &AMDGPU::VReg_160RegClass;
  } else if (Info->VAddrDwords == 6) {
    RC = &AMDGPU::VReg_192RegClass;
  } else if (Info->VAddrDwords == 7) {
    RC = &AMDGPU::VReg_224RegClass;
  } else if (Info->VAddrDwords == 8) {
    RC = &AMDGPU::VReg_256RegClass;
  } else if (Info->VAddrDwords == 9) {
    RC = &AMDGPU::VReg_288RegClass;
  } else if (Info->VAddrDwords == 10) {
    RC = &AMDGPU::VReg_320RegClass;
  } else if (Info->VAddrDwords == 11) {
    RC = &AMDGPU::VReg_352RegClass;
  } else if (Info->VAddrDwords == 12) {
    RC = &AMDGPU::VReg_384RegClass;
  } else {
    RC = &AMDGPU::VReg_512RegClass;
    NewAddrDwords = 16;
  }

  unsigned VgprBase = 0;
  unsigned NextVgpr = 0;
  bool IsUndef = true;
  bool IsKill = NewAddrDwords == Info->VAddrDwords;
  const unsigned NSAMaxSize = ST->getNSAMaxSize();
  const bool IsPartialNSA = NewAddrDwords > NSAMaxSize;
  const unsigned EndVAddr = IsPartialNSA ? NSAMaxSize : Info->VAddrOperands;
  for (unsigned Idx = 0; Idx < EndVAddr; ++Idx) {
    const MachineOperand &Op = MI.getOperand(VAddr0Idx + Idx);
    unsigned Vgpr = TRI->getHWRegIndex(Op.getReg());
    unsigned Dwords = TRI->getRegSizeInBits(Op.getReg(), *MRI) / 32;
    assert(Dwords > 0 && "Un-implemented for less than 32 bit regs");

    if (Idx == 0) {
      VgprBase = Vgpr;
      NextVgpr = Vgpr + Dwords;
    } else if (Vgpr == NextVgpr) {
      NextVgpr = Vgpr + Dwords;
    } else {
      return false;
    }

    if (!Op.isUndef())
      IsUndef = false;
    if (!Op.isKill())
      IsKill = false;
  }

  if (VgprBase + NewAddrDwords > 256)
    return false;

  // Further check for implicit tied operands - this may be present if TFE is
  // enabled.
  int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::tfe);
  int LWEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::lwe);
  unsigned TFEVal = (TFEIdx == -1) ? 0 : MI.getOperand(TFEIdx).getImm();
  unsigned LWEVal = (LWEIdx == -1) ? 0 : MI.getOperand(LWEIdx).getImm();
  int ToUntie = -1;
  if (TFEVal || LWEVal) {
    for (unsigned i = LWEIdx + 1, e = MI.getNumOperands(); i != e; ++i) {
      if (MI.getOperand(i).isReg() && MI.getOperand(i).isTied() &&
          MI.getOperand(i).isImplicit()) {
        assert(ToUntie == -1 &&
               "found more than one tied implicit operand when expecting only 1");
        ToUntie = i;
        MI.untieRegOperand(ToUntie);
      }
    }
  }

  unsigned NewOpcode = AMDGPU::getMIMGOpcode(Info->BaseOpcode, NewEncoding,
                                             Info->VDataDwords, NewAddrDwords);
  MI.setDesc(TII->get(NewOpcode));
  MI.getOperand(VAddr0Idx).setReg(RC->getRegister(VgprBase));
  MI.getOperand(VAddr0Idx).setIsUndef(IsUndef);
  MI.getOperand(VAddr0Idx).setIsKill(IsKill);

  for (unsigned i = 1; i < EndVAddr; ++i)
    MI.removeOperand(VAddr0Idx + 1);

  if (ToUntie >= 0) {
    MI.tieOperands(
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata),
        ToUntie - (EndVAddr - 1));
  }
  return true;
}
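// Shrink VOP3 mad/fma to the two-address madak/madmk (fmaak/fmamk) forms when
// exactly one source is a literal that cannot be encoded as an inline
// constant.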
void SIShrinkInstructions::shrinkMadFma(MachineInstr &MI) const {
  // Pre-GFX10 VOP3 instructions like MAD/FMA cannot take a literal operand.
  if (!ST->hasVOP3Literal())
    return;

  // There is no advantage to doing this pre-RA.
  if (!MF->getProperties().hasProperty(
          MachineFunctionProperties::Property::NoVRegs))
    return;

  if (TII->hasAnyModifiersSet(MI))
    return;

  const unsigned Opcode = MI.getOpcode();
  MachineOperand &Src0 = *TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  MachineOperand &Src1 = *TII->getNamedOperand(MI, AMDGPU::OpName::src1);
  MachineOperand &Src2 = *TII->getNamedOperand(MI, AMDGPU::OpName::src2);
  unsigned NewOpcode = AMDGPU::INSTRUCTION_LIST_END;

  bool Swap;

  // Detect "Dst = VSrc * VGPR + Imm" and convert to AK form.
  if (Src2.isImm() && !TII->isInlineConstant(Src2)) {
    if (Src1.isReg() && TRI->isVGPR(*MRI, Src1.getReg()))
      Swap = false;
    else if (Src0.isReg() && TRI->isVGPR(*MRI, Src0.getReg()))
      Swap = true;
    else
      return;

    switch (Opcode) {
    default:
      llvm_unreachable("Unexpected mad/fma opcode!");
    case AMDGPU::V_MAD_F32_e64:
      NewOpcode = AMDGPU::V_MADAK_F32;
      break;
    case AMDGPU::V_FMA_F32_e64:
      NewOpcode = AMDGPU::V_FMAAK_F32;
      break;
    case AMDGPU::V_MAD_F16_e64:
      NewOpcode = AMDGPU::V_MADAK_F16;
      break;
    case AMDGPU::V_FMA_F16_e64:
    case AMDGPU::V_FMA_F16_gfx9_e64:
      NewOpcode = ST->hasTrue16BitInsts() ? AMDGPU::V_FMAAK_F16_t16
                                          : AMDGPU::V_FMAAK_F16;
      break;
    }
  }

  // Detect "Dst = VSrc * Imm + VGPR" and convert to MK form.
  if (Src2.isReg() && TRI->isVGPR(*MRI, Src2.getReg())) {
    if (Src1.isImm() && !TII->isInlineConstant(Src1))
      Swap = false;
    else if (Src0.isImm() && !TII->isInlineConstant(Src0))
      Swap = true;
    else
      return;

    switch (Opcode) {
    default:
      llvm_unreachable("Unexpected mad/fma opcode!");
    case AMDGPU::V_MAD_F32_e64:
      NewOpcode = AMDGPU::V_MADMK_F32;
      break;
    case AMDGPU::V_FMA_F32_e64:
      NewOpcode = AMDGPU::V_FMAMK_F32;
      break;
    case AMDGPU::V_MAD_F16_e64:
      NewOpcode = AMDGPU::V_MADMK_F16;
      break;
    case AMDGPU::V_FMA_F16_e64:
    case AMDGPU::V_FMA_F16_gfx9_e64:
      NewOpcode = ST->hasTrue16BitInsts() ? AMDGPU::V_FMAMK_F16_t16
                                          : AMDGPU::V_FMAMK_F16;
      break;
    }
  }

  if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END)
    return;

  if (AMDGPU::isTrue16Inst(NewOpcode) && !shouldShrinkTrue16(MI))
    return;

  if (Swap) {
    // Swap Src0 and Src1 by building a new instruction.
    BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(NewOpcode),
            MI.getOperand(0).getReg())
        .add(Src1)
        .add(Src0)
        .add(Src2)
        .setMIFlags(MI.getFlags());
    MI.eraseFromParent();
  } else {
    TII->removeModOperands(MI);
    MI.setDesc(TII->get(NewOpcode));
  }
}
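/// Attempt to shrink AND/OR/XOR operations requiring non-inlineable literals.
/// For AND or OR, try using S_BITSET{0,1} to clear or set bits. If the
/// inverse of the immediate is legal, use ANDN2, ORN2 or XNOR
/// (as a ^ b == ~(a ^ ~b)).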
bool SIShrinkInstructions::shrinkScalarLogicOp(MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();
  const MachineOperand *Dest = &MI.getOperand(0);
  MachineOperand *SrcReg = &MI.getOperand(1);
  MachineOperand *SrcImm = &MI.getOperand(2);

  if (!SrcImm->isImm() ||
      AMDGPU::isInlinableLiteral32(SrcImm->getImm(), ST->hasInv2PiInlineImm()))
    return false;

  uint32_t Imm = static_cast<uint32_t>(SrcImm->getImm());
  uint32_t NewImm = 0;

  if (Opc == AMDGPU::S_AND_B32) {
    if (isPowerOf2_32(~Imm)) {
      NewImm = countr_one(Imm);
      Opc = AMDGPU::S_BITSET0_B32;
    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST->hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_ANDN2_B32;
    }
  } else if (Opc == AMDGPU::S_OR_B32) {
    if (isPowerOf2_32(Imm)) {
      NewImm = countr_zero(Imm);
      Opc = AMDGPU::S_BITSET1_B32;
    } else if (AMDGPU::isInlinableLiteral32(~Imm, ST->hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_ORN2_B32;
    }
  } else if (Opc == AMDGPU::S_XOR_B32) {
    if (AMDGPU::isInlinableLiteral32(~Imm, ST->hasInv2PiInlineImm())) {
      NewImm = ~Imm;
      Opc = AMDGPU::S_XNOR_B32;
    }
  } else {
    llvm_unreachable("unexpected opcode");
  }

  if (NewImm != 0) {
    if (SrcReg->isReg() && SrcReg->getReg() == Dest->getReg()) {
      const bool IsUndef = SrcReg->isUndef();
      const bool IsKill = SrcReg->isKill();
      MI.setDesc(TII->get(Opc));
      if (Opc == AMDGPU::S_BITSET0_B32 ||
          Opc == AMDGPU::S_BITSET1_B32) {
        SrcReg->ChangeToImmediate(NewImm);
        // Remove the immediate and add the tied input.
        MI.getOperand(2).ChangeToRegister(Dest->getReg(), /*IsDef*/ false,
                                          /*isImp*/ false, IsKill,
                                          /*isDead*/ false, IsUndef);
        MI.tieOperands(0, 2);
      } else {
        SrcImm->setImm(NewImm);
      }
    }
  }

  return false;
}
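// This is the same as MachineInstr::readsRegister/modifiesRegister, except it
// takes subregisters into account.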
bool SIShrinkInstructions::instAccessReg(
    iterator_range<MachineInstr::const_mop_iterator> &&R, Register Reg,
    unsigned SubReg) const {
  for (const MachineOperand &MO : R) {
    if (!MO.isReg())
      continue;

    if (Reg.isPhysical() && MO.getReg().isPhysical()) {
      if (TRI->regsOverlap(Reg, MO.getReg()))
        return true;
    } else if (MO.getReg() == Reg && Reg.isVirtual()) {
      LaneBitmask Overlap = TRI->getSubRegIndexLaneMask(SubReg) &
                            TRI->getSubRegIndexLaneMask(MO.getSubReg());
      if (Overlap.any())
        return true;
    }
  }
  return false;
}

bool SIShrinkInstructions::instReadsReg(const MachineInstr *MI, unsigned Reg,
                                        unsigned SubReg) const {
  return instAccessReg(MI->uses(), Reg, SubReg);
}

bool SIShrinkInstructions::instModifiesReg(const MachineInstr *MI, unsigned Reg,
                                           unsigned SubReg) const {
  return instAccessReg(MI->defs(), Reg, SubReg);
}
TargetInstrInfo::RegSubRegPair
SIShrinkInstructions::getSubRegForIndex(Register Reg, unsigned Sub,
                                        unsigned I) const {
  if (TRI->getRegSizeInBits(Reg, *MRI) != 32) {
    if (Reg.isPhysical()) {
      Reg = TRI->getSubReg(Reg, TRI->getSubRegFromChannel(I));
    } else {
      Sub = TRI->getSubRegFromChannel(I + TRI->getChannelFromSubReg(Sub));
    }
  }
  return TargetInstrInfo::RegSubRegPair(Reg, Sub);
}
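// Drop the instruction, but preserve its extra implicit defs by
// rematerializing them as IMPLICIT_DEFs so downstream liveness stays correct.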
void SIShrinkInstructions::dropInstructionKeepingImpDefs(
    MachineInstr &MI) const {
  for (unsigned i = MI.getDesc().getNumOperands() +
                    MI.getDesc().implicit_uses().size() +
                    MI.getDesc().implicit_defs().size(),
                e = MI.getNumOperands();
       i != e; ++i) {
    const MachineOperand &Op = MI.getOperand(i);
    if (!Op.isDef())
      continue;
    BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
            TII->get(AMDGPU::IMPLICIT_DEF), Op.getReg());
  }

  MI.eraseFromParent();
}
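// Match:
//   mov t, x
//   mov x, y
//   mov y, t
// =>
//   mov t, x (t is potentially dead and the move eliminated)
//   v_swap_b32 x, y
// Returns the next valid instruction if it was able to create a v_swap.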
MachineInstr *SIShrinkInstructions::matchSwap(MachineInstr &MovT) const {
  assert(MovT.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
         MovT.getOpcode() == AMDGPU::V_MOV_B16_t16_e32 ||
         MovT.getOpcode() == AMDGPU::COPY);

  Register T = MovT.getOperand(0).getReg();
  unsigned Tsub = MovT.getOperand(0).getSubReg();
  MachineOperand &Xop = MovT.getOperand(1);

  if (!Xop.isReg())
    return nullptr;
  Register X = Xop.getReg();
  unsigned Xsub = Xop.getSubReg();

  unsigned Size = TII->getOpSize(MovT, 0);

  // We can't match v_swap_b16 pre-RA, because the 16-bit halves of a VGPR are
  // not separately allocatable.
  if (Size == 2 && X.isVirtual())
    return nullptr;

  if (!TRI->isVGPR(*MRI, X))
    return nullptr;

  const unsigned SearchLimit = 16;
  unsigned Count = 0;
  bool KilledT = false;
  for (auto Iter = std::next(MovT.getIterator()),
            E = MovT.getParent()->instr_end();
       Iter != E && Count < SearchLimit && !KilledT; ++Iter, ++Count) {

    MachineInstr *MovY = &*Iter;
    KilledT = MovY->killsRegister(T, TRI);

    if ((MovY->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
         MovY->getOpcode() != AMDGPU::V_MOV_B16_t16_e32 &&
         MovY->getOpcode() != AMDGPU::COPY) ||
        !MovY->getOperand(1).isReg() || MovY->getOperand(1).getReg() != T ||
        MovY->getOperand(1).getSubReg() != Tsub)
      continue;

    Register Y = MovY->getOperand(0).getReg();
    unsigned Ysub = MovY->getOperand(0).getSubReg();

    if (!TRI->isVGPR(*MRI, Y))
      continue;

    MachineInstr *MovX = nullptr;
    for (auto IY = MovY->getIterator(), I = std::next(MovT.getIterator());
         I != IY; ++I) {
      if (instReadsReg(&*I, X, Xsub) || instModifiesReg(&*I, Y, Ysub) ||
          instModifiesReg(&*I, T, Tsub) ||
          (MovX && instModifiesReg(&*I, X, Xsub))) {
        MovX = nullptr;
        break;
      }
      if (!instReadsReg(&*I, Y, Ysub)) {
        if (!MovX && instModifiesReg(&*I, X, Xsub)) {
          MovX = nullptr;
          break;
        }
        continue;
      }
      if (MovX ||
          (I->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
           I->getOpcode() != AMDGPU::V_MOV_B16_t16_e32 &&
           I->getOpcode() != AMDGPU::COPY) ||
          I->getOperand(0).getReg() != X ||
          I->getOperand(0).getSubReg() != Xsub) {
        MovX = nullptr;
        break;
      }

      // Implicit use of M0 is an indirect move.
      if (I->hasRegisterImplicitUseOperand(AMDGPU::M0))
        continue;

      if (Size > 4 && (I->getNumImplicitOperands() > (I->isCopy() ? 0U : 1U)))
        continue;

      MovX = &*I;
    }

    if (!MovX)
      continue;

    LLVM_DEBUG(dbgs() << "Matched v_swap:\n" << MovT << *MovX << *MovY);

    MachineBasicBlock &MBB = *MovT.getParent();
    SmallVector<MachineInstr *, 4> Swaps;
    if (Size == 2) {
      auto *MIB = BuildMI(MBB, MovX->getIterator(), MovT.getDebugLoc(),
                          TII->get(AMDGPU::V_SWAP_B16))
                      .addDef(X)
                      .addDef(Y)
                      .addReg(Y)
                      .addReg(X)
                      .getInstr();
      Swaps.push_back(MIB);
    } else {
      assert(Size > 0 && Size % 4 == 0);
      for (unsigned I = 0; I < Size / 4; ++I) {
        TargetInstrInfo::RegSubRegPair X1, Y1;
        X1 = getSubRegForIndex(X, Xsub, I);
        Y1 = getSubRegForIndex(Y, Ysub, I);
        auto *MIB = BuildMI(MBB, MovX->getIterator(), MovT.getDebugLoc(),
                            TII->get(AMDGPU::V_SWAP_B32))
                        .addDef(X1.Reg, 0, X1.SubReg)
                        .addDef(Y1.Reg, 0, Y1.SubReg)
                        .addReg(Y1.Reg, 0, Y1.SubReg)
                        .addReg(X1.Reg, 0, X1.SubReg)
                        .getInstr();
        Swaps.push_back(MIB);
      }
    }
    // Drop implicit EXEC.
    if (MovX->getOpcode() == AMDGPU::V_MOV_B16_t16_e32) {
      for (MachineInstr *Swap : Swaps) {
        Swap->removeOperand(Swap->getNumExplicitOperands());
        Swap->copyImplicitOps(*MBB.getParent(), *MovX);
      }
    }
    MovX->eraseFromParent();
    dropInstructionKeepingImpDefs(*MovY);
    MachineInstr *Next = &*std::next(MovT.getIterator());

    if (T.isVirtual() && MRI->use_nodbg_empty(T)) {
      dropInstructionKeepingImpDefs(MovT);
    } else {
      Xop.setIsKill(false);
      for (int I = MovT.getNumOperands() - 1; I >= 0; --I) {
        MachineOperand &Op = MovT.getOperand(I);
        if (Op.isKill() && TRI->regsOverlap(X, Op.getReg()))
          MovT.removeOperand(I);
      }
    }

    return Next;
  }

  return nullptr;
}
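// If an instruction has a dead sdst, replace it with the NULL register on
// gfx10.3+.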
bool SIShrinkInstructions::tryReplaceDeadSDST(MachineInstr &MI) const {
  if (!ST->hasGFX10_3Insts())
    return false;

  MachineOperand *Op = TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
  if (!Op)
    return false;

  Register SDstReg = Op->getReg();
  if (SDstReg.isPhysical() || !MRI->use_nodbg_empty(SDstReg))
    return false;

  Op->setReg(ST->isWave32() ? AMDGPU::SGPR_NULL : AMDGPU::SGPR_NULL64);
  return true;
}
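// Main driver: walk every instruction and apply the shrinking rewrites above.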
bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  this->MF = &MF;
  MRI = &MF.getRegInfo();
  ST = &MF.getSubtarget<GCNSubtarget>();
  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  unsigned VCCReg = ST->isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;

  std::vector<unsigned> I1Defs;

  for (MachineBasicBlock &MBB : MF) {
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (MI.getOpcode() == AMDGPU::V_MOV_B32_e32) {
        // If this has a literal constant source that is the same as the
        // reversed bits (or the bitwise negation) of an inline immediate,
        // replace with the corresponding v_bfrev/v_not of that immediate.
        MachineOperand &Src = MI.getOperand(1);
        if (Src.isImm() && MI.getOperand(0).getReg().isPhysical()) {
          int32_t ModImm;
          unsigned ModOpcode =
              canModifyToInlineImmOp32(TII, Src, ModImm, /*Scalar=*/false);
          if (ModOpcode != 0) {
            MI.setDesc(TII->get(ModOpcode));
            Src.setImm(static_cast<int64_t>(ModImm));
            continue;
          }
        }
      }

      if (ST->hasSwap() && (MI.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
                            MI.getOpcode() == AMDGPU::V_MOV_B16_t16_e32 ||
                            MI.getOpcode() == AMDGPU::COPY)) {
        if (auto *NextMI = matchSwap(MI)) {
          Next = NextMI->getIterator();
          continue;
        }
      }

      // Try to use S_ADDK_I32 and S_MULK_I32.
      if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
          MI.getOpcode() == AMDGPU::S_MUL_I32) {
        const MachineOperand *Dest = &MI.getOperand(0);
        MachineOperand *Src0 = &MI.getOperand(1);
        MachineOperand *Src1 = &MI.getOperand(2);

        if (!Src0->isReg() && Src1->isReg()) {
          if (TII->commuteInstruction(MI, false, 1, 2))
            std::swap(Src0, Src1);
        }

        // FIXME: This could work better if hints worked with subregisters.
        if (Dest->getReg().isVirtual() && Src0->isReg()) {
          MRI->setRegAllocationHint(Dest->getReg(), 0, Src0->getReg());
          continue;
        }

        if (Src0->isReg() && Src0->getReg() == Dest->getReg()) {
          if (Src1->isImm() && isKImmOperand(*Src1)) {
            unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ?
              AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;

            Src1->setImm(SignExtend64(Src1->getImm(), 32));
            MI.setDesc(TII->get(Opc));
            MI.tieOperands(0, 1);
          }
        }
      }

      // Try to use s_cmpk_*.
      if (MI.isCompare() && TII->isSOPC(MI)) {
        shrinkScalarCompare(MI);
        continue;
      }

      // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
        const MachineOperand &Dst = MI.getOperand(0);
        MachineOperand &Src = MI.getOperand(1);

        if (Src.isImm() && Dst.getReg().isPhysical()) {
          unsigned ModOpc;
          int32_t ModImm;
          if (isKImmOperand(Src)) {
            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));
            Src.setImm(SignExtend64(Src.getImm(), 32));
          } else if ((ModOpc = canModifyToInlineImmOp32(TII, Src, ModImm,
                                                        /*Scalar=*/true))) {
            MI.setDesc(TII->get(ModOpc));
            Src.setImm(static_cast<int64_t>(ModImm));
          }
        }

        continue;
      }

      // Shrink scalar logic operations.
      if (MI.getOpcode() == AMDGPU::S_AND_B32 ||
          MI.getOpcode() == AMDGPU::S_OR_B32 ||
          MI.getOpcode() == AMDGPU::S_XOR_B32) {
        if (shrinkScalarLogicOp(MI))
          continue;
      }

      if (TII->isMIMG(MI.getOpcode()) &&
          ST->getGeneration() >= AMDGPUSubtarget::GFX10 &&
          MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::NoVRegs)) {
        shrinkMIMG(MI);
        continue;
      }

      if (!TII->isVOP3(MI))
        continue;

      if (MI.getOpcode() == AMDGPU::V_MAD_F32_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F32_e64 ||
          MI.getOpcode() == AMDGPU::V_MAD_F16_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F16_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F16_gfx9_e64) {
        shrinkMadFma(MI);
        continue;
      }

      if (!TII->hasVALU32BitEncoding(MI.getOpcode())) {
        // If there is no chance we will shrink it and use VCC as sdst to get
        // a 32 bit form, try to replace a dead sdst with NULL.
        tryReplaceDeadSDST(MI);
        continue;
      }

      if (!TII->canShrink(MI, *MRI)) {
        // Try commuting the instruction and see if that enables us to shrink
        // it.
        if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||
            !TII->canShrink(MI, *MRI)) {
          tryReplaceDeadSDST(MI);
          continue;
        }
      }

      int Op32 = AMDGPU::getVOPe32(MI.getOpcode());

      if (TII->isVOPC(Op32)) {
        MachineOperand &Op0 = MI.getOperand(0);
        if (Op0.isReg()) {
          // Exclude VOPCX instructions as these don't explicitly write a
          // dst.
          Register DstReg = Op0.getReg();
          if (DstReg.isVirtual()) {
            // VOPC instructions can only write to the VCC register. We can't
            // force them to use VCC here, as this is only one register
            // allocation hint, so add the dst as a hint and let the allocator
            // do the rest.
            MRI->setRegAllocationHint(DstReg, 0, VCCReg);
            continue;
          }
          if (DstReg != VCCReg)
            continue;
        }
      }

      if (Op32 == AMDGPU::V_CNDMASK_B32_e32) {
        // The shrunk form reads its condition from VCC, so hint the condition
        // register (src2) to VCC.
        const MachineOperand *Src2 =
            TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        if (!Src2->isReg())
          continue;
        Register SReg = Src2->getReg();
        if (SReg.isVirtual()) {
          MRI->setRegAllocationHint(SReg, 0, VCCReg);
          continue;
        }
        if (SReg != VCCReg)
          continue;
      }

      // Check for the bool flag output for instructions like V_ADD_I32_e64.
      const MachineOperand *SDst =
          TII->getNamedOperand(MI, AMDGPU::OpName::sdst);
      if (SDst) {
        bool Next = false;

        if (SDst->getReg() != VCCReg) {
          if (SDst->getReg().isVirtual())
            MRI->setRegAllocationHint(SDst->getReg(), 0, VCCReg);
          Next = true;
        }

        // All of the instructions with carry outs also have an SGPR input in
        // src2.
        const MachineOperand *Src2 =
            TII->getNamedOperand(MI, AMDGPU::OpName::src2);
        if (Src2 && Src2->getReg() != VCCReg) {
          if (Src2->getReg().isVirtual())
            MRI->setRegAllocationHint(Src2->getReg(), 0, VCCReg);
          Next = true;
        }

        if (Next)
          continue;
      }

      // Pre-GFX10, shrinking VOP3 instructions pre-RA gave us the chance to
      // fold an immediate into the shrunk instruction as a literal operand.
      // On GFX10+ VOP3 instructions can take a literal anyway, so there only
      // do this post-RA.
      if (ST->hasVOP3Literal() &&
          !MF.getProperties().hasProperty(
              MachineFunctionProperties::Property::NoVRegs))
        continue;

      if (ST->hasTrue16BitInsts() && AMDGPU::isTrue16Inst(MI.getOpcode()) &&
          !shouldShrinkTrue16(MI))
        continue;

      // We can shrink this instruction.
      LLVM_DEBUG(dbgs() << "Shrinking " << MI);

      MachineInstr *Inst32 = TII->buildShrunkInst(MI, Op32);
      ++NumInstructionsShrunk;

      // Copy extra operands not present in the instruction definition.
      copyExtraImplicitOps(*Inst32, MI);

      // Copy deadness from the old explicit vcc def to the new implicit def.
      if (SDst && SDst->isDead())
        Inst32->findRegisterDefOperand(VCCReg, /*TRI=*/nullptr)->setIsDead();

      MI.eraseFromParent();
      foldImmediates(*Inst32);

      LLVM_DEBUG(dbgs() << "e32 MI = " << *Inst32 << '\n');
    }
  }
  return false;
}