// The pass tries to use the 32-bit encoding for instructions when possible.

#define DEBUG_TYPE "si-shrink-instructions"

STATISTIC(NumInstructionsShrunk,
          "Number of 64-bit instruction reduced to 32-bit.");
STATISTIC(NumLiteralConstantsFolded,
          "Number of literal constants folded into 32-bit instructions.");
  bool foldImmediates(MachineInstr &MI, bool TryToCommute = true) const;
  bool isKImmOrKUImmOperand(const MachineOperand &Src, bool &IsUnsigned) const;
78 "SI Shrink Instructions",
false,
false)
80char SIShrinkInstructions::
ID = 0;
83 return new SIShrinkInstructions();
bool SIShrinkInstructions::foldImmediates(MachineInstr &MI,
                                          bool TryToCommute) const {
    if (Reg.isVirtual()) {
      if (Def && Def->isMoveImmediate()) {
        bool ConstantFolded = false;

        if (TII->isOperandLegal(MI, Src0Idx, &MovSrc)) {
          if (MovSrc.isImm()) {
            ConstantFolded = true;
          } else if (MovSrc.isFI()) {
            ConstantFolded = true;

            ConstantFolded = true;

        if (ConstantFolded) {
          if (MRI->use_nodbg_empty(Reg))
            Def->eraseFromParent();
          ++NumLiteralConstantsFolded;

  // We have failed to fold src0, so commute the instruction and try again.
  if (TryToCommute && MI.isCommutable()) {
    if (TII->commuteInstruction(MI)) {
      if (foldImmediates(MI, false))

      TII->commuteInstruction(MI);
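// Illustrative effect of foldImmediates() (sketch only; exact operands depend
// on the surrounding code): when src0 of a shrunk VOP1/VOP2 is a virtual
// register whose definition is a move-immediate and the literal is legal in
// that slot, the literal is folded and the mov becomes removable, e.g.
//   v_mov_b32     v1, 0x1234
//   v_add_f32_e32 v0, v1, v2      -->  v_add_f32_e32 v0, 0x1234, v2
// The defining v_mov_b32 is erased once the register has no remaining
// non-debug uses and NumLiteralConstantsFolded is bumped. If src0 cannot be
// folded, the instruction is commuted and the fold is retried once.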
bool SIShrinkInstructions::shouldShrinkTrue16(MachineInstr &MI) const {
  for (unsigned I = 0, E = MI.getNumExplicitOperands(); I != E; ++I) {
      assert(!Reg.isVirtual() && "Prior checks should ensure we only shrink "
                                 "True16 Instructions post-RA");
      if (AMDGPU::VGPR_32RegClass.contains(Reg) &&
          !AMDGPU::VGPR_32_Lo128RegClass.contains(Reg))
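// Rationale (sketch): the 16-bit "True16" encodings can only address the low
// half of the VGPR file, so a post-RA instruction is only worth shrinking if
// every VGPR operand already lives in VGPR_32_Lo128 (v0..v127). Any operand
// allocated above v127 makes the 32-bit encoding unusable here.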
bool SIShrinkInstructions::isKImmOperand(const MachineOperand &Src) const {
  return isInt<16>(SignExtend64(Src.getImm(), 32)) &&
         !TII->isInlineConstant(*Src.getParent(), Src.getOperandNo());
}
bool SIShrinkInstructions::isKUImmOperand(const MachineOperand &Src) const {
  return isUInt<16>(Src.getImm()) &&
         !TII->isInlineConstant(*Src.getParent(), Src.getOperandNo());
}
bool SIShrinkInstructions::isKImmOrKUImmOperand(const MachineOperand &Src,
                                                bool &IsUnsigned) const {
  if (isInt<16>(SignExtend64(Src.getImm(), 32))) {
    IsUnsigned = false;
    return !TII->isInlineConstant(Src);
  }

  if (isUInt<16>(Src.getImm())) {
    IsUnsigned = true;
    return !TII->isInlineConstant(Src);
  }

  return false;
}
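// What counts as a "KImm" here (sketch): an immediate that fits the 16-bit
// literal field of the SOPK/K-constant forms but is *not* already an inline
// constant (inline constants are free in the regular encoding, so there is
// nothing to gain). For example, 0x1234 qualifies; 64 does not (it is an
// inline constant); 0x12345 does not (it needs more than 16 bits).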
static unsigned canModifyToInlineImmOp32(const SIInstrInfo *TII,
                                         const MachineOperand &Src,
                                         int32_t &ModifiedImm, bool Scalar) {
  if (TII->isInlineConstant(Src))
    return 0;

  int32_t SrcImm = static_cast<int32_t>(Src.getImm());

  if (!Scalar) {
    ModifiedImm = ~SrcImm;
    if (TII->isInlineConstant(APInt(32, ModifiedImm)))
      return AMDGPU::V_NOT_B32_e32;
  }

  ModifiedImm = reverseBits<int32_t>(SrcImm);
  if (TII->isInlineConstant(APInt(32, ModifiedImm)))
    return Scalar ? AMDGPU::S_BREV_B32 : AMDGPU::V_BFREV_B32_e32;

  return 0;
}
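// Worked example (illustrative): a 32-bit literal that is not an inline
// constant may still be reachable from one via a single unary op:
//   v_mov_b32 v0, 0xffffffbf   ; -65, not inline
//     --> v_not_b32_e32 v0, 64       ; ~(-65) == 64 is inline
//   s_mov_b32 s0, 0x1fc        ; 508, not inline
//     --> s_brev_b32 s0, 1.0         ; bit-reverse(0x1fc) == 0x3f800000 (1.0)
// The caller rewrites the mov to the returned opcode and installs ModifiedImm.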
void SIShrinkInstructions::copyExtraImplicitOps(MachineInstr &NewMI,
                                                MachineInstr &MI) const {
  for (unsigned i = MI.getDesc().getNumOperands() +
                    MI.getDesc().implicit_uses().size() +
                    MI.getDesc().implicit_defs().size(),
                e = MI.getNumOperands();
       i != e; ++i) {
void SIShrinkInstructions::shrinkScalarCompare(MachineInstr &MI) const {

  // cmpk instructions have the constant on the RHS, so commute if needed to
  // put the register operand first.
  if (!MI.getOperand(0).isReg())
    TII->commuteInstruction(MI, false, 0, 1);

  if (SOPKOpc == AMDGPU::S_CMPK_EQ_U32 || SOPKOpc == AMDGPU::S_CMPK_LG_U32) {
    if (isKImmOrKUImmOperand(Src1, HasUImm)) {
      if (!HasUImm) {
        SOPKOpc = (SOPKOpc == AMDGPU::S_CMPK_EQ_U32) ?
                      AMDGPU::S_CMPK_EQ_I32 : AMDGPU::S_CMPK_LG_I32;
      }

      MI.setDesc(TII->get(SOPKOpc));
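// Illustrative shrink (sketch): an SOPC compare against a 16-bit literal can
// use the SOPK form and drop the trailing 32-bit literal dword, e.g.
//   s_cmp_lg_u32 s0, 0x1234    -->  s_cmpk_lg_u32 s0, 0x1234
// For S_CMP_EQ_U32 / S_CMP_LG_U32 the immediate may only fit as a *signed*
// 16-bit value; equality is sign-agnostic, so the code above switches to the
// _I32 SOPK variant in that case instead of giving up.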
  switch (Info->MIMGEncoding) {
  case AMDGPU::MIMGEncGfx10NSA:
    NewEncoding = AMDGPU::MIMGEncGfx10Default;
    break;
  case AMDGPU::MIMGEncGfx11NSA:
    NewEncoding = AMDGPU::MIMGEncGfx11Default;
    break;

  unsigned NewAddrDwords = Info->VAddrDwords;

  if (Info->VAddrDwords == 2) {
    RC = &AMDGPU::VReg_64RegClass;
  } else if (Info->VAddrDwords == 3) {
    RC = &AMDGPU::VReg_96RegClass;
  } else if (Info->VAddrDwords == 4) {
    RC = &AMDGPU::VReg_128RegClass;
  } else if (Info->VAddrDwords == 5) {
    RC = &AMDGPU::VReg_160RegClass;
  } else if (Info->VAddrDwords == 6) {
    RC = &AMDGPU::VReg_192RegClass;
  } else if (Info->VAddrDwords == 7) {
    RC = &AMDGPU::VReg_224RegClass;
  } else if (Info->VAddrDwords == 8) {
    RC = &AMDGPU::VReg_256RegClass;
  } else if (Info->VAddrDwords == 9) {
    RC = &AMDGPU::VReg_288RegClass;
  } else if (Info->VAddrDwords == 10) {
    RC = &AMDGPU::VReg_320RegClass;
  } else if (Info->VAddrDwords == 11) {
    RC = &AMDGPU::VReg_352RegClass;
  } else if (Info->VAddrDwords == 12) {
    RC = &AMDGPU::VReg_384RegClass;
  } else {
    RC = &AMDGPU::VReg_512RegClass;
  }
  unsigned VgprBase = 0;
  unsigned NextVgpr = 0;

  bool IsKill = NewAddrDwords == Info->VAddrDwords;
  const unsigned NSAMaxSize = ST->getNSAMaxSize();
  const bool IsPartialNSA = NewAddrDwords > NSAMaxSize;
  const unsigned EndVAddr = IsPartialNSA ? NSAMaxSize : Info->VAddrOperands;
  for (unsigned Idx = 0; Idx < EndVAddr; ++Idx) {
    unsigned Vgpr = TRI->getHWRegIndex(Op.getReg());
    unsigned Dwords = TRI->getRegSizeInBits(Op.getReg(), *MRI) / 32;
    assert(Dwords > 0 && "Un-implemented for less than 32 bit regs");

      NextVgpr = Vgpr + Dwords;
    } else if (Vgpr == NextVgpr) {
      NextVgpr = Vgpr + Dwords;
  if (VgprBase + NewAddrDwords > 256)

  unsigned TFEVal = (TFEIdx == -1) ? 0 : MI.getOperand(TFEIdx).getImm();
  unsigned LWEVal = (LWEIdx == -1) ? 0 : MI.getOperand(LWEIdx).getImm();

  if (TFEVal || LWEVal) {
    // TFE/LWE adds an implicit tied operand; find and untie it before the
    // address operands are removed below.
    for (unsigned i = LWEIdx + 1, e = MI.getNumOperands(); i != e; ++i) {
      if (MI.getOperand(i).isReg() && MI.getOperand(i).isTied() &&
          MI.getOperand(i).isImplicit()) {
               "found more than one tied implicit operand when expecting only 1");
        MI.untieRegOperand(ToUntie);

  unsigned NewOpcode = AMDGPU::getMIMGOpcode(Info->BaseOpcode, NewEncoding,
                                             Info->VDataDwords, NewAddrDwords);
  MI.setDesc(TII->get(NewOpcode));

  MI.getOperand(VAddr0Idx).setIsUndef(IsUndef);
  MI.getOperand(VAddr0Idx).setIsKill(IsKill);

  for (unsigned i = 1; i < EndVAddr; ++i)
    MI.removeOperand(VAddr0Idx + 1);

                  ToUntie - (EndVAddr - 1));
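// Summary of the transform above (sketch): an NSA-encoded image instruction
// lists each address VGPR separately and pays extra encoding dwords for it.
// When the address registers form one contiguous ascending run
// (VgprBase .. VgprBase + NewAddrDwords - 1), the instruction can switch to
// the sequential gfx10/gfx11 encoding: pick the matching VReg_* class, look
// up the non-NSA opcode with AMDGPU::getMIMGOpcode(), keep only vaddr0, and
// drop the remaining address operands. Instructions with tfe/lwe need their
// tied implicit operand untied first and re-tied after operands are removed.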
  // Pre-GFX10 VOP3 instructions like MAD/FMA cannot take a literal operand,
  // so there is no reason to try to shrink them.
  if (!ST->hasVOP3Literal())
    return;

          MachineFunctionProperties::Property::NoVRegs))

  if (TII->hasAnyModifiersSet(MI))
    return;

  const unsigned Opcode = MI.getOpcode();

  unsigned NewOpcode = AMDGPU::INSTRUCTION_LIST_END;

  // The addend (src2) is a non-inline literal: use the AK forms.
  if (Src2.isImm() && !TII->isInlineConstant(Src2)) {
    switch (Opcode) {
    case AMDGPU::V_MAD_F32_e64:
      NewOpcode = AMDGPU::V_MADAK_F32;
      break;
    case AMDGPU::V_FMA_F32_e64:
      NewOpcode = AMDGPU::V_FMAAK_F32;
      break;
    case AMDGPU::V_MAD_F16_e64:
      NewOpcode = AMDGPU::V_MADAK_F16;
      break;
    case AMDGPU::V_FMA_F16_e64:
    case AMDGPU::V_FMA_F16_gfx9_e64:
      NewOpcode = ST->hasTrue16BitInsts() ? AMDGPU::V_FMAAK_F16_t16
                                          : AMDGPU::V_FMAAK_F16;
      break;
    }
  }

  // A multiplicand is a non-inline literal: use the MK forms.
  if (Src1.isImm() && !TII->isInlineConstant(Src1))

  else if (Src0.isImm() && !TII->isInlineConstant(Src0))

    switch (Opcode) {
    case AMDGPU::V_MAD_F32_e64:
      NewOpcode = AMDGPU::V_MADMK_F32;
      break;
    case AMDGPU::V_FMA_F32_e64:
      NewOpcode = AMDGPU::V_FMAMK_F32;
      break;
    case AMDGPU::V_MAD_F16_e64:
      NewOpcode = AMDGPU::V_MADMK_F16;
      break;
    case AMDGPU::V_FMA_F16_e64:
    case AMDGPU::V_FMA_F16_gfx9_e64:
      NewOpcode = ST->hasTrue16BitInsts() ? AMDGPU::V_FMAMK_F16_t16
                                          : AMDGPU::V_FMAMK_F16;
      break;
    }

  if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END)
    return;

                       MI.getOperand(0).getReg())

    MI.eraseFromParent();

    TII->removeModOperands(MI);
    MI.setDesc(TII->get(NewOpcode));
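// Illustrative result (sketch): a VOP3 mad/fma whose constant cannot be
// encoded inline is rewritten to the VOP2 forms that carry a 32-bit K:
//   v_fma_f32 v0, v1, v2, 0x42f60000  -->  v_fmaak_f32 v0, v1, v2, 0x42f60000
//   v_fma_f32 v0, v1, 0x42f60000, v2  -->  v_fmamk_f32 v0, v1, 0x42f60000, v2
// "AK" places the constant as the addend, "MK" as one multiplicand. The
// transform only runs on subtargets where VOP3 can carry a literal
// (ST->hasVOP3Literal()) and only after register allocation (NoVRegs).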
bool SIShrinkInstructions::shrinkScalarLogicOp(MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();

  if (!SrcImm->isImm() ||

  if (Opc == AMDGPU::S_AND_B32) {

      Opc = AMDGPU::S_BITSET0_B32;

      Opc = AMDGPU::S_ANDN2_B32;

  } else if (Opc == AMDGPU::S_OR_B32) {

      Opc = AMDGPU::S_BITSET1_B32;

      Opc = AMDGPU::S_ORN2_B32;

  } else if (Opc == AMDGPU::S_XOR_B32) {

      Opc = AMDGPU::S_XNOR_B32;

      const bool IsUndef = SrcReg->isUndef();
      const bool IsKill = SrcReg->isKill();
      MI.setDesc(TII->get(Opc));
      if (Opc == AMDGPU::S_BITSET0_B32 ||
          Opc == AMDGPU::S_BITSET1_B32) {

        MI.getOperand(2).ChangeToRegister(Dest->getReg(), false,

        MI.tieOperands(0, 2);
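// Worked examples (illustrative) of the rewrites selected above:
//   s_and_b32 s0, s0, 0xfffffff7  -->  s_bitset0_b32 s0, 3    ; clear bit 3
//   s_or_b32  s0, s0, 0x00010000  -->  s_bitset1_b32 s0, 16   ; set bit 16
//   s_and_b32 s0, s1, 0xffffffc0  -->  s_andn2_b32 s0, s1, 63 ; ~imm is inline
//   s_xor_b32 s0, s1, 0xffffffc0  -->  s_xnor_b32  s0, s1, 63
// Each form replaces a 32-bit literal with an inline operand. The BITSET
// forms read and write the destination register, which is why operand 2 is
// rewritten to the destination and tied to operand 0 above.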
bool SIShrinkInstructions::instAccessReg(

    } else if (MO.getReg() == Reg && Reg.isVirtual()) {
bool SIShrinkInstructions::instReadsReg(const MachineInstr *MI, unsigned Reg,
                                        unsigned SubReg) const {
  return instAccessReg(MI->uses(), Reg, SubReg);
}

bool SIShrinkInstructions::instModifiesReg(const MachineInstr *MI, unsigned Reg,
                                           unsigned SubReg) const {
  return instAccessReg(MI->defs(), Reg, SubReg);
}
TargetInstrInfo::RegSubRegPair
SIShrinkInstructions::getSubRegForIndex(Register Reg, unsigned Sub,
                                        unsigned I) const {
  if (TRI->getRegSizeInBits(Reg, *MRI) != 32) {
    if (Reg.isPhysical()) {
      Reg = TRI->getSubReg(Reg, TRI->getSubRegFromChannel(I));
    } else {
      Sub = TRI->getSubRegFromChannel(I + TRI->getChannelFromSubReg(Sub));
    }
  }
  return TargetInstrInfo::RegSubRegPair(Reg, Sub);
}
void SIShrinkInstructions::dropInstructionKeepingImpDefs(
    MachineInstr &MI) const {
  for (unsigned i = MI.getDesc().getNumOperands() +
                    MI.getDesc().implicit_uses().size() +
                    MI.getDesc().implicit_defs().size(),
                e = MI.getNumOperands();
       i != e; ++i) {

            TII->get(AMDGPU::IMPLICIT_DEF), Op.getReg());

  MI.eraseFromParent();
}
MachineInstr *SIShrinkInstructions::matchSwap(MachineInstr &MovT) const {

  unsigned Size = TII->getOpSize(MovT, 0) / 4;

  const unsigned SearchLimit = 16;

  bool KilledT = false;

       Iter != E && Count < SearchLimit && !KilledT; ++Iter, ++Count) {

    if ((MovY->getOpcode() != AMDGPU::V_MOV_B32_e32 &&

      if (instReadsReg(&*I, X, Xsub) || instModifiesReg(&*I, Y, Ysub) ||
          instModifiesReg(&*I, T, Tsub) ||
          (MovX && instModifiesReg(&*I, X, Xsub))) {

      if (!instReadsReg(&*I, Y, Ysub)) {
        if (!MovX && instModifiesReg(&*I, X, Xsub)) {

          (I->getOpcode() != AMDGPU::V_MOV_B32_e32 &&
           I->getOpcode() != AMDGPU::COPY) ||
          I->getOperand(0).getReg() != X ||
          I->getOperand(0).getSubReg() != Xsub) {

      if (Size > 1 && (I->getNumImplicitOperands() > (I->isCopy() ? 0U : 1U)))

      LLVM_DEBUG(dbgs() << "Matched v_swap_b32:\n" << MovT << *MovX << *MovY);

      for (unsigned I = 0; I < Size; ++I) {
        X1 = getSubRegForIndex(X, Xsub, I);
        Y1 = getSubRegForIndex(Y, Ysub, I);

                TII->get(AMDGPU::V_SWAP_B32))

      dropInstructionKeepingImpDefs(*MovY);

      if (T.isVirtual() && MRI->use_nodbg_empty(T)) {
        dropInstructionKeepingImpDefs(MovT);

      if (Op.isKill() && TRI->regsOverlap(X, Op.getReg()))
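// The pattern being matched (sketch): a register exchange spelled as three
// moves through a temporary, scanning at most SearchLimit (16) instructions
// and requiring no intervening redefinition of x, y, or t:
//   v_mov_b32 v2, v0     ; t = x   (MovT)
//   v_mov_b32 v0, v1     ; x = y   (MovX)
//   v_mov_b32 v1, v2     ; y = t   (MovY)
// becomes
//   v_swap_b32 v0, v1
// with one V_SWAP_B32 per 32-bit lane for wider registers; the copy into the
// temporary is dropped afterwards if the temporary has no remaining uses.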
bool SIShrinkInstructions::tryReplaceDeadSDST(MachineInstr &MI) const {
  if (!ST->hasGFX10_3Insts())
    return false;

  Op->setReg(ST->isWave32() ? AMDGPU::SGPR_NULL : AMDGPU::SGPR_NULL64);
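// Sketch of the intent: on gfx10.3+ a VOP3 carry/compare destination (sdst)
// that is known dead does not need a real SGPR at all; pointing it at the
// read-as-zero null register (SGPR_NULL or SGPR_NULL64 depending on wave
// size) frees the SGPR without changing program behaviour.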
bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {

  TII = ST->getInstrInfo();
  TRI = &TII->getRegisterInfo();

  unsigned VCCReg = ST->isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;

  std::vector<unsigned> I1Defs;

      if (MI.getOpcode() == AMDGPU::V_MOV_B32_e32) {

        if (Src.isImm() && MI.getOperand(0).getReg().isPhysical()) {

          if (ModOpcode != 0) {
            MI.setDesc(TII->get(ModOpcode));
            Src.setImm(static_cast<int64_t>(ModImm));

      if (ST->hasSwap() && (MI.getOpcode() == AMDGPU::V_MOV_B32_e32 ||
                            MI.getOpcode() == AMDGPU::COPY)) {
        if (auto *NextMI = matchSwap(MI)) {
          Next = NextMI->getIterator();

      // Try to use S_ADDK_I32 / S_MULK_I32.
      if (MI.getOpcode() == AMDGPU::S_ADD_I32 ||
          MI.getOpcode() == AMDGPU::S_MUL_I32) {

          if (TII->commuteInstruction(MI, false, 1, 2))

          unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_I32) ?
                             AMDGPU::S_ADDK_I32 : AMDGPU::S_MULK_I32;

          MI.setDesc(TII->get(Opc));
          MI.tieOperands(0, 1);
      // Try to use s_cmpk_*.
      if (MI.isCompare() && TII->isSOPC(MI)) {
        shrinkScalarCompare(MI);

      // Try to use S_MOVK_I32, which will save 4 bytes for small immediates.
      if (MI.getOpcode() == AMDGPU::S_MOV_B32) {

        if (Src.isImm() && Dst.getReg().isPhysical()) {

            MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));

            MI.setDesc(TII->get(ModOpc));
            Src.setImm(static_cast<int64_t>(ModImm));

      // Shrink scalar logic operations.
      if (MI.getOpcode() == AMDGPU::S_AND_B32 ||
          MI.getOpcode() == AMDGPU::S_OR_B32 ||
          MI.getOpcode() == AMDGPU::S_XOR_B32) {
        if (shrinkScalarLogicOp(MI))

      if (TII->isMIMG(MI.getOpcode()) &&

              MachineFunctionProperties::Property::NoVRegs)) {

      if (!TII->isVOP3(MI))

      if (MI.getOpcode() == AMDGPU::V_MAD_F32_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F32_e64 ||
          MI.getOpcode() == AMDGPU::V_MAD_F16_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F16_e64 ||
          MI.getOpcode() == AMDGPU::V_FMA_F16_gfx9_e64) {

      if (!TII->hasVALU32BitEncoding(MI.getOpcode())) {
        // No 32-bit form exists, but a dead sdst can still be replaced.
        tryReplaceDeadSDST(MI);
      if (!MI.isCommutable() || !TII->commuteInstruction(MI) ||

        tryReplaceDeadSDST(MI);

      if (TII->isVOPC(Op32)) {

            MRI->setRegAllocationHint(DstReg, 0, VCCReg);

          if (DstReg != VCCReg)

      if (Op32 == AMDGPU::V_CNDMASK_B32_e32) {
        const MachineOperand *Src2 =
            TII->getNamedOperand(MI, AMDGPU::OpName::src2);

            MRI->setRegAllocationHint(SReg, 0, VCCReg);

      // Check for the bool flag output for instructions like V_ADD_I32_e64.
      const MachineOperand *SDst = TII->getNamedOperand(MI,
                                                        AMDGPU::OpName::sdst);

        if (SDst->getReg() != VCCReg) {

            MRI->setRegAllocationHint(SDst->getReg(), 0, VCCReg);

        MachineOperand *Src2 = TII->getNamedOperand(MI,
                                                    AMDGPU::OpName::src2);
        if (Src2 && Src2->getReg() != VCCReg) {

            MRI->setRegAllocationHint(Src2->getReg(), 0, VCCReg);

      if (ST->hasVOP3Literal() &&

              MachineFunctionProperties::Property::NoVRegs))

          !shouldShrinkTrue16(MI))

      ++NumInstructionsShrunk;

      copyExtraImplicitOps(*Inst32, MI);

      if (SDst && SDst->isDead())

      MI.eraseFromParent();
      foldImmediates(*Inst32);
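// Overall driver flow (sketch): runOnMachineFunction() walks every
// instruction once and applies whichever rewrite matches: immediate
// canonicalisation for v_mov_b32/s_mov_b32, v_swap_b32 matching, SOPK forms
// (s_addk_i32, s_mulk_i32, s_cmpk_*, s_movk_i32), scalar logic-op shrinking,
// NSA MIMG compaction, madak/madmk formation, and the generic VOP3 -> e32
// shrink. Pre-RA it mostly records VCC allocation hints so the post-RA run
// can actually perform the e32 rewrite; post-RA it builds the 32-bit
// instruction, copies any extra implicit operands, erases the original, and
// finally tries to fold a literal into src0 of the new instruction.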