#define DEBUG_TYPE "legalizer"

static bool isArtifactCast(unsigned Opc) {
  switch (Opc) {
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    return true;
  default:
    return false;
  }
}
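
// Note: the combine routines below are member functions of
// LegalizationArtifactCombiner; Builder (MachineIRBuilder), MRI
// (MachineRegisterInfo) and LI (LegalizerInfo) used throughout are members
// set up by its constructor.
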
bool tryCombineAnyExt(MachineInstr &MI,
                      SmallVectorImpl<MachineInstr *> &DeadInsts,
                      SmallVectorImpl<Register> &UpdatedDefs) {
  assert(MI.getOpcode() == TargetOpcode::G_ANYEXT);

  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());

  // aext(trunc x) -> aext/copy/trunc x
  Register TruncSrc;
  if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
    Builder.buildAnyExtOrTrunc(DstReg, TruncSrc);
    markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
    return true;
  }

  // aext([asz]ext x) -> [asz]ext x
  Register ExtSrc;
  MachineInstr *ExtMI;
  if (mi_match(SrcReg, MRI,
               m_all_of(m_MInstr(ExtMI), m_any_of(m_GAnyExt(m_Reg(ExtSrc)),
                                                  m_GSExt(m_Reg(ExtSrc)),
                                                  m_GZExt(m_Reg(ExtSrc)))))) {
    Builder.buildInstr(ExtMI->getOpcode(), {DstReg}, {ExtSrc});
    markInstAndDefDead(MI, *ExtMI, DeadInsts);
    return true;
  }

  // Fold aext(G_CONSTANT) when the constant is legal at the wider type.
  auto *SrcMI = MRI.getVRegDef(SrcReg);
  if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
    const LLT DstTy = MRI.getType(DstReg);
    if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
      auto &CstVal = SrcMI->getOperand(1);
      Builder.buildConstant(
          DstReg, CstVal.getCImm()->getValue().sext(DstTy.getSizeInBits()));
      markInstAndDefDead(MI, *SrcMI, DeadInsts);
      return true;
    }
  }
  return tryFoldImplicitDef(MI, DeadInsts, UpdatedDefs);
}
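
// Illustrative MIR for the aext(trunc) fold above (sketch, not taken from
// the original source): with %0:_(s64),
//   %1:_(s32) = G_TRUNC %0:_(s64)
//   %2:_(s64) = G_ANYEXT %1:_(s32)
// is rewritten to reuse %0 directly via buildAnyExtOrTrunc, which here emits
// a plain COPY because the source and destination widths already match.
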
bool tryCombineZExt(MachineInstr &MI,
                    SmallVectorImpl<MachineInstr *> &DeadInsts,
                    SmallVectorImpl<Register> &UpdatedDefs,
                    GISelObserverWrapper &Observer) {
  assert(MI.getOpcode() == TargetOpcode::G_ZEXT);

  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());

  // zext(trunc x) -> and (aext/copy/trunc x), mask
  // zext(sext x)  -> and (sext x), mask
  Register TruncSrc;
  Register SextSrc;
  if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))) ||
      mi_match(SrcReg, MRI, m_GSExt(m_Reg(SextSrc)))) {
    LLT DstTy = MRI.getType(DstReg);
    if (isInstUnsupported({TargetOpcode::G_AND, {DstTy}}) ||
        isConstantUnsupported(DstTy))
      return false;
    LLT SrcTy = MRI.getType(SrcReg);
    APInt MaskVal = APInt::getAllOnesValue(SrcTy.getScalarSizeInBits());
    auto Mask = Builder.buildConstant(
        DstTy, MaskVal.zext(DstTy.getScalarSizeInBits()));
    auto Extended = SextSrc ? Builder.buildSExtOrTrunc(DstTy, SextSrc)
                            : Builder.buildAnyExtOrTrunc(DstTy, TruncSrc);
    Builder.buildAnd(DstReg, Extended, Mask);
    markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
    return true;
  }

  // zext(zext x) -> (zext x): just rewire MI's operand to the inner source.
  Register ZextSrc;
  if (mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZextSrc)))) {
    Observer.changingInstr(MI);
    MI.getOperand(1).setReg(ZextSrc);
    Observer.changedInstr(MI);
    markDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
    return true;
  }

  // Fold zext(G_CONSTANT) when the constant is legal at the wider type.
  auto *SrcMI = MRI.getVRegDef(SrcReg);
  if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
    const LLT DstTy = MRI.getType(DstReg);
    if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
      auto &CstVal = SrcMI->getOperand(1);
      Builder.buildConstant(
          DstReg, CstVal.getCImm()->getValue().zext(DstTy.getSizeInBits()));
      markInstAndDefDead(MI, *SrcMI, DeadInsts);
      return true;
    }
  }
  return tryFoldImplicitDef(MI, DeadInsts, UpdatedDefs);
}
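
// Illustrative MIR for the zext(trunc) fold above (sketch, not from the
// original source): with %0:_(s64),
//   %1:_(s32) = G_TRUNC %0:_(s64)
//   %2:_(s64) = G_ZEXT %1:_(s32)
// becomes a mask of the original value's low 32 bits:
//   %m:_(s64) = G_CONSTANT i64 4294967295
//   %2:_(s64) = G_AND %0:_(s64), %m:_(s64)
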
bool tryCombineSExt(MachineInstr &MI,
                    SmallVectorImpl<MachineInstr *> &DeadInsts,
                    SmallVectorImpl<Register> &UpdatedDefs) {
  assert(MI.getOpcode() == TargetOpcode::G_SEXT);

  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());

  // sext(trunc x) -> (sext_inreg (aext/copy/trunc x), c)
  Register TruncSrc;
  if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
    LLT DstTy = MRI.getType(DstReg);
    if (isInstUnsupported({TargetOpcode::G_SEXT_INREG, {DstTy}}))
      return false;
    LLT SrcTy = MRI.getType(SrcReg);
    uint64_t SizeInBits = SrcTy.getScalarSizeInBits();
    Builder.buildInstr(
        TargetOpcode::G_SEXT_INREG, {DstReg},
        {Builder.buildAnyExtOrTrunc(DstTy, TruncSrc), SizeInBits});
    markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
    return true;
  }

  // sext(zext x) -> (zext x)
  // sext(sext x) -> (sext x)
  Register ExtSrc;
  MachineInstr *ExtMI;
  if (mi_match(SrcReg, MRI,
               m_all_of(m_MInstr(ExtMI), m_any_of(m_GZExt(m_Reg(ExtSrc)),
                                                  m_GSExt(m_Reg(ExtSrc)))))) {
    Builder.buildInstr(ExtMI->getOpcode(), {DstReg}, {ExtSrc});
    markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
    return true;
  }

  return tryFoldImplicitDef(MI, DeadInsts, UpdatedDefs);
}
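
// Illustrative MIR for the sext(trunc) fold above (sketch, not from the
// original source): with %0:_(s64),
//   %1:_(s32) = G_TRUNC %0:_(s64)
//   %2:_(s64) = G_SEXT %1:_(s32)
// becomes
//   %2:_(s64) = G_SEXT_INREG %0:_(s64), 32
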
bool tryCombineTrunc(MachineInstr &MI,
                     SmallVectorImpl<MachineInstr *> &DeadInsts,
                     SmallVectorImpl<Register> &UpdatedDefs,
                     GISelObserverWrapper &Observer) {
  assert(MI.getOpcode() == TargetOpcode::G_TRUNC);

  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());

  // Fold trunc(G_CONSTANT) when the constant is legal at the narrower type.
  auto *SrcMI = MRI.getVRegDef(SrcReg);
  if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
    const LLT DstTy = MRI.getType(DstReg);
    if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
      auto &CstVal = SrcMI->getOperand(1);
      Builder.buildConstant(
          DstReg, CstVal.getCImm()->getValue().trunc(DstTy.getSizeInBits()));
      markInstAndDefDead(MI, *SrcMI, DeadInsts);
      return true;
    }
  }

  // Fold trunc(merge) to use the merge source registers directly.
  if (SrcMI->getOpcode() == TargetOpcode::G_MERGE_VALUES) {
    const Register MergeSrcReg = SrcMI->getOperand(1).getReg();
    const LLT MergeSrcTy = MRI.getType(MergeSrcReg);
    const LLT DstTy = MRI.getType(DstReg);
    const unsigned DstSize = DstTy.getSizeInBits();
    const unsigned MergeSrcSize = MergeSrcTy.getSizeInBits();
    // ...

    if (DstSize < MergeSrcSize) {
      // The destination is smaller than a single merge input: truncate the
      // first merge source directly.
      if (isInstUnsupported({TargetOpcode::G_TRUNC, {DstTy, MergeSrcTy}}))
        return false;
      LLVM_DEBUG(dbgs() << "Combining G_TRUNC(G_MERGE_VALUES) to G_TRUNC: "
                        << MI);
      Builder.buildTrunc(DstReg, MergeSrcReg);
    } else if (DstSize == MergeSrcSize) {
      // The sizes match, so the first merge input can be used as-is.
      LLVM_DEBUG(
          dbgs() << "Replacing G_TRUNC(G_MERGE_VALUES) with merge input: "
                 << MI);
      replaceRegOrBuildCopy(DstReg, MergeSrcReg, MRI, Builder, UpdatedDefs,
                            Observer);
    } else if (DstSize % MergeSrcSize == 0) {
      // The destination covers several merge inputs: emit a smaller merge.
      if (isInstUnsupported(
              {TargetOpcode::G_MERGE_VALUES, {DstTy, MergeSrcTy}}))
        return false;
      LLVM_DEBUG(
          dbgs() << "Combining G_TRUNC(G_MERGE_VALUES) to G_MERGE_VALUES: "
                 << MI);
      const unsigned NumSrcs = DstSize / MergeSrcSize;
      assert(NumSrcs < SrcMI->getNumOperands() - 1 &&
             "trunc(merge) should require less inputs than merge");
      SmallVector<Register, 8> SrcRegs(NumSrcs);
      for (unsigned i = 0; i < NumSrcs; ++i)
        SrcRegs[i] = SrcMI->getOperand(i + 1).getReg();
      Builder.buildMerge(DstReg, SrcRegs);
    } else {
      // Unable to combine.
      return false;
    }

    markInstAndDefDead(MI, *SrcMI, DeadInsts);
    return true;
  }

  // trunc(trunc x) -> trunc x
  Register TruncSrc;
  if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
    Builder.buildTrunc(DstReg, TruncSrc);
    markInstAndDefDead(MI, *MRI.getVRegDef(TruncSrc), DeadInsts);
    return true;
  }

  return false;
}
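
// Illustrative MIR for the trunc(merge) fold above (sketch, not from the
// original source): when the destination covers exactly one merge input,
//   %2:_(s64) = G_MERGE_VALUES %0:_(s32), %1:_(s32)
//   %3:_(s32) = G_TRUNC %2:_(s64)
// simply reuses %0 (the DstSize == MergeSrcSize path); when DstSize is a
// larger multiple of MergeSrcSize, a narrower G_MERGE_VALUES of the leading
// inputs is emitted instead.
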
/// Try to fold G_[ASZ]EXT (G_IMPLICIT_DEF).
bool tryFoldImplicitDef(MachineInstr &MI,
                        SmallVectorImpl<MachineInstr *> &DeadInsts,
                        SmallVectorImpl<Register> &UpdatedDefs) {
  unsigned Opcode = MI.getOpcode();
  assert(Opcode == TargetOpcode::G_ANYEXT || Opcode == TargetOpcode::G_ZEXT ||
         Opcode == TargetOpcode::G_SEXT);

  if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF,
                                         MI.getOperand(1).getReg(), MRI)) {
    Register DstReg = MI.getOperand(0).getReg();
    LLT DstTy = MRI.getType(DstReg);

    if (Opcode == TargetOpcode::G_ANYEXT) {
      // G_ANYEXT (G_IMPLICIT_DEF) -> G_IMPLICIT_DEF
      if (!isInstLegal({TargetOpcode::G_IMPLICIT_DEF, {DstTy}}))
        return false;
      Builder.buildInstr(TargetOpcode::G_IMPLICIT_DEF, {DstReg}, {});
    } else {
      // G_[SZ]EXT (G_IMPLICIT_DEF) -> G_CONSTANT 0: the extended bits are
      // all zero for zext and copies of an undefined bit for sext.
      if (isConstantUnsupported(DstTy))
        return false;
      Builder.buildConstant(DstReg, 0);
    }

    markInstAndDefDead(MI, *DefMI, DeadInsts);
    return true;
  }
  return false;
}
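
// Illustrative MIR (sketch, not from the original source):
//   %0:_(s8)  = G_IMPLICIT_DEF
//   %1:_(s32) = G_ZEXT %0:_(s8)
// folds to
//   %1:_(s32) = G_CONSTANT i32 0
// while a G_ANYEXT of an implicit def folds to a wider G_IMPLICIT_DEF.
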
bool tryFoldUnmergeCast(MachineInstr &MI, MachineInstr &CastMI,
                        SmallVectorImpl<MachineInstr *> &DeadInsts,
                        SmallVectorImpl<Register> &UpdatedDefs) {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  const unsigned CastOpc = CastMI.getOpcode();
  if (!isArtifactCast(CastOpc))
    return false;

  const unsigned NumDefs = MI.getNumOperands() - 1;
  Register CastSrcReg = CastMI.getOperand(1).getReg();
  const LLT CastSrcTy = MRI.getType(CastSrcReg);
  const LLT DestTy = MRI.getType(MI.getOperand(0).getReg());
  const LLT SrcTy = MRI.getType(MI.getOperand(NumDefs).getReg());

  const unsigned CastSrcSize = CastSrcTy.getSizeInBits();
  const unsigned DestSize = DestTy.getSizeInBits();

  if (CastOpc == TargetOpcode::G_TRUNC) {
    if (SrcTy.isVector() && SrcTy.getScalarType() == DestTy.getScalarType()) {
      // Unmerge the wider (pre-trunc) vector instead and truncate each piece.
      unsigned UnmergeNumElts =
          DestTy.isVector() ? CastSrcTy.getNumElements() / NumDefs : 1;
      LLT UnmergeTy = CastSrcTy.changeNumElements(UnmergeNumElts);

      if (isInstUnsupported(
              {TargetOpcode::G_UNMERGE_VALUES, {UnmergeTy, CastSrcTy}}))
        return false;

      auto NewUnmerge = Builder.buildUnmerge(UnmergeTy, CastSrcReg);
      for (unsigned I = 0; I != NumDefs; ++I) {
        Register DefReg = MI.getOperand(I).getReg();
        Builder.buildTrunc(DefReg, NewUnmerge.getReg(I));
      }

      markInstAndDefDead(MI, CastMI, DeadInsts);
      return true;
    }

    if (CastSrcTy.isScalar() && SrcTy.isScalar() && !DestTy.isVector()) {
      // Unmerge(trunc) can be combined if the trunc source size is a
      // multiple of the unmerge destination size.
      if (CastSrcSize % DestSize != 0)
        return false;

      // Check if the new unmerge is supported.
      if (isInstUnsupported(
              {TargetOpcode::G_UNMERGE_VALUES, {DestTy, CastSrcTy}}))
        return false;

      // Gather the original destination registers and create new ones for
      // the bits the trunc used to drop.
      const unsigned NewNumDefs = CastSrcSize / DestSize;
      SmallVector<Register, 8> DstRegs(NewNumDefs);
      for (unsigned Idx = 0; Idx < NewNumDefs; ++Idx) {
        if (Idx < NumDefs)
          DstRegs[Idx] = MI.getOperand(Idx).getReg();
        else
          DstRegs[Idx] = MRI.createGenericVirtualRegister(DestTy);
      }

      Builder.buildUnmerge(DstRegs, CastSrcReg);
      markInstAndDefDead(MI, CastMI, DeadInsts);
      return true;
    }
  }

  // TODO: support combines with other casts as well.
  return false;
}
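
// Illustrative MIR for the scalar unmerge(trunc) path above (sketch, not
// from the original source):
//   %1:_(s16) = G_TRUNC %0:_(s32)
//   %2:_(s8), %3:_(s8) = G_UNMERGE_VALUES %1:_(s16)
// becomes an unmerge of the wider source, with extra results covering the
// bits the trunc used to drop:
//   %2:_(s8), %3:_(s8), %4:_(s8), %5:_(s8) = G_UNMERGE_VALUES %0:_(s32)
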
static bool canFoldMergeOpcode(unsigned MergeOp, unsigned ConvertOp, LLT OpTy,
                               LLT DestTy) {
  switch (MergeOp) {
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_MERGE_VALUES:
    // ...
  case TargetOpcode::G_CONCAT_VECTORS: {
    // ...
    if (ConvertOp == TargetOpcode::G_TRUNC)
      // ...
    // ...
  }
  default:
    return false;
  }
}

/// Try to replace DstReg with SrcReg or build a COPY instruction
/// depending on the register constraints.
static void replaceRegOrBuildCopy(Register DstReg, Register SrcReg,
                                  MachineRegisterInfo &MRI,
                                  MachineIRBuilder &Builder,
                                  SmallVectorImpl<Register> &UpdatedDefs,
                                  GISelChangeObserver &Observer) {
  if (!canReplaceReg(DstReg, SrcReg, MRI)) {
    Builder.buildCopy(DstReg, SrcReg);
    return;
  }
  // Notify the observer for every user around the register replacement.
  SmallVector<MachineInstr *, 4> UseMIs;
  for (auto &UseMI : MRI.use_instructions(DstReg)) {
    UseMIs.push_back(&UseMI);
    Observer.changingInstr(UseMI);
  }
  MRI.replaceRegWith(DstReg, SrcReg);
  for (auto *UseMI : UseMIs)
    Observer.changedInstr(*UseMI);
}

/// Return the operand index in MI that defines SearchDef.
static unsigned getDefIndex(const MachineInstr &MI, Register SearchDef) {
  unsigned DefIdx = 0;
  for (const MachineOperand &Def : MI.defs()) {
    if (Def.getReg() == SearchDef)
      break;
    ++DefIdx;
  }
  return DefIdx;
}
bool tryCombineUnmergeValues(MachineInstr &MI,
                             SmallVectorImpl<MachineInstr *> &DeadInsts,
                             SmallVectorImpl<Register> &UpdatedDefs,
                             GISelChangeObserver &Observer) {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  unsigned NumDefs = MI.getNumOperands() - 1;
  Register SrcReg = MI.getOperand(NumDefs).getReg();
  MachineInstr *SrcDef = getDefIgnoringCopies(SrcReg, MRI);
  if (!SrcDef)
    return false;

  LLT OpTy = MRI.getType(MI.getOperand(NumDefs).getReg());
  LLT DestTy = MRI.getType(MI.getOperand(0).getReg());

  // unmerge(unmerge): unmerge the outer source directly into this
  // instruction's destinations.
  if (SrcDef->getOpcode() == TargetOpcode::G_UNMERGE_VALUES) {
    const unsigned NumSrcOps = SrcDef->getNumOperands();
    Register SrcUnmergeSrc = SrcDef->getOperand(NumSrcOps - 1).getReg();
    LLT SrcUnmergeSrcTy = MRI.getType(SrcUnmergeSrc);

    // Only fold if the resulting unmerge is acceptable for the target.
    LegalizeActionStep ActionStep = LI.getAction(
        {TargetOpcode::G_UNMERGE_VALUES, {OpTy, SrcUnmergeSrcTy}});
    switch (ActionStep.Action) {
    case LegalizeActions::Lower:
    case LegalizeActions::Unsupported:
      break;
    // ...
    default:
      return false;
    }

    auto NewUnmerge = Builder.buildUnmerge(DestTy, SrcUnmergeSrc);

    unsigned SrcDefIdx = getDefIndex(*SrcDef, SrcReg);
    for (unsigned I = 0; I != NumDefs; ++I) {
      Register Def = MI.getOperand(I).getReg();
      replaceRegOrBuildCopy(Def, NewUnmerge.getReg(SrcDefIdx * NumDefs + I),
                            MRI, Builder, UpdatedDefs, Observer);
    }

    markInstAndDefDead(MI, *SrcDef, DeadInsts, SrcDefIdx);
    return true;
  }

  MachineInstr *MergeI = SrcDef;
  unsigned ConvertOp = 0;

  // Handle intermediate conversions (unmerge(cast(merge))).
  unsigned SrcOp = SrcDef->getOpcode();
  if (isArtifactCast(SrcOp)) {
    ConvertOp = SrcOp;
    MergeI = getDefIgnoringCopies(SrcDef->getOperand(1).getReg(), MRI);
  }

  if (!MergeI || !canFoldMergeOpcode(MergeI->getOpcode(),
                                     ConvertOp, OpTy, DestTy)) {
    // We might still be able to combine unmerge(cast).
    return tryFoldUnmergeCast(MI, *SrcDef, DeadInsts, UpdatedDefs);
  }

  const unsigned NumMergeRegs = MergeI->getNumOperands() - 1;

  if (NumMergeRegs < NumDefs) {
    if (NumDefs % NumMergeRegs != 0)
      return false;

    // Transform to UNMERGEs, for example
    //   %1 = G_MERGE_VALUES %4, %5
    //   %9, %10, %11, %12 = G_UNMERGE_VALUES %1
    // to
    //   %9, %10 = G_UNMERGE_VALUES %4
    //   %11, %12 = G_UNMERGE_VALUES %5
    const unsigned NewNumDefs = NumDefs / NumMergeRegs;
    for (unsigned Idx = 0; Idx < NumMergeRegs; ++Idx) {
      SmallVector<Register, 8> DstRegs;
      for (unsigned j = 0, DefIdx = Idx * NewNumDefs; j < NewNumDefs;
           ++j, ++DefIdx)
        DstRegs.push_back(MI.getOperand(DefIdx).getReg());

      if (ConvertOp) {
        // A vector that is split and casted: unmerge to the element type
        // and do the conversion on the pieces.
        LLT MergeSrcTy = MRI.getType(MergeI->getOperand(1).getReg());
        LLT MergeEltTy = MergeSrcTy.divide(NewNumDefs);

        SmallVector<Register, 8> TmpRegs(NewNumDefs);
        for (unsigned k = 0; k < NewNumDefs; ++k)
          TmpRegs[k] = MRI.createGenericVirtualRegister(MergeEltTy);

        Builder.buildUnmerge(TmpRegs, MergeI->getOperand(Idx + 1).getReg());

        for (unsigned k = 0; k < NewNumDefs; ++k)
          Builder.buildInstr(ConvertOp, {DstRegs[k]}, {TmpRegs[k]});
      } else {
        Builder.buildUnmerge(DstRegs, MergeI->getOperand(Idx + 1).getReg());
      }
    }

  } else if (NumMergeRegs > NumDefs) {
    if (ConvertOp != 0 || NumMergeRegs % NumDefs != 0)
      return false;

    // Transform to MERGEs, for example
    //   %6 = G_MERGE_VALUES %17, %18, %19, %20
    //   %7, %8 = G_UNMERGE_VALUES %6
    // to
    //   %7 = G_MERGE_VALUES %17, %18
    //   %8 = G_MERGE_VALUES %19, %20
    const unsigned NumRegs = NumMergeRegs / NumDefs;
    for (unsigned DefIdx = 0; DefIdx < NumDefs; ++DefIdx) {
      SmallVector<Register, 8> Regs;
      for (unsigned j = 0, Idx = NumRegs * DefIdx + 1; j < NumRegs;
           ++j, ++Idx)
        Regs.push_back(MergeI->getOperand(Idx).getReg());

      Register DefReg = MI.getOperand(DefIdx).getReg();
      Builder.buildMerge(DefReg, Regs);
    }

  } else {
    LLT MergeSrcTy = MRI.getType(MergeI->getOperand(1).getReg());

    if (!ConvertOp && DestTy != MergeSrcTy)
      ConvertOp = TargetOpcode::G_BITCAST;

    if (ConvertOp) {
      for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
        Register MergeSrc = MergeI->getOperand(Idx + 1).getReg();
        Register DefReg = MI.getOperand(Idx).getReg();
        Builder.buildInstr(ConvertOp, {DefReg}, {MergeSrc});
      }

      markInstAndDefDead(MI, *MergeI, DeadInsts);
      return true;
    }

    assert(DestTy == MergeSrcTy &&
           "Bitcast and the other kinds of conversions should "
           "have happened earlier");

    // Map the merge operands straight onto the unmerge destinations.
    for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
      Register DstReg = MI.getOperand(Idx).getReg();
      Register SrcReg = MergeI->getOperand(Idx + 1).getReg();
      replaceRegOrBuildCopy(DstReg, SrcReg, MRI, Builder, UpdatedDefs,
                            Observer);
    }
  }

  markInstAndDefDead(MI, *MergeI, DeadInsts);
  return true;
}
static bool isMergeLikeOpcode(unsigned Opc) {
  switch (Opc) {
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_CONCAT_VECTORS:
    return true;
  default:
    return false;
  }
}

bool tryCombineExtract(MachineInstr &MI,
                       SmallVectorImpl<MachineInstr *> &DeadInsts,
                       SmallVectorImpl<Register> &UpdatedDefs) {
  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT);

  // Try to use the source registers from a G_MERGE_VALUES directly.
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
  MachineInstr *MergeI = MRI.getVRegDef(SrcReg);
  if (!MergeI || !isMergeLikeOpcode(MergeI->getOpcode()))
    return false;

  LLT DstTy = MRI.getType(DstReg);
  LLT SrcTy = MRI.getType(SrcReg);

  unsigned ExtractDstSize = DstTy.getSizeInBits();
  unsigned Offset = MI.getOperand(2).getImm();
  unsigned NumMergeSrcs = MergeI->getNumOperands() - 1;
  unsigned MergeSrcSize = SrcTy.getSizeInBits() / NumMergeSrcs;
  unsigned MergeSrcIdx = Offset / MergeSrcSize;

  // Compute the index of the merge input covering the last extracted bit.
  unsigned EndMergeSrcIdx = (Offset + ExtractDstSize - 1) / MergeSrcSize;

  // Can't handle the case where the extract spans multiple inputs.
  if (MergeSrcIdx != EndMergeSrcIdx)
    return false;

  Builder.buildExtract(DstReg, MergeI->getOperand(MergeSrcIdx + 1).getReg(),
                       Offset - MergeSrcIdx * MergeSrcSize);
  markInstAndDefDead(MI, *MergeI, DeadInsts);
  return true;
}
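
// Illustrative MIR for the extract(merge) fold above (sketch, not from the
// original source):
//   %2:_(s64) = G_MERGE_VALUES %0:_(s32), %1:_(s32)
//   %3:_(s16) = G_EXTRACT %2:_(s64), 40
// reads bits [40, 56), which lie entirely inside %1, so it becomes
//   %3:_(s16) = G_EXTRACT %1:_(s32), 8
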
/// Try to combine away MI.
bool tryCombineInstruction(MachineInstr &MI,
                           SmallVectorImpl<MachineInstr *> &DeadInsts,
                           GISelObserverWrapper &WrapperObserver) {
  // Process DeadInsts left over from earlier (possibly recursive) calls
  // before producing new ones for this combine.
  if (!DeadInsts.empty())
    deleteMarkedDeadInsts(DeadInsts, WrapperObserver);

  SmallVector<Register, 4> UpdatedDefs;
  bool Changed = false;
  switch (MI.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_ANYEXT:
    Changed = tryCombineAnyExt(MI, DeadInsts, UpdatedDefs);
    break;
  case TargetOpcode::G_ZEXT:
    Changed = tryCombineZExt(MI, DeadInsts, UpdatedDefs, WrapperObserver);
    break;
  case TargetOpcode::G_SEXT:
    Changed = tryCombineSExt(MI, DeadInsts, UpdatedDefs);
    break;
  case TargetOpcode::G_UNMERGE_VALUES:
    Changed =
        tryCombineUnmergeValues(MI, DeadInsts, UpdatedDefs, WrapperObserver);
    break;
  case TargetOpcode::G_MERGE_VALUES:
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_CONCAT_VECTORS:
    // If any user of this merge is an unmerge or trunc, queue the def so the
    // worklist below can try folding looking up the def-use chain.
    for (MachineInstr &U : MRI.use_instructions(MI.getOperand(0).getReg())) {
      if (U.getOpcode() == TargetOpcode::G_UNMERGE_VALUES ||
          U.getOpcode() == TargetOpcode::G_TRUNC) {
        UpdatedDefs.push_back(MI.getOperand(0).getReg());
        break;
      }
    }
    break;
  case TargetOpcode::G_EXTRACT:
    Changed = tryCombineExtract(MI, DeadInsts, UpdatedDefs);
    break;
  case TargetOpcode::G_TRUNC:
    Changed = tryCombineTrunc(MI, DeadInsts, UpdatedDefs, WrapperObserver);
    break;
  }

  // Follow the def-use chain of everything that was redefined and re-queue
  // artifact users so they get another combine attempt.
  while (!UpdatedDefs.empty()) {
    Register NewDef = UpdatedDefs.pop_back_val();
    for (MachineInstr &Use : MRI.use_instructions(NewDef)) {
      switch (Use.getOpcode()) {
      // Keep this list in sync with the list of all artifact combines.
      case TargetOpcode::G_ANYEXT:
      case TargetOpcode::G_ZEXT:
      case TargetOpcode::G_SEXT:
      case TargetOpcode::G_UNMERGE_VALUES:
      case TargetOpcode::G_EXTRACT:
      case TargetOpcode::G_TRUNC:
        // Adding Use to ArtifactList.
        WrapperObserver.changedInstr(Use);
        break;
      case TargetOpcode::COPY: {
        Register Copy = Use.getOperand(0).getReg();
        if (Copy.isVirtual())
          UpdatedDefs.push_back(Copy);
        break;
      }
      default:
        break;
      }
    }
  }
  return Changed;
}
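
// Usage note (an assumption about how the Legalizer pass drives this class,
// not shown in this listing): the Legalizer keeps a worklist of artifacts
// and, for each instruction it pops, calls tryCombineInstruction(MI,
// DeadInsts, WrapperObserver); instructions collected in DeadInsts are then
// erased through the observer so other listeners stay in sync.
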
static Register getArtifactSrcReg(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case TargetOpcode::COPY:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_EXTRACT:
    return MI.getOperand(1).getReg();
  case TargetOpcode::G_UNMERGE_VALUES:
    return MI.getOperand(MI.getNumOperands() - 1).getReg();
  default:
    llvm_unreachable("Not a legalization artifact");
  }
}
/// Mark DefMI as dead if rewriting or killing MI makes it dead as well.
/// In-between COPYs and artifact casts are also collected when they become
/// dead. MI itself is not marked dead here.
void markDefDead(MachineInstr &MI, MachineInstr &DefMI,
                 SmallVectorImpl<MachineInstr *> &DeadInsts,
                 unsigned DefIdx = 0) {
  // Collect all copies/artifact casts between MI and DefMI that become dead
  // once MI is rewritten, e.g.
  //   %1(s1)  = G_TRUNC %0(s32)
  //   %2(s1)  = COPY %1(s1)
  //   %3(s1)  = COPY %2(s1)
  //   %4(s32) = G_ANYEXT %3(s1)
  // After replacing %4 with a copy of %0, %3, %2 and %1 are all dead.
  MachineInstr *PrevMI = &MI;
  while (PrevMI != &DefMI) {
    Register PrevRegSrc = getArtifactSrcReg(*PrevMI);

    MachineInstr *TmpDef = MRI.getVRegDef(PrevRegSrc);
    if (MRI.hasOneUse(PrevRegSrc)) {
      if (TmpDef != &DefMI) {
        assert((TmpDef->getOpcode() == TargetOpcode::COPY ||
                isArtifactCast(TmpDef->getOpcode())) &&
               "Expecting copy or artifact cast here");

        DeadInsts.push_back(TmpDef);
      }
    } else
      break;
    PrevMI = TmpDef;
  }

  if (PrevMI == &DefMI) {
    // DefMI is dead only if none of its other defs are still used and the
    // def we walked through has no use besides the chain above.
    unsigned I = 0;
    bool IsDead = true;
    for (MachineOperand &Def : DefMI.defs()) {
      if (I != DefIdx) {
        if (!MRI.use_empty(Def.getReg())) {
          IsDead = false;
          break;
        }
      } else if (!MRI.hasOneUse(DefMI.getOperand(DefIdx).getReg())) {
        IsDead = false;
        break;
      }
      ++I;
    }
    if (IsDead)
      DeadInsts.push_back(&DefMI);
  }
}
/// Mark MI as dead. If a def of one of MI's operands, DefMI, would also be
/// dead due to MI being killed, mark DefMI as dead too.
void markInstAndDefDead(MachineInstr &MI, MachineInstr &DefMI,
                        SmallVectorImpl<MachineInstr *> &DeadInsts,
                        unsigned DefIdx = 0) {
  DeadInsts.push_back(&MI);
  markDefDead(MI, DefMI, DeadInsts, DefIdx);
}
/// Erase the instructions collected in DeadInsts, notifying the observer
/// for each one before it is removed.
void deleteMarkedDeadInsts(SmallVectorImpl<MachineInstr *> &DeadInsts,
                           GISelObserverWrapper &WrapperObserver) {
  for (auto *DeadMI : DeadInsts) {
    WrapperObserver.erasingInstr(*DeadMI);
    DeadMI->eraseFromParentAndMarkDBGValuesForRemoval();
  }
  DeadInsts.clear();
}
/// Checks if the legalizer info has no rule for the instruction, or marks
/// it as unsupported.
bool isInstUnsupported(const LegalityQuery &Query) const {
  using namespace LegalizeActions;
  LegalizeAction Step = LI.getAction(Query).Action;
  return Step == Unsupported || Step == NotFound;
}

bool isInstLegal(const LegalityQuery &Query) const {
  return LI.getAction(Query).Action == LegalizeActions::Legal;
}

bool isConstantUnsupported(LLT Ty) const {
  if (!Ty.isVector())
    return isInstUnsupported({TargetOpcode::G_CONSTANT, {Ty}});

  LLT EltTy = Ty.getElementType();
  return isInstUnsupported({TargetOpcode::G_CONSTANT, {EltTy}}) ||
         isInstUnsupported({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}});
}

/// Looks through copy instructions and returns the actual source register.
Register lookThroughCopyInstrs(Register Reg) {
  Register TmpReg;
  while (mi_match(Reg, MRI, m_Copy(m_Reg(TmpReg)))) {
    if (MRI.getType(TmpReg).isValid())
      Reg = TmpReg;
    else
      break;
  }
  return Reg;
}