#ifndef LLVM_CODEGEN_GLOBALISEL_LEGALIZATIONARTIFACTCOMBINER_H
#define LLVM_CODEGEN_GLOBALISEL_LEGALIZATIONARTIFACTCOMBINER_H

#define DEBUG_TYPE "legalizer"

namespace llvm {
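/// LegalizationArtifactCombiner cleans up the "artifact" instructions the
/// legalizer introduces when breaking values apart and rebuilding them
/// (G_TRUNC, G_[SZA]EXT, G_MERGE_VALUES, G_UNMERGE_VALUES, G_EXTRACT, ...),
/// folding matching pairs of artifacts back into simpler operations.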
class LegalizationArtifactCombiner {
  MachineIRBuilder &Builder;
  MachineRegisterInfo &MRI;
  const LegalizerInfo &LI;
  GISelValueTracking *VT;

  static bool isArtifactCast(unsigned Opc) {
    switch (Opc) {
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
    case TargetOpcode::G_ANYEXT:
      return true;
    default:
      return false;
    }
  }
public:
  LegalizationArtifactCombiner(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                               const LegalizerInfo &LI,
                               GISelValueTracking *VT = nullptr)
      : Builder(B), MRI(MRI), LI(LI), VT(VT) {}
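  /// Try to fold away a G_ANYEXT artifact, e.g.:
  ///   anyext(trunc x)      -> anyext/copy/trunc x
  ///   anyext([sza]ext x)   -> [sza]ext x
  ///   anyext(G_CONSTANT c) -> G_CONSTANT (sext c), if legal
  /// Falls back to tryFoldImplicitDef for anyext(G_IMPLICIT_DEF).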
  bool tryCombineAnyExt(MachineInstr &MI,
                        SmallVectorImpl<MachineInstr *> &DeadInsts,
                        SmallVectorImpl<Register> &UpdatedDefs,
                        GISelObserverWrapper &Observer) {
    using namespace llvm::MIPatternMatch;
    assert(MI.getOpcode() == TargetOpcode::G_ANYEXT);

    Builder.setInstrAndDebugLoc(MI);
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());

    // anyext(trunc x) -> anyext/copy/trunc x
    Register TruncSrc;
    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
      if (MRI.getType(DstReg) == MRI.getType(TruncSrc))
        replaceRegOrBuildCopy(DstReg, TruncSrc, MRI, Builder, UpdatedDefs,
                              Observer);
      else
        Builder.buildAnyExtOrTrunc(DstReg, TruncSrc);
      UpdatedDefs.push_back(DstReg);
      markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
      return true;
    }

    // anyext([sza]ext x) -> [sza]ext x
    Register ExtSrc;
    MachineInstr *ExtMI;
    if (mi_match(SrcReg, MRI,
                 m_all_of(m_MInstr(ExtMI), m_any_of(m_GAnyExt(m_Reg(ExtSrc)),
                                                    m_GSExt(m_Reg(ExtSrc)),
                                                    m_GZExt(m_Reg(ExtSrc)))))) {
      Builder.buildInstr(ExtMI->getOpcode(), {DstReg}, {ExtSrc});
      UpdatedDefs.push_back(DstReg);
      markInstAndDefDead(MI, *ExtMI, DeadInsts);
      return true;
    }

    // Try to fold anyext(G_CONSTANT) when the larger constant type is legal.
    auto *SrcMI = MRI.getVRegDef(SrcReg);
    if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
      const LLT DstTy = MRI.getType(DstReg);
      if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
        auto &CstVal = SrcMI->getOperand(1);
        auto MergedLocation =
            DebugLoc::getMergedLocation(MI.getDebugLoc(), SrcMI->getDebugLoc());
        Builder.setDebugLoc(MergedLocation);
        Builder.buildConstant(
            DstReg, CstVal.getCImm()->getValue().sext(DstTy.getSizeInBits()));
        UpdatedDefs.push_back(DstReg);
        markInstAndDefDead(MI, *SrcMI, DeadInsts);
        return true;
      }
    }
    return tryFoldImplicitDef(MI, DeadInsts, UpdatedDefs, Observer);
  }
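  /// Try to fold away a G_ZEXT artifact, e.g.:
  ///   zext(trunc x) -> and (anyext/copy/trunc x), mask
  ///   zext(sext x)  -> and (sext x), mask
  ///   zext(zext x)  -> zext x
  ///   zext(G_CONSTANT c) -> G_CONSTANT (zext c), if legal
  /// The mask keeps only the low SrcTy-sized bits; the G_AND is elided when
  /// value tracking already proves the high bits are zero.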
  bool tryCombineZExt(MachineInstr &MI,
                      SmallVectorImpl<MachineInstr *> &DeadInsts,
                      SmallVectorImpl<Register> &UpdatedDefs,
                      GISelObserverWrapper &Observer) {
    using namespace llvm::MIPatternMatch;
    assert(MI.getOpcode() == TargetOpcode::G_ZEXT);

    Builder.setInstrAndDebugLoc(MI);
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());

    // zext(trunc x) -> and (anyext/copy/trunc x), mask
    // zext(sext x)  -> and (sext x), mask
    Register TruncSrc;
    Register SextSrc;
    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))) ||
        mi_match(SrcReg, MRI, m_GSExt(m_Reg(SextSrc)))) {
      LLT DstTy = MRI.getType(DstReg);
      if (isInstUnsupported({TargetOpcode::G_AND, {DstTy}}) ||
          isConstantUnsupported(DstTy))
        return false;
      LLT SrcTy = MRI.getType(SrcReg);
      APInt MaskVal = APInt::getAllOnes(SrcTy.getScalarSizeInBits());
      if (SextSrc && (DstTy != MRI.getType(SextSrc)))
        SextSrc = Builder.buildSExtOrTrunc(DstTy, SextSrc).getReg(0);
      if (TruncSrc && (DstTy != MRI.getType(TruncSrc)))
        TruncSrc = Builder.buildAnyExtOrTrunc(DstTy, TruncSrc).getReg(0);
      APInt ExtMaskVal = MaskVal.zext(DstTy.getScalarSizeInBits());
      Register AndSrc = SextSrc ? SextSrc : TruncSrc;
      // Elide the G_AND and the mask constant if value tracking shows the
      // high bits are already zero.
      if (VT && (VT->getKnownZeroes(AndSrc) | ExtMaskVal).isAllOnes()) {
        replaceRegOrBuildCopy(DstReg, AndSrc, MRI, Builder, UpdatedDefs,
                              Observer);
      } else {
        auto Mask = Builder.buildConstant(DstTy, ExtMaskVal);
        Builder.buildAnd(DstReg, AndSrc, Mask);
        UpdatedDefs.push_back(DstReg);
      }
      markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
      return true;
    }

    // zext(zext x) -> zext x
    Register ZextSrc;
    if (mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZextSrc)))) {
      Observer.changingInstr(MI);
      MI.getOperand(1).setReg(ZextSrc);
      Observer.changedInstr(MI);
      UpdatedDefs.push_back(DstReg);
      markDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
      return true;
    }

    // Try to fold zext(G_CONSTANT) when the larger constant type is legal.
    auto *SrcMI = MRI.getVRegDef(SrcReg);
    if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
      const LLT DstTy = MRI.getType(DstReg);
      if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
        auto &CstVal = SrcMI->getOperand(1);
        Builder.buildConstant(
            DstReg, CstVal.getCImm()->getValue().zext(DstTy.getSizeInBits()));
        UpdatedDefs.push_back(DstReg);
        markInstAndDefDead(MI, *SrcMI, DeadInsts);
        return true;
      }
    }
    return tryFoldImplicitDef(MI, DeadInsts, UpdatedDefs, Observer);
  }
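  /// Try to fold away a G_SEXT artifact, e.g.:
  ///   sext(trunc x)   -> sext_inreg (anyext/copy/trunc x), c
  ///   sext([sz]ext x) -> [sz]ext x
  ///   sext(G_CONSTANT c) -> G_CONSTANT (sext c), if legal
  /// The G_SEXT_INREG is elided when value tracking proves enough sign bits.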
  bool tryCombineSExt(MachineInstr &MI,
                      SmallVectorImpl<MachineInstr *> &DeadInsts,
                      SmallVectorImpl<Register> &UpdatedDefs,
                      GISelObserverWrapper &Observer) {
    using namespace llvm::MIPatternMatch;
    assert(MI.getOpcode() == TargetOpcode::G_SEXT);

    Builder.setInstrAndDebugLoc(MI);
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());

    // sext(trunc x) -> sext_inreg (anyext/copy/trunc x), c
    Register TruncSrc;
    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
      LLT DstTy = MRI.getType(DstReg);
      if (isInstUnsupported({TargetOpcode::G_SEXT_INREG, {DstTy}}))
        return false;
      LLT SrcTy = MRI.getType(SrcReg);
      uint64_t SizeInBits = SrcTy.getScalarSizeInBits();
      if (DstTy != MRI.getType(TruncSrc))
        TruncSrc = Builder.buildAnyExtOrTrunc(DstTy, TruncSrc).getReg(0);
      // Elide G_SEXT_INREG if value tracking already proves enough sign bits.
      if (VT && VT->computeNumSignBits(TruncSrc) >
                    DstTy.getScalarSizeInBits() - SizeInBits)
        replaceRegOrBuildCopy(DstReg, TruncSrc, MRI, Builder, UpdatedDefs,
                              Observer);
      else
        Builder.buildSExtInReg(DstReg, TruncSrc, SizeInBits);
      markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
      return true;
    }

    // sext(zext x) -> zext x
    // sext(sext x) -> sext x
    Register ExtSrc;
    MachineInstr *ExtMI;
    if (mi_match(SrcReg, MRI,
                 m_all_of(m_MInstr(ExtMI), m_any_of(m_GZExt(m_Reg(ExtSrc)),
                                                    m_GSExt(m_Reg(ExtSrc)))))) {
      Builder.buildInstr(ExtMI->getOpcode(), {DstReg}, {ExtSrc});
      UpdatedDefs.push_back(DstReg);
      markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
      return true;
    }

    // Try to fold sext(G_CONSTANT) when the larger constant type is legal.
    auto *SrcMI = MRI.getVRegDef(SrcReg);
    if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
      const LLT DstTy = MRI.getType(DstReg);
      if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
        auto &CstVal = SrcMI->getOperand(1);
        Builder.buildConstant(
            DstReg, CstVal.getCImm()->getValue().sext(DstTy.getSizeInBits()));
        UpdatedDefs.push_back(DstReg);
        markInstAndDefDead(MI, *SrcMI, DeadInsts);
        return true;
      }
    }
    return tryFoldImplicitDef(MI, DeadInsts, UpdatedDefs, Observer);
  }
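  /// Try to fold away a G_TRUNC artifact, e.g.:
  ///   trunc(G_CONSTANT c)    -> G_CONSTANT (trunc c), if legal
  ///   trunc(merge x, y, ...) -> trunc/copy/smaller merge of the needed inputs
  ///   trunc(trunc x)         -> trunc x
  ///   trunc([sza]ext x)      -> x (via ArtifactValueFinder), if types match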
  bool tryCombineTrunc(MachineInstr &MI,
                       SmallVectorImpl<MachineInstr *> &DeadInsts,
                       SmallVectorImpl<Register> &UpdatedDefs,
                       GISelObserverWrapper &Observer) {
    using namespace llvm::MIPatternMatch;
    assert(MI.getOpcode() == TargetOpcode::G_TRUNC);

    Builder.setInstr(MI);
    Register DstReg = MI.getOperand(0).getReg();
    const LLT DstTy = MRI.getType(DstReg);
    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());

    // Try to fold trunc(G_CONSTANT) when the smaller constant type is legal.
    auto *SrcMI = MRI.getVRegDef(SrcReg);
    if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
        auto &CstVal = SrcMI->getOperand(1);
        Builder.buildConstant(
            DstReg, CstVal.getCImm()->getValue().trunc(DstTy.getSizeInBits()));
        UpdatedDefs.push_back(DstReg);
        markInstAndDefDead(MI, *SrcMI, DeadInsts);
        return true;
      }
    }

    // Try to fold trunc(merge) to directly use the source of the merge.
    // This gets rid of large, difficult to legalize, merges.
    if (auto *SrcMerge = dyn_cast<GMerge>(SrcMI)) {
      const Register MergeSrcReg = SrcMerge->getSourceReg(0);
      const LLT MergeSrcTy = MRI.getType(MergeSrcReg);

      // We can only fold if the types are scalar.
      const unsigned DstSize = DstTy.getSizeInBits();
      const unsigned MergeSrcSize = MergeSrcTy.getSizeInBits();
      if (!DstTy.isScalar() || !MergeSrcTy.isScalar())
        return false;

      if (DstSize < MergeSrcSize) {
        // When the merge source is larger than the destination, we can just
        // truncate the merge source directly.
        if (isInstUnsupported({TargetOpcode::G_TRUNC, {DstTy, MergeSrcTy}}))
          return false;

        LLVM_DEBUG(dbgs() << "Combining G_TRUNC(G_MERGE_VALUES) to G_TRUNC: "
                          << MI);
        Builder.buildTrunc(DstReg, MergeSrcReg);
        UpdatedDefs.push_back(DstReg);
      } else if (DstSize == MergeSrcSize) {
        // If the sizes match we can simply try to replace the register.
        LLVM_DEBUG(
            dbgs() << "Replacing G_TRUNC(G_MERGE_VALUES) with merge input: "
                   << MI);
        replaceRegOrBuildCopy(DstReg, MergeSrcReg, MRI, Builder, UpdatedDefs,
                              Observer);
      } else if (DstSize % MergeSrcSize == 0) {
        // If the trunc size is a multiple of the merge source size we can use
        // a smaller merge instead.
        if (isInstUnsupported(
                {TargetOpcode::G_MERGE_VALUES, {DstTy, MergeSrcTy}}))
          return false;

        LLVM_DEBUG(
            dbgs() << "Combining G_TRUNC(G_MERGE_VALUES) to G_MERGE_VALUES: "
                   << MI);
        const unsigned NumSrcs = DstSize / MergeSrcSize;
        assert(NumSrcs < SrcMI->getNumOperands() - 1 &&
               "trunc(merge) should require less inputs than merge");
        SmallVector<Register, 8> SrcRegs(NumSrcs);
        for (unsigned i = 0; i < NumSrcs; ++i)
          SrcRegs[i] = SrcMerge->getSourceReg(i);

        Builder.buildMergeValues(DstReg, SrcRegs);
        UpdatedDefs.push_back(DstReg);
      } else {
        // Unable to combine.
        return false;
      }

      markInstAndDefDead(MI, *SrcMerge, DeadInsts);
      return true;
    }

    // trunc(trunc x) -> trunc x
    Register TruncSrc;
    if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
      Builder.buildTrunc(DstReg, TruncSrc);
      UpdatedDefs.push_back(DstReg);
      markInstAndDefDead(MI, *MRI.getVRegDef(TruncSrc), DeadInsts);
      return true;
    }

    // trunc(ext x) -> x
    ArtifactValueFinder Finder(MRI, Builder, LI);
    if (Register FoundReg =
            Finder.findValueFromDef(DstReg, 0, DstTy.getSizeInBits(), DstTy)) {
      LLT FoundRegTy = MRI.getType(FoundReg);
      if (DstTy == FoundRegTy) {
        LLVM_DEBUG(dbgs() << ".. Combine G_TRUNC(G_[S,Z,ANY]EXT/G_TRUNC...): "
                          << MI;);
        replaceRegOrBuildCopy(DstReg, FoundReg, MRI, Builder, UpdatedDefs,
                              Observer);
        UpdatedDefs.push_back(DstReg);
        markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
        return true;
      }
    }
    return false;
  }
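  /// Try to fold G_[ASZ]EXT (G_IMPLICIT_DEF):
  ///   anyext(G_IMPLICIT_DEF)  -> G_IMPLICIT_DEF
  ///   [sz]ext(G_IMPLICIT_DEF) -> G_CONSTANT 0, since the top bits must be
  ///   all zeros (zext) or all copies of the sign bit (sext).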
  bool tryFoldImplicitDef(MachineInstr &MI,
                          SmallVectorImpl<MachineInstr *> &DeadInsts,
                          SmallVectorImpl<Register> &UpdatedDefs,
                          GISelObserverWrapper &Observer) {
    unsigned Opcode = MI.getOpcode();
    assert(Opcode == TargetOpcode::G_ANYEXT || Opcode == TargetOpcode::G_ZEXT ||
           Opcode == TargetOpcode::G_SEXT);

    if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF,
                                           MI.getOperand(1).getReg(), MRI)) {
      Builder.setInstr(MI);
      Register DstReg = MI.getOperand(0).getReg();
      LLT DstTy = MRI.getType(DstReg);

      if (Opcode == TargetOpcode::G_ANYEXT) {
        // G_ANYEXT(G_IMPLICIT_DEF) -> G_IMPLICIT_DEF
        if (!isInstLegal({TargetOpcode::G_IMPLICIT_DEF, {DstTy}}))
          return false;
        auto Impl = Builder.buildUndef(DstTy);
        replaceRegOrBuildCopy(DstReg, Impl.getReg(0), MRI, Builder, UpdatedDefs,
                              Observer);
        UpdatedDefs.push_back(DstReg);
      } else {
        // G_[SZ]EXT(G_IMPLICIT_DEF) -> G_CONSTANT 0
        if (isConstantUnsupported(DstTy))
          return false;
        auto Cnst = Builder.buildConstant(DstTy, 0);
        replaceRegOrBuildCopy(DstReg, Cnst.getReg(0), MRI, Builder, UpdatedDefs,
                              Observer);
        UpdatedDefs.push_back(DstReg);
      }

      markInstAndDefDead(MI, *DefMI, DeadInsts);
      return true;
    }
    return false;
  }
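  /// Try to fold an unmerge whose source is an artifact cast, e.g.:
  ///   %1(<4 x s8>) = G_TRUNC %0(<4 x s32>)
  ///   %2, %3, %4, %5 = G_UNMERGE_VALUES %1
  /// =>
  ///   %6, %7, %8, %9 = G_UNMERGE_VALUES %0
  ///   %2 = G_TRUNC %6 ... (one trunc per def)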
  bool tryFoldUnmergeCast(MachineInstr &MI, MachineInstr &CastMI,
                          SmallVectorImpl<MachineInstr *> &DeadInsts,
                          SmallVectorImpl<Register> &UpdatedDefs) {
    assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

    const unsigned CastOpc = CastMI.getOpcode();

    if (!isArtifactCast(CastOpc))
      return false;

    const unsigned NumDefs = MI.getNumOperands() - 1;
    Register CastSrcReg = CastMI.getOperand(1).getReg();
    const LLT CastSrcTy = MRI.getType(CastSrcReg);
    const LLT DestTy = MRI.getType(MI.getOperand(0).getReg());
    const LLT SrcTy = MRI.getType(MI.getOperand(NumDefs).getReg());

    const unsigned CastSrcSize = CastSrcTy.getSizeInBits();
    const unsigned DestSize = DestTy.getSizeInBits();

    if (CastOpc == TargetOpcode::G_TRUNC) {
      if (SrcTy.isVector() && SrcTy.getScalarType() == DestTy.getScalarType()) {
        // Unmerge the wide source first, then truncate each piece.
        unsigned UnmergeNumElts =
            DestTy.isVector() ? CastSrcTy.getNumElements() / NumDefs : 1;
        LLT UnmergeTy = CastSrcTy.changeElementCount(
            ElementCount::getFixed(UnmergeNumElts));
        LLT SrcWideTy =
            SrcTy.changeElementCount(ElementCount::getFixed(UnmergeNumElts));

        if (isInstUnsupported(
                {TargetOpcode::G_UNMERGE_VALUES, {UnmergeTy, CastSrcTy}}) ||
            LI.getAction({TargetOpcode::G_TRUNC, {SrcWideTy, UnmergeTy}})
                    .Action == LegalizeActions::MoreElements)
          return false;

        Builder.setInstr(MI);
        auto NewUnmerge = Builder.buildUnmerge(UnmergeTy, CastSrcReg);

        for (unsigned I = 0; I != NumDefs; ++I) {
          Register DefReg = MI.getOperand(I).getReg();
          UpdatedDefs.push_back(DefReg);
          Builder.buildTrunc(DefReg, NewUnmerge.getReg(I));
        }

        markInstAndDefDead(MI, CastMI, DeadInsts);
        return true;
      }

      if (CastSrcTy.isScalar() && SrcTy.isScalar() && !DestTy.isVector()) {
        // Unmerge(trunc) can be combined if the trunc source size is a
        // multiple of the unmerge destination size.
        if (CastSrcSize % DestSize != 0)
          return false;

        // Check if the new unmerge is supported.
        if (isInstUnsupported(
                {TargetOpcode::G_UNMERGE_VALUES, {DestTy, CastSrcTy}}))
          return false;

        // Gather the original destination registers and create new ones for
        // the unused bits.
        const unsigned NewNumDefs = CastSrcSize / DestSize;
        SmallVector<Register, 8> DstRegs(NewNumDefs);
        for (unsigned Idx = 0; Idx < NewNumDefs; ++Idx) {
          if (Idx < NumDefs)
            DstRegs[Idx] = MI.getOperand(Idx).getReg();
          else
            DstRegs[Idx] = MRI.createGenericVirtualRegister(DestTy);
        }

        // Build the new unmerge.
        Builder.setInstr(MI);
        Builder.buildUnmerge(DstRegs, CastSrcReg);
        markInstAndDefDead(MI, CastMI, DeadInsts);
        return true;
      }
    }

    // TODO: support combines with other casts as well.
    return false;
  }
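  /// canFoldMergeOpcode: decide whether a G_MERGE_VALUES-like definition
  /// (G_BUILD_VECTOR, G_MERGE_VALUES or G_CONCAT_VECTORS) can be folded into
  /// an unmerge, given the intermediate conversion opcode and the types
  /// involved.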
  static bool canFoldMergeOpcode(unsigned MergeOp, unsigned ConvertOp,
                                 LLT OpTy, LLT DestTy) {
    switch (MergeOp) {
    default:
      return false;
    case TargetOpcode::G_BUILD_VECTOR:
    case TargetOpcode::G_MERGE_VALUES:
      // Only fold when the destination is the scalar element type of the
      // vector operand, so no extra bitcasts are needed.
      return !DestTy.isVector() && OpTy.isVector() &&
             DestTy == OpTy.getElementType();
    case TargetOpcode::G_CONCAT_VECTORS: {
      if (ConvertOp == 0)
        return true;
      if (!DestTy.isVector())
        return false;
      const unsigned OpEltSize = OpTy.getElementType().getSizeInBits();
      // Don't handle scalarization with a cast that isn't in the same
      // direction as the vector cast.
      if (ConvertOp == TargetOpcode::G_TRUNC)
        return DestTy.getSizeInBits() <= OpEltSize;
      return DestTy.getSizeInBits() >= OpEltSize;
    }
    }
  }

  /// Try to replace DstReg with SrcReg or build a COPY instruction
  /// depending on the register constraints.
  static void replaceRegOrBuildCopy(Register DstReg, Register SrcReg,
                                    MachineRegisterInfo &MRI,
                                    MachineIRBuilder &Builder,
                                    SmallVectorImpl<Register> &UpdatedDefs,
                                    GISelChangeObserver &Observer) {
    if (!llvm::canReplaceReg(DstReg, SrcReg, MRI)) {
      Builder.buildCopy(DstReg, SrcReg);
      UpdatedDefs.push_back(DstReg);
      return;
    }
    SmallVector<MachineInstr *, 4> UseMIs;
    // Get the users and notify the observer before replacing.
    for (auto &UseMI : MRI.use_instructions(DstReg)) {
      UseMIs.push_back(&UseMI);
      Observer.changingInstr(UseMI);
    }
    // Replace the registers.
    MRI.replaceRegWith(DstReg, SrcReg);
    UpdatedDefs.push_back(SrcReg);
    // Notify the observer that we changed the instructions.
    for (auto *UseMI : UseMIs)
      Observer.changedInstr(*UseMI);
  }

  /// Return the operand index in MI that defines SearchDef.
  static unsigned getDefIndex(const MachineInstr &MI, Register SearchDef) {
    unsigned DefIdx = 0;
    for (const MachineOperand &Def : MI.defs()) {
      if (Def.getReg() == SearchDef)
        break;
      ++DefIdx;
    }
    return DefIdx;
  }
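  /// ArtifactValueFinder provides utilities for finding the source registers
  /// of specific bit ranges in an artifact, looking through chains of other
  /// artifacts. findValueFromConcat handles a G_CONCAT_VECTORS source: it maps
  /// the requested [StartBit, StartBit + Size) range onto the single concat
  /// operand that provides those bits, if any.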
  class ArtifactValueFinder {
    MachineRegisterInfo &MRI;
    MachineIRBuilder &MIB;
    const LegalizerInfo &LI;
    // Stores the best register found in the current query so far.
    Register CurrentBest = Register();

    Register findValueFromConcat(GConcatVectors &Concat, unsigned StartBit,
                                 unsigned Size) {
      // Find the source operand that provides the bits requested.
      Register Src1Reg = Concat.getSourceReg(0);
      unsigned SrcSize = MRI.getType(Src1Reg).getSizeInBits();

      // Operand index of the source that provides the start of the bit range.
      unsigned StartSrcIdx = (StartBit / SrcSize) + 1;
      // Offset into the source at which the bit range starts.
      unsigned InRegOffset = StartBit % SrcSize;

      // Check that the bits don't span multiple sources.
      if (InRegOffset + Size > SrcSize)
        return CurrentBest;

      Register SrcReg = Concat.getReg(StartSrcIdx);
      if (InRegOffset == 0 && Size == SrcSize) {
        CurrentBest = SrcReg;
        return findValueFromDefImpl(SrcReg, 0, Size, MRI.getType(SrcReg));
      }

      return findValueFromDefImpl(SrcReg, InRegOffset, Size,
                                  MRI.getType(SrcReg));
    }
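    /// findValueFromBuildVector: given a G_BUILD_VECTOR source, map the
    /// requested bit range either onto a single scalar source, or onto a
    /// newly built, smaller G_BUILD_VECTOR covering a contiguous run of
    /// sources (if such a build_vector would be legal).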
    Register findValueFromBuildVector(GBuildVector &BV, unsigned StartBit,
                                      unsigned Size) {
      // Find the source operand that provides the bits requested.
      Register Src1Reg = BV.getSourceReg(0);
      unsigned SrcSize = MRI.getType(Src1Reg).getSizeInBits();

      // Operand index of the source that provides the start of the bit range.
      unsigned StartSrcIdx = (StartBit / SrcSize) + 1;
      // Offset into the source at which the bit range starts.
      unsigned InRegOffset = StartBit % SrcSize;

      if (InRegOffset != 0)
        return CurrentBest; // Bits don't start at the beginning of a source.
      if (Size < SrcSize)
        return CurrentBest; // A single source is already wider than Size.

      if (Size > SrcSize) {
        if (Size % SrcSize > 0)
          return CurrentBest; // Not covered by an exact number of sources.

        unsigned NumSrcsUsed = Size / SrcSize;
        LLT SrcTy = MRI.getType(Src1Reg);
        LLT NewBVTy = LLT::fixed_vector(NumSrcsUsed, SrcTy);

        // Check if the resulting build vector would be legal.
        LegalizeActionStep ActionStep =
            LI.getAction({TargetOpcode::G_BUILD_VECTOR, {NewBVTy, SrcTy}});
        if (ActionStep.Action != LegalizeActions::Legal)
          return CurrentBest;

        SmallVector<Register> NewSrcs;
        for (unsigned SrcIdx = StartSrcIdx; SrcIdx < StartSrcIdx + NumSrcsUsed;
             ++SrcIdx)
          NewSrcs.push_back(BV.getReg(SrcIdx));
        MIB.setInstrAndDebugLoc(BV);
        return MIB.buildBuildVector(NewBVTy, NewSrcs).getReg(0);
      }
      // A single source provides exactly the requested bits.
      return BV.getReg(StartSrcIdx);
    }
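    /// findValueFromInsert: given a G_INSERT source, decide whether the
    /// requested bit range comes entirely from the container register or
    /// entirely from the inserted register, and recurse into that operand;
    /// ranges that straddle the two give up and return the current best.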
    Register findValueFromInsert(MachineInstr &MI, unsigned StartBit,
                                 unsigned Size) {
      assert(MI.getOpcode() == TargetOpcode::G_INSERT);

      Register ContainerSrcReg = MI.getOperand(1).getReg();
      Register InsertedReg = MI.getOperand(2).getReg();
      LLT InsertedRegTy = MRI.getType(InsertedReg);
      unsigned InsertOffset = MI.getOperand(3).getImm();

      // Work out which operand the requested bits come from.
      unsigned InsertedEndBit = InsertOffset + InsertedRegTy.getSizeInBits();
      unsigned EndBit = StartBit + Size;
      unsigned NewStartBit;
      Register SrcRegToUse;
      if (EndBit <= InsertOffset || InsertedEndBit <= StartBit) {
        // The requested range doesn't overlap the inserted bits at all.
        SrcRegToUse = ContainerSrcReg;
        NewStartBit = StartBit;
        return findValueFromDefImpl(SrcRegToUse, NewStartBit, Size,
                                    MRI.getType(SrcRegToUse));
      }
      if (InsertOffset <= StartBit && EndBit <= InsertedEndBit) {
        // The requested range lies entirely within the inserted bits.
        SrcRegToUse = InsertedReg;
        NewStartBit = StartBit - InsertOffset;
        if (NewStartBit == 0 &&
            Size == MRI.getType(SrcRegToUse).getSizeInBits())
          CurrentBest = SrcRegToUse;
        return findValueFromDefImpl(SrcRegToUse, NewStartBit, Size,
                                    MRI.getType(SrcRegToUse));
      }
      // The range spans both the inserted and container regions; give up.
      return CurrentBest;
    }
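    /// findValueFromExt: for a G_[SZ,ANY]EXT source, the requested bits can be
    /// taken directly from the (scalar) source register as long as they lie
    /// entirely within it.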
    Register findValueFromExt(MachineInstr &MI, unsigned StartBit,
                              unsigned Size) {
      assert(MI.getOpcode() == TargetOpcode::G_SEXT ||
             MI.getOpcode() == TargetOpcode::G_ZEXT ||
             MI.getOpcode() == TargetOpcode::G_ANYEXT);

      Register SrcReg = MI.getOperand(1).getReg();
      LLT SrcType = MRI.getType(SrcReg);
      unsigned SrcSize = SrcType.getSizeInBits();

      // Currently we don't go into vectors.
      if (!SrcType.isScalar())
        return CurrentBest;

      if (StartBit + Size > SrcSize)
        return CurrentBest;

      if (StartBit == 0 && SrcType.getSizeInBits() == Size)
        CurrentBest = SrcReg;
      return findValueFromDefImpl(SrcReg, StartBit, Size, SrcType);
    }
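    /// findValueFromTrunc: for a G_TRUNC source, the requested bits are the
    /// low bits of the (scalar) source register, so simply recurse into it.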
    Register findValueFromTrunc(MachineInstr &MI, unsigned StartBit,
                                unsigned Size) {
      assert(MI.getOpcode() == TargetOpcode::G_TRUNC);

      Register SrcReg = MI.getOperand(1).getReg();
      LLT SrcType = MRI.getType(SrcReg);

      // Currently we don't go into vectors.
      if (!SrcType.isScalar())
        return CurrentBest;

      return findValueFromDefImpl(SrcReg, StartBit, Size, SrcType);
    }
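    /// findValueFromDefImpl: recursive driver. Looks through copies to the
    /// defining instruction and dispatches on its opcode; when nothing better
    /// is found it returns the best register recorded so far.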
    Register findValueFromDefImpl(Register DefReg, unsigned StartBit,
                                  unsigned Size, LLT DstTy) {
      std::optional<DefinitionAndSourceRegister> DefSrcReg =
          getDefSrcRegIgnoringCopies(DefReg, MRI);
      MachineInstr *Def = DefSrcReg->MI;
      DefReg = DefSrcReg->Reg;
      // If the instruction has a single def, simply delegate the search. For
      // unmerge with multiple defs, compute the offset into the unmerge source
      // first.
      switch (Def->getOpcode()) {
      case TargetOpcode::G_CONCAT_VECTORS:
        return findValueFromConcat(cast<GConcatVectors>(*Def), StartBit, Size);
      case TargetOpcode::G_UNMERGE_VALUES: {
        unsigned DefStartBit = 0;
        unsigned DefSize = MRI.getType(DefReg).getSizeInBits();
        for (const auto &MO : Def->defs()) {
          if (MO.getReg() == DefReg)
            break;
          DefStartBit += DefSize;
        }
        Register SrcReg = Def->getOperand(Def->getNumOperands() - 1).getReg();
        Register SrcOriginReg =
            findValueFromDefImpl(SrcReg, StartBit + DefStartBit, Size, DstTy);
        if (SrcOriginReg)
          return SrcOriginReg;
        // Failed to find a further value. If the bit range exactly covers
        // DefReg, that is still better than nothing.
        if (StartBit == 0 && Size == DefSize)
          return DefReg;
        return CurrentBest;
      }
      case TargetOpcode::G_BUILD_VECTOR:
        return findValueFromBuildVector(cast<GBuildVector>(*Def), StartBit,
                                        Size);
      case TargetOpcode::G_INSERT:
        return findValueFromInsert(*Def, StartBit, Size);
      case TargetOpcode::G_TRUNC:
        return findValueFromTrunc(*Def, StartBit, Size);
      case TargetOpcode::G_SEXT:
      case TargetOpcode::G_ZEXT:
      case TargetOpcode::G_ANYEXT:
        return findValueFromExt(*Def, StartBit, Size);
      case TargetOpcode::G_IMPLICIT_DEF: {
        if (MRI.getType(DefReg) == DstTy)
          return DefReg;
        MIB.setInstrAndDebugLoc(*Def);
        return MIB.buildUndef(DstTy).getReg(0);
      }
      default:
        return CurrentBest;
      }
    }
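    /// Public interface: the constructor captures MRI, the MachineIRBuilder
    /// and the LegalizerInfo; findValueFromDef is the entry point used by the
    /// trunc and unmerge combines below.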
  public:
    ArtifactValueFinder(MachineRegisterInfo &Mri, MachineIRBuilder &Builder,
                        const LegalizerInfo &Info)
        : MRI(Mri), MIB(Builder), LI(Info) {}

    Register findValueFromDef(Register DefReg, unsigned StartBit,
                              unsigned Size, LLT DstTy) {
      Register FoundReg = findValueFromDefImpl(DefReg, StartBit, Size, DstTy);
      return FoundReg != DefReg ? FoundReg : Register();
    }
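    /// Try to combine the defs of an unmerge MI by attempting to find a value
    /// that provides the bits for each def reg. Returns true only if every def
    /// could be replaced (or was already dead).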
    bool tryCombineUnmergeDefs(GUnmerge &MI, GISelChangeObserver &Observer,
                               SmallVectorImpl<Register> &UpdatedDefs) {
      unsigned NumDefs = MI.getNumDefs();
      LLT DestTy = MRI.getType(MI.getReg(0));

      SmallBitVector DeadDefs(NumDefs);
      for (unsigned DefIdx = 0; DefIdx < NumDefs; ++DefIdx) {
        Register DefReg = MI.getReg(DefIdx);
        if (MRI.use_nodbg_empty(DefReg)) {
          DeadDefs[DefIdx] = true;
          continue;
        }
        Register FoundVal =
            findValueFromDef(DefReg, 0, DestTy.getSizeInBits(), DestTy);
        if (!FoundVal)
          continue;
        if (MRI.getType(FoundVal) != DestTy)
          continue;
        // Replace the uses of this def with the value that was found; the def
        // itself is re-pointed at a fresh register so this unmerge result
        // becomes dead.
        // ...
        MI.getOperand(DefIdx).setReg(DefReg);
        DeadDefs[DefIdx] = true;
      }

      return DeadDefs.all();
    }
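    /// If Reg is (transitively) provided by a def of a G_UNMERGE_VALUES with
    /// element size Size, return that unmerge and report which of its defs
    /// provides the value via DefOperandIdx; otherwise return nullptr.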
    GUnmerge *findUnmergeThatDefinesReg(Register Reg, unsigned Size,
                                        unsigned &DefOperandIdx) {
      // ...
      DefOperandIdx = Unmerge->findRegisterDefOperandIdx(Def, nullptr);
      // ...
    }
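    /// Check that the sources of MI starting at MergeStartIdx are exactly the
    /// consecutive defs of Unmerge starting at UnmergeIdxStart (G_IMPLICIT_DEF
    /// sources are tolerated when AllowUndef is set).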
    bool isSequenceFromUnmerge(GMergeLikeInstr &MI, unsigned MergeStartIdx,
                               GUnmerge *Unmerge, unsigned UnmergeIdxStart,
                               unsigned NumElts, unsigned EltSize,
                               bool AllowUndef) {
      assert(MergeStartIdx + NumElts <= MI.getNumSources());
      for (unsigned i = MergeStartIdx; i < MergeStartIdx + NumElts; ++i) {
        unsigned EltUnmergeIdx;
        GUnmerge *EltUnmerge = findUnmergeThatDefinesReg(
            MI.getSourceReg(i), EltSize, EltUnmergeIdx);
        // Check if source i comes from the same Unmerge.
        if (EltUnmerge == Unmerge) {
          // Check that source i's def has the same index in the sequence.
          if (i - MergeStartIdx != EltUnmergeIdx - UnmergeIdxStart)
            return false;
        } else if (!AllowUndef ||
                   MRI.getVRegDef(MI.getSourceReg(i))->getOpcode() !=
                       TargetOpcode::G_IMPLICIT_DEF)
          return false;
      }
      return true;
    }
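    /// Try to fold a merge-like instruction (G_MERGE_VALUES, G_BUILD_VECTOR or
    /// G_CONCAT_VECTORS) whose sources come from G_UNMERGE_VALUES defs:
    ///   - merge of all defs of one unmerge, same type -> reuse the unmerge
    ///     source directly (or a copy of it)
    ///   - merge of a contiguous run of defs           -> one def of a wider
    ///     unmerge of the same source
    ///   - merge of several complete unmerges          -> merge/concat of the
    ///     unmerge sources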
    bool tryCombineMergeLike(GMergeLikeInstr &MI,
                             SmallVectorImpl<MachineInstr *> &DeadInsts,
                             SmallVectorImpl<Register> &UpdatedDefs,
                             GISelChangeObserver &Observer) {
      Register Elt0 = MI.getSourceReg(0);
      LLT EltTy = MRI.getType(Elt0);
      unsigned EltSize = EltTy.getSizeInBits();

      unsigned Elt0UnmergeIdx;
      // Search for an unmerge that is a candidate for the combine.
      auto *Unmerge = findUnmergeThatDefinesReg(Elt0, EltSize, Elt0UnmergeIdx);
      if (!Unmerge)
        return false;

      unsigned NumMIElts = MI.getNumSources();
      Register Dst = MI.getReg(0);
      LLT DstTy = MRI.getType(Dst);
      Register UnmergeSrc = Unmerge->getSourceReg();
      LLT UnmergeSrcTy = MRI.getType(UnmergeSrc);

      // merge(unmerge x) with matching types -> copy of x.
      if ((DstTy == UnmergeSrcTy) && (Elt0UnmergeIdx == 0)) {
        // ...
        replaceRegOrBuildCopy(Dst, UnmergeSrc, MRI, MIB, UpdatedDefs, Observer);
        DeadInsts.push_back(&MI);
        return true;
      }

      // merge of a contiguous run of unmerge defs -> one def of a new, wider
      // unmerge of the same source.
      if ((DstTy.isVector() == UnmergeSrcTy.isVector()) &&
          (Elt0UnmergeIdx % NumMIElts == 0) &&
          getCoverTy(UnmergeSrcTy, DstTy) == UnmergeSrcTy) {
        // ...
        MIB.setInstrAndDebugLoc(MI);
        auto NewUnmerge = MIB.buildUnmerge(DstTy, Unmerge->getSourceReg());
        unsigned DstIdx = (Elt0UnmergeIdx * EltSize) / DstTy.getSizeInBits();
        replaceRegOrBuildCopy(Dst, NewUnmerge.getReg(DstIdx), MRI, MIB,
                              UpdatedDefs, Observer);
        DeadInsts.push_back(&MI);
        return true;
      }
      // merge of several complete unmerges -> merge/concat of their sources.
      SmallVector<Register, 4> ConcatSources;
      unsigned NumElts = Unmerge->getNumDefs();
      for (unsigned i = 0; i < MI.getNumSources(); i += NumElts) {
        unsigned EltUnmergeIdx;
        auto *UnmergeI = findUnmergeThatDefinesReg(MI.getSourceReg(i), EltSize,
                                                   EltUnmergeIdx);
        // All unmerges have to be the same size.
        if ((!UnmergeI) || (UnmergeI->getNumDefs() != NumElts) ||
            (EltUnmergeIdx != 0))
          return false;
        if (!isSequenceFromUnmerge(MI, i, UnmergeI, 0, NumElts, EltSize,
                                   /*AllowUndef=*/false))
          return false;
        ConcatSources.push_back(UnmergeI->getSourceReg());
      }

      MIB.setInstrAndDebugLoc(MI);
      MIB.buildMergeLikeInstr(Dst, ConcatSources);
      DeadInsts.push_back(&MI);
      return true;
    }
  };
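  /// Try to combine away a G_UNMERGE_VALUES by looking at its source:
  /// unmerge(unmerge), unmerge(cast) and unmerge(merge-like) are folded to
  /// use the original inputs directly, inserting converts/copies as needed.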
  bool tryCombineUnmergeValues(GUnmerge &MI,
                               SmallVectorImpl<MachineInstr *> &DeadInsts,
                               SmallVectorImpl<Register> &UpdatedDefs,
                               GISelChangeObserver &Observer) {
    unsigned NumDefs = MI.getNumDefs();
    Register SrcReg = MI.getSourceReg();

    std::optional<DefinitionAndSourceRegister> DefSrcReg =
        getDefSrcRegIgnoringCopies(SrcReg, MRI);
    if (!DefSrcReg)
      return false;

    MachineInstr *SrcDef = DefSrcReg->MI;
    LLT OpTy = MRI.getType(SrcReg);
    LLT DestTy = MRI.getType(MI.getReg(0));
    unsigned SrcDefIdx = getDefIndex(*SrcDef, DefSrcReg->Reg);

    Builder.setInstrAndDebugLoc(MI);

    ArtifactValueFinder Finder(MRI, Builder, LI);
    if (Finder.tryCombineUnmergeDefs(MI, Observer, UpdatedDefs)) {
      markInstAndDefDead(MI, *SrcDef, DeadInsts, SrcDefIdx);
      return true;
    }
    // unmerge(unmerge) -> unmerge of the original wide source.
    if (auto *SrcUnmerge = dyn_cast<GUnmerge>(SrcDef)) {
      Register SrcUnmergeSrc = SrcUnmerge->getSourceReg();
      LLT SrcUnmergeSrcTy = MRI.getType(SrcUnmergeSrc);

      LegalizeActionStep ActionStep = LI.getAction(
          {TargetOpcode::G_UNMERGE_VALUES, {OpTy, SrcUnmergeSrcTy}});
      switch (ActionStep.Action) {
      case LegalizeActions::Legal:
        if (!OpTy.isVector() || !LI.isLegal({TargetOpcode::G_UNMERGE_VALUES,
                                             {DestTy, SrcUnmergeSrcTy}}))
          return false;
        break;
      // ... (other legalize actions either still allow the combine or bail)
      default:
        return false;
      }

      auto NewUnmerge = Builder.buildUnmerge(DestTy, SrcUnmergeSrc);
      for (unsigned I = 0; I != NumDefs; ++I) {
        Register Def = MI.getReg(I);
        replaceRegOrBuildCopy(Def, NewUnmerge.getReg(SrcDefIdx * NumDefs + I),
                              MRI, Builder, UpdatedDefs, Observer);
      }

      markInstAndDefDead(MI, *SrcUnmerge, DeadInsts, SrcDefIdx);
      return true;
    }
    MachineInstr *MergeI = SrcDef;
    unsigned ConvertOp = 0;

    // Handle intermediate conversions.
    unsigned SrcOp = SrcDef->getOpcode();
    if (isArtifactCast(SrcOp)) {
      ConvertOp = SrcOp;
      MergeI = getDefIgnoringCopies(SrcDef->getOperand(1).getReg(), MRI);
    }

    if (!MergeI || !canFoldMergeOpcode(MergeI->getOpcode(),
                                       ConvertOp, OpTy, DestTy)) {
      // We might still have a chance by combining unmerge(cast) first.
      return tryFoldUnmergeCast(MI, *SrcDef, DeadInsts, UpdatedDefs);
    }

    const unsigned NumMergeRegs = MergeI->getNumOperands() - 1;

    if (NumMergeRegs < NumDefs) {
      if (NumDefs % NumMergeRegs != 0)
        return false;

      Builder.setInstr(MI);
      // Transform to unmerges of the individual merge inputs, e.g.
      //   %1 = G_MERGE_VALUES %4, %5
      //   %9, %10, %11, %12 = G_UNMERGE_VALUES %1
      // =>
      //   %9, %10 = G_UNMERGE_VALUES %4
      //   %11, %12 = G_UNMERGE_VALUES %5
      const unsigned NewNumDefs = NumDefs / NumMergeRegs;
      for (unsigned Idx = 0; Idx < NumMergeRegs; ++Idx) {
        SmallVector<Register, 8> DstRegs;
        for (unsigned j = 0, DefIdx = Idx * NewNumDefs; j < NewNumDefs;
             ++j, ++DefIdx)
          DstRegs.push_back(MI.getReg(DefIdx));

        if (ConvertOp) {
          LLT MergeDstTy = MRI.getType(SrcDef->getOperand(0).getReg());

          // This is a vector that is being split and casted. Do the
          // conversion on the pieces and unmerge those instead.
          LLT MergeEltTy = MergeDstTy.divide(NumMergeRegs);

          Register TmpReg = MRI.createGenericVirtualRegister(MergeEltTy);
          Builder.buildInstr(ConvertOp, {TmpReg},
                             {MergeI->getOperand(Idx + 1).getReg()});
          Builder.buildUnmerge(DstRegs, TmpReg);
        } else {
          Builder.buildUnmerge(DstRegs, MergeI->getOperand(Idx + 1).getReg());
        }
        UpdatedDefs.append(DstRegs.begin(), DstRegs.end());
      }

    } else if (NumMergeRegs > NumDefs) {
      if (ConvertOp != 0 || NumMergeRegs % NumDefs != 0)
        return false;

      Builder.setInstr(MI);
      // Transform to smaller merges, e.g.
      //   %6 = G_MERGE_VALUES %17, %18, %19, %20
      //   %7, %8 = G_UNMERGE_VALUES %6
      // =>
      //   %7 = G_MERGE_VALUES %17, %18
      //   %8 = G_MERGE_VALUES %19, %20
      const unsigned NumRegs = NumMergeRegs / NumDefs;
      for (unsigned DefIdx = 0; DefIdx < NumDefs; ++DefIdx) {
        SmallVector<Register, 8> Regs;
        for (unsigned j = 0, Idx = NumRegs * DefIdx + 1; j < NumRegs;
             ++j, ++Idx)
          Regs.push_back(MergeI->getOperand(Idx).getReg());

        Register DefReg = MI.getReg(DefIdx);
        Builder.buildMergeLikeInstr(DefReg, Regs);
        UpdatedDefs.push_back(DefReg);
      }
    } else {
      LLT MergeSrcTy = MRI.getType(MergeI->getOperand(1).getReg());

      if (!ConvertOp && DestTy != MergeSrcTy) {
        if (DestTy.isPointer())
          ConvertOp = TargetOpcode::G_INTTOPTR;
        else if (MergeSrcTy.isPointer())
          ConvertOp = TargetOpcode::G_PTRTOINT;
        else
          ConvertOp = TargetOpcode::G_BITCAST;
      }

      if (ConvertOp) {
        Builder.setInstr(MI);

        for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
          Register DefReg = MI.getOperand(Idx).getReg();
          Register MergeSrc = MergeI->getOperand(Idx + 1).getReg();

          if (!MRI.use_empty(DefReg)) {
            Builder.buildInstr(ConvertOp, {DefReg}, {MergeSrc});
            UpdatedDefs.push_back(DefReg);
          }
        }

        markInstAndDefDead(MI, *MergeI, DeadInsts);
        return true;
      }

      assert(DestTy == MergeSrcTy &&
             "Bitcast and the other kinds of conversions should "
             "have happened earlier");

      Builder.setInstr(MI);
      for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
        Register DstReg = MI.getOperand(Idx).getReg();
        Register SrcReg = MergeI->getOperand(Idx + 1).getReg();
        replaceRegOrBuildCopy(DstReg, SrcReg, MRI, Builder, UpdatedDefs,
                              Observer);
      }
    }

    markInstAndDefDead(MI, *MergeI, DeadInsts);
    return true;
  }
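  /// Try to combine G_EXTRACT (G_MERGE_VALUES and friends): when the extracted
  /// range is fully contained in one merge input, rewrite the extract to read
  /// from that input directly; extract of G_IMPLICIT_DEF becomes undef.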
  bool tryCombineExtract(MachineInstr &MI,
                         SmallVectorImpl<MachineInstr *> &DeadInsts,
                         SmallVectorImpl<Register> &UpdatedDefs) {
    assert(MI.getOpcode() == TargetOpcode::G_EXTRACT);

    // Try to use the source registers from a G_MERGE_VALUES directly.
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
    MachineInstr *MergeI = MRI.getVRegDef(SrcReg);
    if (MergeI && MergeI->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
      Builder.setInstrAndDebugLoc(MI);
      Builder.buildUndef(DstReg);
      UpdatedDefs.push_back(DstReg);
      markInstAndDefDead(MI, *MergeI, DeadInsts);
      return true;
    }

    if (!MergeI || !isa<GMergeLikeInstr>(MergeI))
      return false;

    LLT DstTy = MRI.getType(DstReg);
    LLT SrcTy = MRI.getType(SrcReg);

    unsigned ExtractDstSize = DstTy.getSizeInBits();
    unsigned Offset = MI.getOperand(2).getImm();
    unsigned NumMergeSrcs = MergeI->getNumOperands() - 1;
    unsigned MergeSrcSize = SrcTy.getSizeInBits() / NumMergeSrcs;
    unsigned MergeSrcIdx = Offset / MergeSrcSize;

    // Compute the index of the merge input that provides the last needed bit.
    unsigned EndMergeSrcIdx = (Offset + ExtractDstSize - 1) / MergeSrcSize;

    // Can't handle the case where the extract spans multiple inputs.
    if (MergeSrcIdx != EndMergeSrcIdx)
      return false;

    // Rewrite the extract to read directly from the relevant merge input.
    Builder.setInstr(MI);
    Builder.buildExtract(DstReg, MergeI->getOperand(MergeSrcIdx + 1).getReg(),
                         Offset - MergeSrcIdx * MergeSrcSize);
    UpdatedDefs.push_back(DstReg);
    markInstAndDefDead(MI, *MergeI, DeadInsts);
    return true;
  }
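  /// Try to combine away MI. Dispatches on the artifact opcode; for merge-like
  /// instructions the fold is driven from the use side (unmerge or trunc
  /// users). Dead instructions collected during the combine are deleted via
  /// deleteMarkedDeadInsts, and updated defs are re-queued so their artifact
  /// users are revisited.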
  bool tryCombineInstruction(MachineInstr &MI,
                             SmallVectorImpl<MachineInstr *> &DeadInsts,
                             GISelObserverWrapper &WrapperObserver) {
    ArtifactValueFinder Finder(MRI, Builder, LI);

    if (!DeadInsts.empty())
      deleteMarkedDeadInsts(DeadInsts, WrapperObserver);

    SmallVector<Register, 4> UpdatedDefs;
    bool Changed = false;
    switch (MI.getOpcode()) {
    default:
      return false;
    case TargetOpcode::G_ANYEXT:
      Changed = tryCombineAnyExt(MI, DeadInsts, UpdatedDefs, WrapperObserver);
      break;
    case TargetOpcode::G_ZEXT:
      Changed = tryCombineZExt(MI, DeadInsts, UpdatedDefs, WrapperObserver);
      break;
    case TargetOpcode::G_SEXT:
      Changed = tryCombineSExt(MI, DeadInsts, UpdatedDefs, WrapperObserver);
      break;
    case TargetOpcode::G_UNMERGE_VALUES:
      Changed = tryCombineUnmergeValues(cast<GUnmerge>(MI), DeadInsts,
                                        UpdatedDefs, WrapperObserver);
      break;
    case TargetOpcode::G_MERGE_VALUES:
    case TargetOpcode::G_BUILD_VECTOR:
    case TargetOpcode::G_CONCAT_VECTORS:
      // If any user of this merge-like instruction is an unmerge or trunc,
      // re-queue the def so the folding can look upwards as well.
      for (MachineInstr &U : MRI.use_instructions(MI.getOperand(0).getReg())) {
        if (U.getOpcode() == TargetOpcode::G_UNMERGE_VALUES ||
            U.getOpcode() == TargetOpcode::G_TRUNC) {
          UpdatedDefs.push_back(MI.getOperand(0).getReg());
          break;
        }
      }
      Changed = Finder.tryCombineMergeLike(cast<GMergeLikeInstr>(MI), DeadInsts,
                                           UpdatedDefs, WrapperObserver);
      break;
    case TargetOpcode::G_EXTRACT:
      Changed = tryCombineExtract(MI, DeadInsts, UpdatedDefs);
      break;
    case TargetOpcode::G_TRUNC:
      Changed = tryCombineTrunc(MI, DeadInsts, UpdatedDefs, WrapperObserver);
      break;
    }
    // Follow the def-use chain from the updated registers and re-queue any
    // artifact users so they get combined as well.
    while (!UpdatedDefs.empty()) {
      Register NewDef = UpdatedDefs.pop_back_val();
      assert(NewDef.isVirtual() && "Unexpected redefinition of a physreg");

      for (MachineInstr &Use : MRI.use_instructions(NewDef)) {
        switch (Use.getOpcode()) {
        // Keep this list in sync with the list of all artifact combines.
        case TargetOpcode::G_ANYEXT:
        case TargetOpcode::G_ZEXT:
        case TargetOpcode::G_SEXT:
        case TargetOpcode::G_UNMERGE_VALUES:
        case TargetOpcode::G_EXTRACT:
        case TargetOpcode::G_TRUNC:
        case TargetOpcode::G_BUILD_VECTOR:
          // Add Use back to the artifact worklist.
          WrapperObserver.changedInstr(Use);
          break;
        case TargetOpcode::G_ASSERT_SEXT:
        case TargetOpcode::G_ASSERT_ZEXT:
        case TargetOpcode::G_ASSERT_ALIGN:
        case TargetOpcode::COPY: {
          Register Copy = Use.getOperand(0).getReg();
          if (Copy.isVirtual())
            UpdatedDefs.push_back(Copy);
          break;
        }
        default:
          // No artifact combine for this opcode; nothing to re-queue.
          break;
        }
      }
    }
    return Changed;
  }
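  /// Return the register that the given artifact (or copy) reads its value
  /// from; for an unmerge this is the single source operand.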
  static Register getArtifactSrcReg(const MachineInstr &MI) {
    switch (MI.getOpcode()) {
    case TargetOpcode::COPY:
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_ZEXT:
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_EXTRACT:
    case TargetOpcode::G_ASSERT_SEXT:
    case TargetOpcode::G_ASSERT_ZEXT:
    case TargetOpcode::G_ASSERT_ALIGN:
      return MI.getOperand(1).getReg();
    case TargetOpcode::G_UNMERGE_VALUES:
      return MI.getOperand(MI.getNumOperands() - 1).getReg();
    default:
      llvm_unreachable("Not a legalization artifact");
    }
  }
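  /// Mark DefMI (a def feeding MI through a chain of single-use copies or
  /// artifact casts) as dead if removing MI leaves it without users; the
  /// intermediate copies/casts are collected into DeadInsts as well. MI itself
  /// is not marked dead here.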
  void markDefDead(MachineInstr &MI, MachineInstr &DefMI,
                   SmallVectorImpl<MachineInstr *> &DeadInsts,
                   unsigned DefIdx = 0) {
    // Collect the chain of single-use copies/artifact casts between MI and
    // DefMI that become dead once MI goes away.
    MachineInstr *PrevMI = &MI;
    while (PrevMI != &DefMI) {
      Register PrevRegSrc = getArtifactSrcReg(*PrevMI);

      MachineInstr *TmpDef = MRI.getVRegDef(PrevRegSrc);
      if (MRI.hasOneUse(PrevRegSrc)) {
        if (TmpDef != &DefMI) {
          assert((TmpDef->getOpcode() == TargetOpcode::COPY ||
                  isArtifactCast(TmpDef->getOpcode()) ||
                  isPreISelGenericOptimizationHint(TmpDef->getOpcode())) &&
                 "Expecting copy or artifact cast here");
          DeadInsts.push_back(TmpDef);
        }
      } else
        break;
      PrevMI = TmpDef;
    }

    // If the chain reached DefMI, DefMI is dead as long as none of its other
    // defs (besides DefIdx) still have uses.
    if (PrevMI == &DefMI) {
      unsigned I = 0;
      bool IsDead = true;
      for (const MachineOperand &Def : DefMI.defs()) {
        if (I != DefIdx && !MRI.use_empty(Def.getReg())) {
          IsDead = false;
          break;
        }
        ++I;
      }
      if (IsDead)
        DeadInsts.push_back(&DefMI);
    }
  }
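  /// Mark MI as dead, and also mark DefMI (and any copies/casts in between)
  /// dead via markDefDead.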
  void markInstAndDefDead(MachineInstr &MI, MachineInstr &DefMI,
                          SmallVectorImpl<MachineInstr *> &DeadInsts,
                          unsigned DefIdx = 0) {
    markDefDead(MI, DefMI, DeadInsts, DefIdx);
    DeadInsts.push_back(&MI);
  }
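  /// Erase the instructions collected in DeadInsts, notifying the observer
  /// before each erasure.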
  void deleteMarkedDeadInsts(SmallVectorImpl<MachineInstr *> &DeadInsts,
                             GISelObserverWrapper &WrapperObserver) {
    for (auto *DeadMI : DeadInsts) {
      WrapperObserver.erasingInstr(*DeadMI);
      DeadMI->eraseFromParent();
    }
    DeadInsts.clear();
  }
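  /// Check whether materializing a constant of type Ty is unsupported; for
  /// vectors this also checks the G_BUILD_VECTOR needed to assemble it from
  /// scalar G_CONSTANTs.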
  bool isConstantUnsupported(LLT Ty) const {
    if (!Ty.isVector())
      return isInstUnsupported({TargetOpcode::G_CONSTANT, {Ty}});

    LLT EltTy = Ty.getElementType();
    return isInstUnsupported({TargetOpcode::G_CONSTANT, {EltTy}}) ||
           isInstUnsupported({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}});
  }
};

} // namespace llvm

#endif // LLVM_CODEGEN_GLOBALISEL_LEGALIZATIONARTIFACTCOMBINER_H