41#define DEBUG_TYPE "globalisel-utils"
44using namespace MIPatternMatch;
51 return MRI.createVirtualRegister(&RegClass);
63 assert(Reg.isVirtual() &&
"PhysReg not implemented");
69 auto *OldRegClass =
MRI.getRegClassOrNull(Reg);
73 if (ConstrainedReg != Reg) {
80 TII.get(TargetOpcode::COPY), ConstrainedReg)
85 TII.get(TargetOpcode::COPY), Reg)
89 Observer->changingInstr(*RegMO.
getParent());
91 RegMO.
setReg(ConstrainedReg);
93 Observer->changedInstr(*RegMO.
getParent());
95 }
else if (OldRegClass !=
MRI.getRegClassOrNull(Reg)) {
99 Observer->changedInstr(*RegDef);
101 Observer->changingAllUsesOfReg(
MRI, Reg);
102 Observer->finishedChangingAllUsesOfReg();
105 return ConstrainedReg;
115 assert(Reg.isVirtual() &&
"PhysReg not implemented");
128 if (
const auto *SubRC =
TRI.getCommonSubClass(
129 OpRC,
TRI.getConstrainedRegClassForOperand(RegMO,
MRI)))
132 OpRC =
TRI.getAllocatableClass(OpRC);
137 "Register class constraint is required unless either the "
138 "instruction is target independent or the operand is a use");
160 "A selected instruction is expected");
165 for (
unsigned OpI = 0, OpE =
I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
173 assert(MO.
isReg() &&
"Unsupported non-reg operand");
177 if (Reg.isPhysical())
193 int DefIdx =
I.getDesc().getOperandConstraint(OpI,
MCOI::TIED_TO);
194 if (DefIdx != -1 && !
I.isRegTiedToUseOperand(DefIdx))
195 I.tieOperands(DefIdx, OpI);
207 if (
MRI.getType(DstReg) !=
MRI.getType(SrcReg))
211 const auto &DstRBC =
MRI.getRegClassOrRegBank(DstReg);
212 if (!DstRBC || DstRBC ==
MRI.getRegClassOrRegBank(SrcReg))
217 return isa<const RegisterBank *>(DstRBC) &&
MRI.getRegClassOrNull(SrcReg) &&
218 cast<const RegisterBank *>(DstRBC)->covers(
219 *
MRI.getRegClassOrNull(SrcReg));
227 for (
const auto &MO :
MI.all_defs()) {
229 if (Reg.isPhysical() || !
MRI.use_nodbg_empty(Reg))
232 return MI.wouldBeTriviallyDead();
240 bool IsFatal = Severity ==
DS_Error &&
244 if (!R.getLocation().isValid() || IsFatal)
245 R << (
" (in function: " + MF.
getName() +
")").str();
271 MI.getDebugLoc(),
MI.getParent());
283 assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
284 "Value found while looking through instrs");
287 return ValAndVReg->Value;
293 assert((Const && Const->getOpcode() == TargetOpcode::G_CONSTANT) &&
294 "expected a G_CONSTANT on Reg");
295 return Const->getOperand(1).getCImm()->getValue();
298std::optional<int64_t>
301 if (Val && Val->getBitWidth() <= 64)
302 return Val->getSExtValue();
320std::optional<ValueAndVReg>
322 bool LookThroughInstrs =
true,
323 bool LookThroughAnyExt =
false) {
327 while ((
MI =
MRI.getVRegDef(VReg)) && !IsConstantOpcode(
MI) &&
329 switch (
MI->getOpcode()) {
330 case TargetOpcode::G_ANYEXT:
331 if (!LookThroughAnyExt)
334 case TargetOpcode::G_TRUNC:
335 case TargetOpcode::G_SEXT:
336 case TargetOpcode::G_ZEXT:
339 MRI.getType(
MI->getOperand(0).getReg()).getSizeInBits()));
340 VReg =
MI->getOperand(1).getReg();
342 case TargetOpcode::COPY:
343 VReg =
MI->getOperand(1).getReg();
347 case TargetOpcode::G_INTTOPTR:
348 VReg =
MI->getOperand(1).getReg();
354 if (!
MI || !IsConstantOpcode(
MI))
358 if (!GetAPCstValue(
MI, Val))
360 for (
auto &Pair :
reverse(SeenOpcodes)) {
361 switch (Pair.first) {
362 case TargetOpcode::G_TRUNC:
363 Val = Val.
trunc(Pair.second);
365 case TargetOpcode::G_ANYEXT:
366 case TargetOpcode::G_SEXT:
367 Val = Val.
sext(Pair.second);
369 case TargetOpcode::G_ZEXT:
370 Val = Val.
zext(Pair.second);
381 return MI->getOpcode() == TargetOpcode::G_CONSTANT;
387 return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
393 unsigned Opc =
MI->getOpcode();
394 return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
420 return getConstantVRegValWithLookThrough<isIConstant, getCImmAsAPInt>(
421 VReg,
MRI, LookThroughInstrs);
426 bool LookThroughAnyExt) {
427 return getConstantVRegValWithLookThrough<isAnyConstant,
428 getCImmOrFPImmAsAPInt>(
429 VReg,
MRI, LookThroughInstrs, LookThroughAnyExt);
435 getConstantVRegValWithLookThrough<isFConstant, getCImmOrFPImmAsAPInt>(
436 VReg,
MRI, LookThroughInstrs);
446 if (TargetOpcode::G_FCONSTANT !=
MI->getOpcode())
448 return MI->getOperand(1).getFPImm();
451std::optional<DefinitionAndSourceRegister>
456 if (!DstTy.isValid())
461 auto SrcTy =
MRI.getType(SrcReg);
462 if (!SrcTy.isValid())
473 std::optional<DefinitionAndSourceRegister> DefSrcReg =
475 return DefSrcReg ? DefSrcReg->MI :
nullptr;
480 std::optional<DefinitionAndSourceRegister> DefSrcReg =
482 return DefSrcReg ? DefSrcReg->Reg :
Register();
489 for (
int i = 0; i < NumParts; ++i)
503 unsigned NumParts =
RegSize / MainSize;
504 unsigned LeftoverSize =
RegSize - NumParts * MainSize;
507 if (LeftoverSize == 0) {
508 for (
unsigned I = 0;
I < NumParts; ++
I)
509 VRegs.
push_back(
MRI.createGenericVirtualRegister(MainTy));
522 unsigned LeftoverNumElts = RegNumElts % MainNumElts;
524 if (MainNumElts % LeftoverNumElts == 0 &&
525 RegNumElts % LeftoverNumElts == 0 &&
527 LeftoverNumElts > 1) {
532 extractParts(Reg, LeftoverTy, RegNumElts / LeftoverNumElts, UnmergeValues,
536 unsigned LeftoverPerMain = MainNumElts / LeftoverNumElts;
537 unsigned NumOfLeftoverVal =
538 ((RegNumElts % MainNumElts) / LeftoverNumElts);
542 for (
unsigned I = 0;
I < UnmergeValues.
size() - NumOfLeftoverVal;
I++) {
544 if (MergeValues.
size() == LeftoverPerMain) {
551 for (
unsigned I = UnmergeValues.
size() - NumOfLeftoverVal;
552 I < UnmergeValues.
size();
I++) {
563 for (
unsigned i = 0; i < RegPieces.
size() - 1; ++i)
566 LeftoverTy =
MRI.getType(LeftoverRegs[0]);
572 for (
unsigned I = 0;
I != NumParts; ++
I) {
573 Register NewReg =
MRI.createGenericVirtualRegister(MainTy);
580 Register NewReg =
MRI.createGenericVirtualRegister(LeftoverTy);
592 LLT RegTy =
MRI.getType(Reg);
598 unsigned LeftoverNumElts = RegNumElts % NumElts;
599 unsigned NumNarrowTyPieces = RegNumElts / NumElts;
602 if (LeftoverNumElts == 0)
603 return extractParts(Reg, NarrowTy, NumNarrowTyPieces, VRegs, MIRBuilder,
614 for (
unsigned i = 0; i < NumNarrowTyPieces; ++i,
Offset += NumElts) {
620 if (LeftoverNumElts == 1) {
645 APF.
convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
661 const APInt &C1 = MaybeOp1Cst->Value;
662 const APInt &C2 = MaybeOp2Cst->Value;
666 case TargetOpcode::G_ADD:
668 case TargetOpcode::G_PTR_ADD:
672 case TargetOpcode::G_AND:
674 case TargetOpcode::G_ASHR:
676 case TargetOpcode::G_LSHR:
678 case TargetOpcode::G_MUL:
680 case TargetOpcode::G_OR:
682 case TargetOpcode::G_SHL:
684 case TargetOpcode::G_SUB:
686 case TargetOpcode::G_XOR:
688 case TargetOpcode::G_UDIV:
689 if (!C2.getBoolValue())
692 case TargetOpcode::G_SDIV:
693 if (!C2.getBoolValue())
696 case TargetOpcode::G_UREM:
697 if (!C2.getBoolValue())
700 case TargetOpcode::G_SREM:
701 if (!C2.getBoolValue())
704 case TargetOpcode::G_SMIN:
706 case TargetOpcode::G_SMAX:
708 case TargetOpcode::G_UMIN:
710 case TargetOpcode::G_UMAX:
717std::optional<APFloat>
731 case TargetOpcode::G_FADD:
732 C1.
add(C2, APFloat::rmNearestTiesToEven);
734 case TargetOpcode::G_FSUB:
735 C1.
subtract(C2, APFloat::rmNearestTiesToEven);
737 case TargetOpcode::G_FMUL:
738 C1.
multiply(C2, APFloat::rmNearestTiesToEven);
740 case TargetOpcode::G_FDIV:
741 C1.
divide(C2, APFloat::rmNearestTiesToEven);
743 case TargetOpcode::G_FREM:
746 case TargetOpcode::G_FCOPYSIGN:
749 case TargetOpcode::G_FMINNUM:
751 case TargetOpcode::G_FMAXNUM:
753 case TargetOpcode::G_FMINIMUM:
755 case TargetOpcode::G_FMAXIMUM:
757 case TargetOpcode::G_FMINNUM_IEEE:
758 case TargetOpcode::G_FMAXNUM_IEEE:
775 auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2,
MRI);
779 auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1,
MRI);
784 for (
unsigned Idx = 0, E = SrcVec1->getNumSources();
Idx < E; ++
Idx) {
786 SrcVec2->getSourceReg(
Idx),
MRI);
791 return FoldedElements;
806 return !FPVal->getValueAPF().isNaN() ||
807 (SNaN && !FPVal->getValueAPF().isSignaling());
820 case TargetOpcode::G_FADD:
821 case TargetOpcode::G_FSUB:
822 case TargetOpcode::G_FMUL:
823 case TargetOpcode::G_FDIV:
824 case TargetOpcode::G_FREM:
825 case TargetOpcode::G_FSIN:
826 case TargetOpcode::G_FCOS:
827 case TargetOpcode::G_FTAN:
828 case TargetOpcode::G_FACOS:
829 case TargetOpcode::G_FASIN:
830 case TargetOpcode::G_FATAN:
831 case TargetOpcode::G_FATAN2:
832 case TargetOpcode::G_FCOSH:
833 case TargetOpcode::G_FSINH:
834 case TargetOpcode::G_FTANH:
835 case TargetOpcode::G_FMA:
836 case TargetOpcode::G_FMAD:
842 case TargetOpcode::G_FMINNUM_IEEE:
843 case TargetOpcode::G_FMAXNUM_IEEE: {
853 case TargetOpcode::G_FMINNUM:
854 case TargetOpcode::G_FMAXNUM: {
866 case TargetOpcode::G_FPEXT:
867 case TargetOpcode::G_FPTRUNC:
868 case TargetOpcode::G_FCANONICALIZE:
880 auto PSV = dyn_cast_if_present<const PseudoSourceValue *>(MPO.
V);
881 if (
auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
887 if (
const Value *V = dyn_cast_if_present<const Value *>(MPO.
V)) {
889 return V->getPointerAlignment(M->getDataLayout());
907 assert(Def->getParent() == &EntryMBB &&
"live-in copy not in entry block");
918 MRI.setType(LiveIn, RegTy);
936 case TargetOpcode::G_SEXT_INREG: {
937 LLT Ty =
MRI.getType(Op1);
955 case TargetOpcode::G_SEXT:
956 return Val->sext(DstSize);
957 case TargetOpcode::G_ZEXT:
958 case TargetOpcode::G_ANYEXT:
960 return Val->zext(DstSize);
968std::optional<APFloat>
971 assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
975 APFloat::rmNearestTiesToEven);
981std::optional<SmallVector<unsigned>>
983 std::function<
unsigned(
APInt)> CB) {
984 LLT Ty =
MRI.getType(Src);
986 auto tryFoldScalar = [&](
Register R) -> std::optional<unsigned> {
990 return CB(*MaybeCst);
994 auto *BV = getOpcodeDef<GBuildVector>(Src,
MRI);
997 for (
unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
998 if (
auto MaybeFold = tryFoldScalar(BV->getSourceReg(SrcIdx))) {
1002 return std::nullopt;
1006 if (
auto MaybeCst = tryFoldScalar(Src)) {
1010 return std::nullopt;
1013std::optional<SmallVector<APInt>>
1016 LLT Ty =
MRI.getType(Op1);
1017 if (Ty !=
MRI.getType(Op2))
1018 return std::nullopt;
1024 if (!LHSCst || !RHSCst)
1025 return std::nullopt;
1028 case CmpInst::Predicate::ICMP_EQ:
1029 return APInt(1, LHSCst->eq(*RHSCst));
1030 case CmpInst::Predicate::ICMP_NE:
1031 return APInt(1, LHSCst->ne(*RHSCst));
1032 case CmpInst::Predicate::ICMP_UGT:
1033 return APInt(1, LHSCst->ugt(*RHSCst));
1034 case CmpInst::Predicate::ICMP_UGE:
1035 return APInt(1, LHSCst->uge(*RHSCst));
1036 case CmpInst::Predicate::ICMP_ULT:
1037 return APInt(1, LHSCst->ult(*RHSCst));
1038 case CmpInst::Predicate::ICMP_ULE:
1039 return APInt(1, LHSCst->ule(*RHSCst));
1040 case CmpInst::Predicate::ICMP_SGT:
1041 return APInt(1, LHSCst->sgt(*RHSCst));
1042 case CmpInst::Predicate::ICMP_SGE:
1043 return APInt(1, LHSCst->sge(*RHSCst));
1044 case CmpInst::Predicate::ICMP_SLT:
1045 return APInt(1, LHSCst->slt(*RHSCst));
1046 case CmpInst::Predicate::ICMP_SLE:
1047 return APInt(1, LHSCst->sle(*RHSCst));
1049 return std::nullopt;
1057 auto *BV1 = getOpcodeDef<GBuildVector>(Op1,
MRI);
1058 auto *BV2 = getOpcodeDef<GBuildVector>(Op2,
MRI);
1060 return std::nullopt;
1061 assert(BV1->getNumSources() == BV2->getNumSources() &&
"Invalid vectors");
1062 for (
unsigned I = 0;
I < BV1->getNumSources(); ++
I) {
1063 if (
auto MaybeFold =
1064 TryFoldScalar(BV1->getSourceReg(
I), BV2->getSourceReg(
I))) {
1068 return std::nullopt;
1073 if (
auto MaybeCst = TryFoldScalar(Op1, Op2)) {
1078 return std::nullopt;
1083 std::optional<DefinitionAndSourceRegister> DefSrcReg =
1089 const LLT Ty =
MRI.getType(Reg);
1091 switch (
MI.getOpcode()) {
1092 case TargetOpcode::G_CONSTANT: {
1097 case TargetOpcode::G_SHL: {
1109 case TargetOpcode::G_LSHR: {
1111 if (ConstLHS->isSignMask())
1117 case TargetOpcode::G_BUILD_VECTOR: {
1126 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1132 if (!Const || !Const->zextOrTrunc(
BitWidth).isPowerOf2())
1173 "getLCMType not implemented between fixed and scalable vectors.");
1193 LLT VecTy = OrigTy.
isVector() ? OrigTy : TargetTy;
1194 LLT ScalarTy = OrigTy.
isVector() ? TargetTy : OrigTy;
1229 "getCoverTy not implemented between fixed and scalable vectors.");
1237 if (OrigTyNumElts % TargetTyNumElts == 0)
1240 unsigned NumElts =
alignTo(OrigTyNumElts, TargetTyNumElts);
1260 "getGCDType not implemented between fixed and scalable vectors.");
1300 assert(
MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
1301 "Only G_SHUFFLE_VECTOR can have a splat index!");
1303 auto FirstDefinedIdx =
find_if(Mask, [](
int Elt) {
return Elt >= 0; });
1307 if (FirstDefinedIdx == Mask.end())
1312 int SplatValue = *FirstDefinedIdx;
1314 [&SplatValue](
int Elt) { return Elt >= 0 && Elt != SplatValue; }))
1315 return std::nullopt;
1321 return Opcode == TargetOpcode::G_BUILD_VECTOR ||
1322 Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
1327std::optional<ValueAndVReg> getAnyConstantSplat(
Register VReg,
1332 return std::nullopt;
1334 bool isConcatVectorsOp =
MI->getOpcode() == TargetOpcode::G_CONCAT_VECTORS;
1336 return std::nullopt;
1338 std::optional<ValueAndVReg> SplatValAndReg;
1343 auto ElementValAndReg =
1345 ? getAnyConstantSplat(Element,
MRI, AllowUndef)
1349 if (!ElementValAndReg) {
1350 if (AllowUndef && isa<GImplicitDef>(
MRI.getVRegDef(Element)))
1352 return std::nullopt;
1356 if (!SplatValAndReg)
1357 SplatValAndReg = ElementValAndReg;
1360 if (SplatValAndReg->Value != ElementValAndReg->Value)
1361 return std::nullopt;
1364 return SplatValAndReg;
1371 int64_t SplatValue,
bool AllowUndef) {
1372 if (
auto SplatValAndReg = getAnyConstantSplat(Reg,
MRI, AllowUndef))
1379 int64_t SplatValue,
bool AllowUndef) {
1386 if (
auto SplatValAndReg =
1387 getAnyConstantSplat(Reg,
MRI,
false)) {
1388 if (std::optional<ValueAndVReg> ValAndVReg =
1390 return ValAndVReg->Value;
1393 return std::nullopt;
1402std::optional<int64_t>
1405 if (
auto SplatValAndReg =
1406 getAnyConstantSplat(Reg,
MRI,
false))
1408 return std::nullopt;
1411std::optional<int64_t>
1417std::optional<FPValueAndVReg>
1420 if (
auto SplatValAndReg = getAnyConstantSplat(VReg,
MRI, AllowUndef))
1422 return std::nullopt;
1437std::optional<RegOrConstant>
1439 unsigned Opc =
MI.getOpcode();
1441 return std::nullopt;
1444 auto Reg =
MI.getOperand(1).getReg();
1447 return std::nullopt;
1453 bool AllowFP =
true,
1454 bool AllowOpaqueConstants =
true) {
1455 switch (
MI.getOpcode()) {
1456 case TargetOpcode::G_CONSTANT:
1457 case TargetOpcode::G_IMPLICIT_DEF:
1459 case TargetOpcode::G_FCONSTANT:
1461 case TargetOpcode::G_GLOBAL_VALUE:
1462 case TargetOpcode::G_FRAME_INDEX:
1463 case TargetOpcode::G_BLOCK_ADDR:
1464 case TargetOpcode::G_JUMP_TABLE:
1465 return AllowOpaqueConstants;
1479 for (
unsigned SrcIdx = 0; SrcIdx < BV->
getNumSources(); ++SrcIdx) {
1490 bool AllowFP,
bool AllowOpaqueConstants) {
1497 const unsigned NumOps =
MI.getNumOperands();
1498 for (
unsigned I = 1;
I != NumOps; ++
I) {
1515 return std::nullopt;
1516 const unsigned ScalarSize =
MRI.getType(Def).getScalarSizeInBits();
1517 return APInt(ScalarSize, *MaybeCst,
true);
1522 switch (
MI.getOpcode()) {
1523 case TargetOpcode::G_IMPLICIT_DEF:
1525 case TargetOpcode::G_CONSTANT:
1526 return MI.getOperand(1).getCImm()->isNullValue();
1527 case TargetOpcode::G_FCONSTANT: {
1541 switch (
MI.getOpcode()) {
1542 case TargetOpcode::G_IMPLICIT_DEF:
1544 case TargetOpcode::G_CONSTANT:
1545 return MI.getOperand(1).getCImm()->isAllOnesValue();
1555 std::function<
bool(
const Constant *ConstVal)>
Match,
bool AllowUndefs) {
1558 if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1559 return Match(
nullptr);
1562 if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
1563 return Match(Def->getOperand(1).getCImm());
1565 if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
1568 for (
unsigned I = 1, E = Def->getNumOperands();
I != E; ++
I) {
1569 Register SrcElt = Def->getOperand(
I).getReg();
1571 if (AllowUndefs && SrcDef->
getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
1572 if (!
Match(
nullptr))
1577 if (SrcDef->
getOpcode() != TargetOpcode::G_CONSTANT ||
1588 case TargetLowering::UndefinedBooleanContent:
1590 case TargetLowering::ZeroOrOneBooleanContent:
1592 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1599 bool IsVector,
bool IsFP) {
1601 case TargetLowering::UndefinedBooleanContent:
1603 case TargetLowering::ZeroOrOneBooleanContent:
1604 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1613 case TargetLowering::UndefinedBooleanContent:
1614 case TargetLowering::ZeroOrOneBooleanContent:
1616 case TargetLowering::ZeroOrNegativeOneBooleanContent:
1626 if (
Op.isReg() &&
Op.getReg().isVirtual())
1627 DeadInstChain.
insert(
MRI.getVRegDef(
Op.getReg()));
1631 MI.eraseFromParent();
1643 while (!DeadInstChain.
empty()) {
1657 for (
auto &Def :
MI.defs()) {
1658 assert(Def.isReg() &&
"Must be a reg");
1661 for (
auto &MOUse :
MRI.use_operands(Def.getReg())) {
1669 if (!DbgUsers.
empty()) {
1677 case TargetOpcode::G_FABS:
1678 case TargetOpcode::G_FADD:
1679 case TargetOpcode::G_FCANONICALIZE:
1680 case TargetOpcode::G_FCEIL:
1681 case TargetOpcode::G_FCONSTANT:
1682 case TargetOpcode::G_FCOPYSIGN:
1683 case TargetOpcode::G_FCOS:
1684 case TargetOpcode::G_FDIV:
1685 case TargetOpcode::G_FEXP2:
1686 case TargetOpcode::G_FEXP:
1687 case TargetOpcode::G_FFLOOR:
1688 case TargetOpcode::G_FLOG10:
1689 case TargetOpcode::G_FLOG2:
1690 case TargetOpcode::G_FLOG:
1691 case TargetOpcode::G_FMA:
1692 case TargetOpcode::G_FMAD:
1693 case TargetOpcode::G_FMAXIMUM:
1694 case TargetOpcode::G_FMAXNUM:
1695 case TargetOpcode::G_FMAXNUM_IEEE:
1696 case TargetOpcode::G_FMINIMUM:
1697 case TargetOpcode::G_FMINNUM:
1698 case TargetOpcode::G_FMINNUM_IEEE:
1699 case TargetOpcode::G_FMUL:
1700 case TargetOpcode::G_FNEARBYINT:
1701 case TargetOpcode::G_FNEG:
1702 case TargetOpcode::G_FPEXT:
1703 case TargetOpcode::G_FPOW:
1704 case TargetOpcode::G_FPTRUNC:
1705 case TargetOpcode::G_FREM:
1706 case TargetOpcode::G_FRINT:
1707 case TargetOpcode::G_FSIN:
1708 case TargetOpcode::G_FTAN:
1709 case TargetOpcode::G_FACOS:
1710 case TargetOpcode::G_FASIN:
1711 case TargetOpcode::G_FATAN:
1712 case TargetOpcode::G_FATAN2:
1713 case TargetOpcode::G_FCOSH:
1714 case TargetOpcode::G_FSINH:
1715 case TargetOpcode::G_FTANH:
1716 case TargetOpcode::G_FSQRT:
1717 case TargetOpcode::G_FSUB:
1718 case TargetOpcode::G_INTRINSIC_ROUND:
1719 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
1720 case TargetOpcode::G_INTRINSIC_TRUNC:
1730 LLT Ty =
MRI.getType(ShiftAmount);
1736 std::optional<ValueAndVReg> Val =
1748 for (
unsigned I = 0;
I < Sources; ++
I) {
1749 std::optional<ValueAndVReg> Val =
1777 bool ConsiderFlagsAndMetadata,
1782 if (
auto *GMI = dyn_cast<GenericMachineInstr>(RegDef))
1783 if (GMI->hasPoisonGeneratingFlags())
1788 case TargetOpcode::G_BUILD_VECTOR:
1789 case TargetOpcode::G_CONSTANT_FOLD_BARRIER:
1791 case TargetOpcode::G_SHL:
1792 case TargetOpcode::G_ASHR:
1793 case TargetOpcode::G_LSHR:
1796 case TargetOpcode::G_FPTOSI:
1797 case TargetOpcode::G_FPTOUI:
1801 case TargetOpcode::G_CTLZ:
1802 case TargetOpcode::G_CTTZ:
1803 case TargetOpcode::G_ABS:
1804 case TargetOpcode::G_CTPOP:
1805 case TargetOpcode::G_BSWAP:
1806 case TargetOpcode::G_BITREVERSE:
1807 case TargetOpcode::G_FSHL:
1808 case TargetOpcode::G_FSHR:
1809 case TargetOpcode::G_SMAX:
1810 case TargetOpcode::G_SMIN:
1811 case TargetOpcode::G_UMAX:
1812 case TargetOpcode::G_UMIN:
1813 case TargetOpcode::G_PTRMASK:
1814 case TargetOpcode::G_SADDO:
1815 case TargetOpcode::G_SSUBO:
1816 case TargetOpcode::G_UADDO:
1817 case TargetOpcode::G_USUBO:
1818 case TargetOpcode::G_SMULO:
1819 case TargetOpcode::G_UMULO:
1820 case TargetOpcode::G_SADDSAT:
1821 case TargetOpcode::G_UADDSAT:
1822 case TargetOpcode::G_SSUBSAT:
1823 case TargetOpcode::G_USUBSAT:
1825 case TargetOpcode::G_SSHLSAT:
1826 case TargetOpcode::G_USHLSAT:
1829 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1832 std::optional<ValueAndVReg> Index =
1836 LLT VecTy =
MRI.getType(Insert->getVectorReg());
1841 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1844 std::optional<ValueAndVReg> Index =
1853 case TargetOpcode::G_SHUFFLE_VECTOR: {
1858 case TargetOpcode::G_FNEG:
1859 case TargetOpcode::G_PHI:
1860 case TargetOpcode::G_SELECT:
1861 case TargetOpcode::G_UREM:
1862 case TargetOpcode::G_SREM:
1863 case TargetOpcode::G_FREEZE:
1864 case TargetOpcode::G_ICMP:
1865 case TargetOpcode::G_FCMP:
1866 case TargetOpcode::G_FADD:
1867 case TargetOpcode::G_FSUB:
1868 case TargetOpcode::G_FMUL:
1869 case TargetOpcode::G_FDIV:
1870 case TargetOpcode::G_FREM:
1871 case TargetOpcode::G_PTR_ADD:
1874 return !isa<GCastOp>(RegDef) && !isa<GBinOp>(RegDef);
1888 case TargetOpcode::G_FREEZE:
1890 case TargetOpcode::G_IMPLICIT_DEF:
1892 case TargetOpcode::G_CONSTANT:
1893 case TargetOpcode::G_FCONSTANT:
1895 case TargetOpcode::G_BUILD_VECTOR: {
1898 for (
unsigned I = 0;
I < NumSources; ++
I)
1904 case TargetOpcode::G_PHI: {
1905 GPhi *Phi = cast<GPhi>(RegDef);
1906 unsigned NumIncoming = Phi->getNumIncomingValues();
1907 for (
unsigned I = 0;
I < NumIncoming; ++
I)
1917 return ::isGuaranteedNotToBeUndefOrPoison(MO.getReg(),
MRI,
Depth + 1,
1928 bool ConsiderFlagsAndMetadata) {
1929 return ::canCreateUndefOrPoison(Reg,
MRI, ConsiderFlagsAndMetadata,
1934 bool ConsiderFlagsAndMetadata =
true) {
1935 return ::canCreateUndefOrPoison(Reg,
MRI, ConsiderFlagsAndMetadata,
1942 return ::isGuaranteedNotToBeUndefOrPoison(Reg,
MRI,
Depth,
1949 return ::isGuaranteedNotToBeUndefOrPoison(Reg,
MRI,
Depth,
1956 return ::isGuaranteedNotToBeUndefOrPoison(Reg,
MRI,
Depth,
1973std::optional<GIConstant>
1978 std::optional<ValueAndVReg> MayBeConstant =
1981 return std::nullopt;
1987 unsigned NumSources = Build->getNumSources();
1988 for (
unsigned I = 0;
I < NumSources; ++
I) {
1989 Register SrcReg = Build->getSourceReg(
I);
1990 std::optional<ValueAndVReg> MayBeConstant =
1993 return std::nullopt;
1999 std::optional<ValueAndVReg> MayBeConstant =
2002 return std::nullopt;
2013std::optional<GFConstant>
2018 std::optional<FPValueAndVReg> MayBeConstant =
2021 return std::nullopt;
2027 unsigned NumSources = Build->getNumSources();
2028 for (
unsigned I = 0;
I < NumSources; ++
I) {
2029 Register SrcReg = Build->getSourceReg(
I);
2030 std::optional<FPValueAndVReg> MayBeConstant =
2033 return std::nullopt;
2039 std::optional<FPValueAndVReg> MayBeConstant =
2042 return std::nullopt;
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
This file declares a class to represent arbitrary precision floating point values and provides a variety of arithmetic operations on them.
This file implements a class to represent arbitrary precision integral constant values and operations on them.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool canCreateUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI, bool ConsiderFlagsAndMetadata, UndefPoisonKind Kind)
static bool isGuaranteedNotToBeUndefOrPoison(Register Reg, const MachineRegisterInfo &MRI, unsigned Depth, UndefPoisonKind Kind)
static bool includesPoison(UndefPoisonKind Kind)
static bool includesUndef(UndefPoisonKind Kind)
static void reportGISelDiagnostic(DiagnosticSeverity Severity, MachineFunction &MF, const TargetPassConfig &TPC, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
static bool shiftAmountKnownInRange(Register ShiftAmount, const MachineRegisterInfo &MRI)
Shifts return poison if shiftwidth is larger than the bitwidth.
bool canCreatePoison(Register Reg, const MachineRegisterInfo &MRI, bool ConsiderFlagsAndMetadata=true)
static bool isBuildVectorOp(unsigned Opcode)
static bool isConstantScalar(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
This file contains the declarations for the subclasses of Constant, which represent the different flavors of constant values that live in LLVM.
Returns the sub-type a function will return at a given Idx; this should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx.
This contains common code to allow clients to notify changes to machine instr.
Provides analysis for querying information about KnownBits during GISel passes.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic operations.
const HexagonInstrInfo * TII
Tracks DebugLocs between checkpoints and verifies that they are transferred.
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
unsigned const TargetRegisterInfo * TRI
uint64_t IntrinsicInst * II
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file describes how to lower LLVM code to machine code.
Target-Independent Code Generator Pass Configuration Options pass.
static const char PassName[]
Class recording the (high level) value of a variable.
opStatus divide(const APFloat &RHS, roundingMode RM)
void copySign(const APFloat &RHS)
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
opStatus subtract(const APFloat &RHS, roundingMode RM)
opStatus add(const APFloat &RHS, roundingMode RM)
opStatus convertFromAPInt(const APInt &Input, bool IsSigned, roundingMode RM)
opStatus multiply(const APFloat &RHS, roundingMode RM)
APInt bitcastToAPInt() const
opStatus mod(const APFloat &RHS)
Class for arbitrary precision integers.
APInt udiv(const APInt &RHS) const
Unsigned division operation.
APInt zext(unsigned width) const
Zero extend to a new width.
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
APInt trunc(unsigned width) const
Truncate to new width.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
APInt srem(const APInt &RHS) const
Function for signed remainder operation.
APInt sext(unsigned width) const
Sign extend to a new width.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Represent the analysis usage information of a pass.
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
bool isNegative() const
Return true if the sign bit is set.
bool isZero() const
Return true if the value is positive or negative zero.
This is the shared class of boolean and integer constants.
const APInt & getValue() const
Return the constant as an APInt value reference.
This is an important base class in LLVM.
This class represents an Operation in the Expression.
static constexpr ElementCount getFixed(ScalarTy MinVal)
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
Represents a G_BUILD_VECTOR.
An floating-point-like constant.
static std::optional< GFConstant > getConstant(Register Const, const MachineRegisterInfo &MRI)
APFloat getScalarValue() const
Returns the value, if this constant is a scalar.
An integer-like constant.
APInt getScalarValue() const
Returns the value, if this constant is a scalar.
static std::optional< GIConstant > getConstant(Register Const, const MachineRegisterInfo &MRI)
Abstract class that contains various methods for clients to notify about changes.
KnownBits getKnownBits(Register R)
void insert(MachineInstr *I)
Add the specified instruction to the worklist if it isn't already in it.
MachineInstr * pop_back_val()
void remove(const MachineInstr *I)
Remove I from the worklist if it exists.
Represents an insert vector element.
Register getSourceReg(unsigned I) const
Returns the I'th source register.
unsigned getNumSources() const
Returns the number of source registers.
Represents a G_SHUFFLE_VECTOR.
ArrayRef< int > getMask() const
Represents a splat vector.
Module * getParent()
Get the module that this global value is contained inside of...
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
constexpr unsigned getScalarSizeInBits() const
constexpr bool isScalar() const
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
constexpr bool isScalable() const
Returns true if the LLT is a scalable vector.
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
constexpr ElementCount getElementCount() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
constexpr LLT getScalarType() const
static constexpr LLT scalarOrVector(ElementCount EC, LLT ScalarTy)
This is an important class for using LLVM in a threaded context.
void checkpoint(bool CheckDebugLocs=true)
Call this to indicate that it's a good point to assess whether locations have been lost.
Describe properties that are true of each instruction in the target description file.
Wrapper class representing physical registers. Should be passed by value.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
MachineFunctionProperties & set(Property P)
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
GISelChangeObserver * getObserver() const
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineFunctionProperties & getProperties() const
Get the function properties.
const MachineBasicBlock & front() const
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Helper class to build MachineInstr.
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VEC...
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
iterator_range< mop_iterator > uses()
Returns a range that includes all operands which may be register uses.
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setReg(Register Reg)
Change the register this operand corresponds to.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
bool isFPImm() const
isFPImm - Tests if this is a MO_FPImmediate operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
Represents a value which can be a Register or a constant.
Holds all the information related to register banks.
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
Wrapper class representing virtual and physical registers.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
TargetInstrInfo - Interface to description of machine instruction set.
BooleanContent getBooleanContents(bool isVec, bool isFloat) const
For targets without i1 registers, this gives the nature of the high-bits of boolean values held in ty...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Primary interface to the complete machine description for the target machine.
Target-Independent Code Generator Pass Configuration Options.
bool isGlobalISelAbortEnabled() const
Check whether or not GlobalISel should abort on error.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM Value Representation.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr LeafTy multiplyCoefficientBy(ScalarTy RHS) const
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
@ C
The default llvm calling convention, compatible with C.
SpecificConstantMatch m_SpecificICst(int64_t RequestedValue)
Matches a constant equal to RequestedValue.
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
DiagnosticInfoMIROptimization::MachineArgument MNV
This is an optimization pass for GlobalISel generic memory operations.
Register getFunctionLiveInPhysReg(MachineFunction &MF, const TargetInstrInfo &TII, MCRegister PhysReg, const TargetRegisterClass &RC, const DebugLoc &DL, LLT RegTy=LLT())
Return a virtual register corresponding to the incoming argument register PhysReg.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
bool isBuildVectorAllZeros(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
Type * getTypeForLLT(LLT Ty, LLVMContext &C)
Get the type back from LLT.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Register constrainOperandRegClass(const MachineFunction &MF, const TargetRegisterInfo &TRI, MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, MachineInstr &InsertPt, const TargetRegisterClass &RegClass, MachineOperand &RegMO)
Constrain the Register operand OpIdx, so that it is now constrained to the TargetRegisterClass passed...
MachineInstr * getOpcodeDef(unsigned Opcode, Register Reg, const MachineRegisterInfo &MRI)
See if Reg is defined by an single def instruction that is Opcode.
const ConstantFP * getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
std::optional< APInt > getIConstantVRegVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT, return the corresponding value.
std::optional< APFloat > ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src, const MachineRegisterInfo &MRI)
std::optional< APInt > getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI)
bool isAllOnesOrAllOnesSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant -1 integer or a splatted vector of a constant -1 integer (with...
const llvm::fltSemantics & getFltSemanticForLLT(LLT Ty)
Get the appropriate floating point arithmetic semantic based on the bit size of the given scalar LLT.
std::optional< APFloat > ConstantFoldFPBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
std::optional< SmallVector< unsigned > > ConstantFoldCountZeros(Register Src, const MachineRegisterInfo &MRI, std::function< unsigned(APInt)> CB)
Tries to constant fold a counting-zero operation (G_CTLZ or G_CTTZ) on Src.
std::optional< APInt > ConstantFoldExtOp(unsigned Opcode, const Register Op1, uint64_t Imm, const MachineRegisterInfo &MRI)
std::optional< RegOrConstant > getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI)
LLVM_READONLY APFloat maximum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 maximum semantics.
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to have exactly one bit set when defined.
std::optional< APInt > isConstantOrConstantSplatVector(MachineInstr &MI, const MachineRegisterInfo &MRI)
Determines if MI defines a constant integer or a splat vector of constant integers.
bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndefs=false)
Return true if the value is a constant 0 integer or a splatted vector of a constant 0 integer (with n...
MachineInstr * getDefIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, folding away any trivial copies.
bool matchUnaryPredicate(const MachineRegisterInfo &MRI, Register Reg, std::function< bool(const Constant *ConstVal)> Match, bool AllowUndefs=false)
Attempt to match a unary predicate against a scalar/splat constant or every element of a constant G_B...
bool isPreISelGenericOptimizationHint(unsigned Opcode)
bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
bool isConstTrueVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
Returns true if given the TargetLowering's boolean contents information, the value Val contains a tru...
LLVM_READNONE LLT getLCMType(LLT OrigTy, LLT TargetTy)
Return the least common multiple type of OrigTy and TargetTy, by changing the number of vector elemen...
std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
std::optional< APInt > ConstantFoldBinOp(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
const APInt & getIConstantFromReg(Register VReg, const MachineRegisterInfo &MRI)
VReg is defined by a G_CONSTANT, return the corresponding value.
LLVM_READONLY APFloat maxnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 maximumNumber semantics.
bool isConstantOrConstantVector(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowFP=true, bool AllowOpaqueConstants=true)
Return true if the specified instruction is known to be a constant, or a vector of constants.
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
bool canReplaceReg(Register DstReg, Register SrcReg, MachineRegisterInfo &MRI)
Check if DstReg can be replaced with SrcReg depending on the register constraints.
void saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver, SmallInstListTy &DeadInstChain)
void reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel error as a missed optimization remark to the LLVMContext's diagnostic stream.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
std::optional< SmallVector< APInt > > ConstantFoldICmp(unsigned Pred, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
std::optional< ValueAndVReg > getAnyConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true, bool LookThroughAnyExt=false)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT or G_FCONST...
bool isBuildVectorAllOnes(const MachineInstr &MI, const MachineRegisterInfo &MRI, bool AllowUndef=false)
Return true if the specified instruction is a G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all of the...
bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
SmallVector< APInt > ConstantFoldVectorBinop(unsigned Opcode, const Register Op1, const Register Op2, const MachineRegisterInfo &MRI)
Tries to constant fold a vector binop with sources Op1 and Op2.
std::optional< FPValueAndVReg > getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI, bool AllowUndef=true)
Returns a floating point scalar constant of a build vector splat if it exists.
std::optional< APInt > ConstantFoldCastOp(unsigned Opcode, LLT DstTy, const Register Op0, const MachineRegisterInfo &MRI)
void extractParts(Register Reg, LLT Ty, int NumParts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Helper function to split a wide generic register into bitwise blocks with the given Type (which impli...
void getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU)
Modify analysis usage so it preserves passes required for the SelectionDAG fallback.
LLVM_READNONE LLT getCoverTy(LLT OrigTy, LLT TargetTy)
Return smallest type that covers both OrigTy and TargetTy and is multiple of TargetTy.
LLVM_READONLY APFloat minnum(const APFloat &A, const APFloat &B)
Implements IEEE-754 2019 minimumNumber semantics.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
bool isTargetSpecificOpcode(unsigned Opcode)
Check whether the given Opcode is a target-specific opcode.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
std::optional< FPValueAndVReg > getFConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_FCONSTANT returns it...
bool isConstFalseVal(const TargetLowering &TLI, int64_t Val, bool IsVector, bool IsFP)
constexpr unsigned BitWidth
APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
bool isBuildVectorConstantSplat(const Register Reg, const MachineRegisterInfo &MRI, int64_t SplatValue, bool AllowUndef)
Return true if the specified register is defined by G_BUILD_VECTOR or G_BUILD_VECTOR_TRUNC where all ...
void eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
DiagnosticSeverity
Defines the different supported severity of a diagnostic.
Register constrainRegToClass(MachineRegisterInfo &MRI, const TargetInstrInfo &TII, const RegisterBankInfo &RBI, Register Reg, const TargetRegisterClass &RegClass)
Try to constrain Reg to the specified register class.
int64_t getICmpTrueVal(const TargetLowering &TLI, bool IsVector, bool IsFP)
Returns an integer representing true, as defined by the TargetBooleanContents.
std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
bool isPreISelGenericFloatingPointOpcode(unsigned Opc)
Returns whether opcode Opc is a pre-isel generic floating-point opcode, having only floating-point op...
bool isKnownNeverSNaN(Register Val, const MachineRegisterInfo &MRI)
Returns true if Val can be assumed to never be a signaling NaN.
std::optional< DefinitionAndSourceRegister > getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the def instruction for Reg, and underlying value Register folding away any copies.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
void eraseInstrs(ArrayRef< MachineInstr * > DeadInstrs, MachineRegisterInfo &MRI, LostDebugLocObserver *LocObserver=nullptr)
void salvageDebugInfoForDbgValue(const MachineRegisterInfo &MRI, MachineInstr &MI, ArrayRef< MachineOperand * > DbgUsers)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
bool isKnownNeverNaN(const Value *V, unsigned Depth, const SimplifyQuery &SQ)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
Register getSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI)
Find the source register for Reg, folding away any trivial copies.
LLVM_READNONE LLT getGCDType(LLT OrigTy, LLT TargetTy)
Return a type where the total size is the greatest common divisor of OrigTy and TargetTy.
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_READONLY APFloat minimum(const APFloat &A, const APFloat &B)
Implements IEEE 754-2019 minimum semantics.
std::optional< int64_t > getIConstantSplatSExtVal(const Register Reg, const MachineRegisterInfo &MRI)
void extractVectorParts(Register Reg, unsigned NumElts, SmallVectorImpl< Register > &VRegs, MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
Version which handles irregular sub-vector splits.
int getSplatIndex(ArrayRef< int > Mask)
If all non-negative Mask elements are the same value, return that value.
bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have oth...
Align inferAlignFromPtrInfo(MachineFunction &MF, const MachinePointerInfo &MPO)
void reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel warning as a missed optimization remark to the LLVMContext's diagnostic stream.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Simple struct used to hold a Register value and the instruction which defines it.
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
unsigned countMinPopulation() const
Returns the number of bits known to be one.
This class contains a discriminated union of information about pointers in memory operands,...
int64_t Offset
Offset - This is an offset from the base Value*.
PointerUnion< const Value *, const PseudoSourceValue * > V
This is the IR pointer value for the access, or it is null if unknown.
Simple struct used to hold a constant integer value and a virtual register.