78#include "llvm/IR/IntrinsicsAArch64.h"
79#include "llvm/IR/IntrinsicsAMDGPU.h"
80#include "llvm/IR/IntrinsicsWebAssembly.h"
111using namespace PatternMatch;
112using namespace SwitchCG;
114#define DEBUG_TYPE "isel"
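// Command-line options (excerpt): only the cl::desc strings of the owning
// cl::opt declarations survive in this fragment.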
    cl::desc("Insert the experimental `assertalign` node."),
    cl::desc("Generate low-precision inline sequences "
             "for some float libcalls"),
    cl::desc("Set the case probability threshold for peeling the case from a "
             "switch statement. A value greater than 100 will void this "
                                      const SDValue *Parts, unsigned NumParts,
                                      std::optional<CallingConv::ID> CC);

                 unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V,
                 std::optional<CallingConv::ID> CC = std::nullopt,
                 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
                                  PartVT, ValueVT, CC))

  assert(NumParts > 0 && "No parts to assemble!");
    unsigned RoundBits = PartBits * RoundParts;
    EVT RoundVT = RoundBits == ValueBits ?
    if (RoundParts > 2) {
                               PartVT, HalfVT, V, InChain);
    if (RoundParts < NumParts) {
      unsigned OddParts = NumParts - RoundParts;
                               OddVT, V, InChain, CC);
    assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
           !PartVT.isVector() && "Unexpected split");
  if (PartEVT == ValueVT)
      ValueVT.bitsLT(PartEVT)) {
  if (ValueVT.bitsLT(PartEVT)) {
    Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
              llvm::Attribute::StrictFP)) {
                      DAG.getVTList(ValueVT, MVT::Other), InChain, Val,
  if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
      ValueVT.bitsLT(PartEVT)) {
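// Helper that reports an error for a value of unexpected type, noting a
// possibly invalid inline-asm constraint when the value comes from inline asm
// (diagnosePossiblyInvalidConstraint in the full file).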
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (CI->isInlineAsm()) {
          *CI, ErrMsg + ", possible invalid constraint for vector type"));
                                      const SDValue *Parts, unsigned NumParts,
                                      std::optional<CallingConv::ID> CallConv) {
  assert(NumParts > 0 && "No parts to assemble!");
  const bool IsABIRegCopy = CallConv.has_value();
    unsigned NumIntermediates;
        *DAG.getContext(), *CallConv, ValueVT, IntermediateVT,
        NumIntermediates, RegisterVT);
                                    NumIntermediates, RegisterVT);
    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
           "Part type sizes don't match!");
    if (NumIntermediates == NumParts) {
      for (unsigned i = 0; i != NumParts; ++i)
                                  V, InChain, CallConv);
    } else if (NumParts > 0) {
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
                                  IntermediateVT, V, InChain, CallConv);
                      DL, BuiltVectorTy, Ops);

  if (PartEVT == ValueVT)
           "Cannot narrow, it would be a lossy transformation");
    if (PartEVT == ValueVT)
  } else if (ValueVT.bitsLT(PartEVT)) {
        *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
                                 std::optional<CallingConv::ID> CallConv);

                           unsigned NumParts, MVT PartVT, const Value *V,
                           std::optional<CallingConv::ID> CallConv = std::nullopt,
  unsigned OrigNumParts = NumParts;
         "Copying to an illegal type!");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
      assert(NumParts == 1 && "Do not know what to promote to!");
             "Unknown mismatch!");
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
    assert(NumParts == 1 && PartEVT != ValueVT);
           "Unknown mismatch!");
    if (PartVT == MVT::x86mmx)
         "Failed to tile the value with PartVT!");
    if (PartEVT != ValueVT) {
                "scalar-to-vector conversion failed");

  if (NumParts & (NumParts - 1)) {
           "Do not know what to expand to!");
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    std::reverse(Parts + RoundParts, Parts + NumParts);
    NumParts = RoundParts;
  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      SDValue &Part1 = Parts[i + StepSize / 2];
      if (ThisBits == PartBits && ThisVT != PartVT) {

  std::reverse(Parts, Parts + OrigNumParts);
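// widenVectorToPartType (name from the full file) pads a vector with undef
// elements up to the part element count; getCopyToPartsVector below handles
// the vector splitting.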
  if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
  if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
           "Cannot widen to illegal type");
  } else if (PartEVT != ValueEVT) {
  Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
                                 std::optional<CallingConv::ID> CallConv) {
  const bool IsABIRegCopy = CallConv.has_value();
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
          TargetLowering::TypeWidenVector) {
           "lossy conversion of vector to scalar type");

  unsigned NumIntermediates;
        *DAG.getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
                                  NumIntermediates, RegisterVT);
  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
         "Mixing scalable and fixed vectors when copying in parts");
  std::optional<ElementCount> DestEltCnt;

  if (ValueVT == BuiltVectorTy) {
  for (unsigned i = 0; i != NumIntermediates; ++i) {
  if (NumParts == NumIntermediates) {
    for (unsigned i = 0; i != NumParts; ++i)
  } else if (NumParts > 0) {
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
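// RegsForValue: describes the set of registers assigned to a value and emits
// the copies to and from them (getCopyFromRegs, getCopyToRegs,
// AddInlineAsmOperands).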
                           EVT valuevt, std::optional<CallingConv::ID> CC)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
      RegCount(1, regs.size()), CallConv(CC) {}

                           std::optional<CallingConv::ID> CC) {
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    Reg = Reg.id() + NumRegs;

  for (unsigned i = 0; i != NumRegs; ++i) {
        *Glue = P.getValue(2);
      Chain = P.getValue(1);
    EVT FromVT(MVT::Other);
    } else if (NumSignBits > 1) {
      assert(FromVT != MVT::Other);
                                     RegisterVT, ValueVT, V, Chain, CallConv);

  unsigned NumRegs = Regs.size();
                   NumParts, RegisterVT, V, CallConv, ExtendKind);
  for (unsigned i = 0; i != NumRegs; ++i) {
    if (NumRegs == 1 || Glue)
  Chain = Chains[NumRegs - 1];

                                        unsigned MatchingIdx, const SDLoc &dl,
                                        std::vector<SDValue> &Ops) const {
      Flag.setMatchingOp(MatchingIdx);
        Flag.setRegClass(RC->getID());
           "No 1:1 mapping from clobbers to regs?");
  for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
             "If we clobbered the stack pointer, MFI should know about it.");
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
    unsigned RegCount = std::get<0>(CountAndVT);
    MVT RegisterVT = std::get<1>(CountAndVT);
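// SelectionDAGBuilder bookkeeping: clearing per-block maps and folding the
// pending loads/exports/constrained-FP chains into the DAG root (getRoot,
// getControlRoot, updateRoot).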
  UnusedArgNodeMap.clear();
  PendingExports.clear();
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  DanglingDebugInfoMap.clear();

  if (Pending.empty())
  unsigned i = 0, e = Pending.size();
  for (; i != e; ++i) {
    if (Pending[i].getNode()->getOperand(0) == Root)
  if (Pending.size() == 1)
                      PendingConstrainedFP.size() +
                      PendingConstrainedFPStrict.size());
                      PendingConstrainedFP.end());
  PendingLoads.append(PendingConstrainedFPStrict.begin(),
                      PendingConstrainedFPStrict.end());
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();

  PendingExports.append(PendingConstrainedFPStrict.begin(),
                        PendingConstrainedFPStrict.end());
  PendingConstrainedFPStrict.clear();
  return updateRoot(PendingExports);
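// Debug-info lowering: dbg.declare handling and visitDbgInfo turn variable
// locations and debug records into SDDbgValue / SDDbgLabel nodes.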
  assert(Variable && "Missing variable");
             << "dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
  if (!N.getNode() && isa<Argument>(Address))
    auto *FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
    if (IsParameter && FINode) {
                                    true, DL, SDNodeOrder);
    } else if (isa<Argument>(Address)) {
                               FuncArgumentDbgValueKind::Declare, N);
                                    true, DL, SDNodeOrder);
                                FuncArgumentDbgValueKind::Declare, N)) {
                      << " (could not emit func-arg dbg_value)\n");

    for (auto It = FnVarLocs->locs_begin(&I), End = FnVarLocs->locs_end(&I);
      auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
      if (It->Values.isKillLocation(It->Expr)) {
                           It->Values.hasArgList())) {
          FnVarLocs->getDILocalVariable(It->VariableID),
          It->Expr, Vals.size() > 1, It->DL, SDNodeOrder);
  for (DbgRecord &DR : I.getDbgRecordRange()) {
      assert(DLR->getLabel() && "Missing label");
          DAG.getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);
    if (SkipDbgVariableRecords)
    LLVM_DEBUG(dbgs() << "SelectionDAG visiting dbg_declare: " << DVR
    if (Values.empty()) {
               [](Value *V) { return !V || isa<UndefValue>(V); })) {
                          SDNodeOrder, IsVariadic)) {
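// SelectionDAGBuilder::visit: dispatch on the instruction opcode, then
// re-attach !pcsections / !mmra metadata to the nodes created for this
// instruction.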
  if (I.isTerminator()) {
    HandlePHINodesInSuccessorBlocks(I.getParent());
  if (!isa<DbgInfoIntrinsic>(I))

  bool NodeInserted = false;
  std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
  MDNode *PCSectionsMD = I.getMetadata(LLVMContext::MD_pcsections);
  MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra);
  if (PCSectionsMD || MMRA) {
    InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
        DAG, [&](SDNode *) { NodeInserted = true; });
      !isa<GCStatepointInst>(I))
  if (PCSectionsMD || MMRA) {
    auto It = NodeMap.find(&I);
    if (It != NodeMap.end()) {
    } else if (NodeInserted) {
      errs() << "warning: losing !pcsections and/or !mmra metadata ["
             << I.getModule()->getName() << "]\n";
void SelectionDAGBuilder::visitPHI(const PHINode &) {

#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/IR/Instruction.def"
  for (const Value *V : Values) {
  DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr, DL, Order);

  auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
    DIVariable *DanglingVariable = DDI.getVariable();
    if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
                        << printDDI(nullptr, DDI) << "\n");
  for (auto &DDIMI : DanglingDebugInfoMap) {
    DanglingDebugInfoVector &DDIV = DDIMI.second;
    for (auto &DDI : DDIV)
      if (isMatchingDbgValue(DDI))
    erase_if(DDIV, isMatchingDbgValue);

  auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
  if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
  DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
  for (auto &DDI : DDIV) {
    unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
           "Expected inlined-at fields to agree");
    if (!EmitFuncArgumentDbgValue(V, Variable, Expr, DL,
                                  FuncArgumentDbgValueKind::Value, Val)) {
                        << printDDI(V, DDI) << "\n");
                 << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
                 << ValSDNodeOrder << "\n");
      SDV = getDbgValue(Val, Variable, Expr, DL,
                        std::max(DbgSDNodeOrder, ValSDNodeOrder));
                        << " in EmitFuncArgumentDbgValue\n");
    LLVM_DEBUG(dbgs() << "Dropping debug info for " << printDDI(V, DDI)
                                                  DanglingDebugInfo &DDI) {
  const Value *OrigV = V;
  unsigned SDOrder = DDI.getSDNodeOrder();
  bool StackValue = true;
  while (isa<Instruction>(V)) {
    const Instruction &VAsInst = *cast<const Instruction>(V);
    if (!AdditionalValues.empty())
      dbgs() << "Salvaged debug location info for:\n " << *Var << "\n"
             << *OrigV << "\nBy stripping back to:\n " << *V << "\n");
  assert(OrigV && "V shouldn't be null");
                    << printDDI(OrigV, DDI) << "\n");
                                           unsigned Order, bool IsVariadic) {
  if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
  for (const Value *V : Values) {
    if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
        isa<ConstantPointerNull>(V)) {
    if (auto *CE = dyn_cast<ConstantExpr>(V))
      if (CE->getOpcode() == Instruction::IntToPtr) {
    if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    if (!N.getNode() && isa<Argument>(V))
      N = UnusedArgNodeMap[V];
      EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
                               FuncArgumentDbgValueKind::Value, N))
      if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
    bool IsParamOfFunc =
      unsigned Reg = VMI->second;
                                       V->getType(), std::nullopt);
      unsigned BitsToDescribe = 0;
        BitsToDescribe = *VarSize;
        BitsToDescribe = Fragment->SizeInBits;
        if (Offset >= BitsToDescribe)
        unsigned RegisterSize = RegAndSize.second;
        unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
                                    ? BitsToDescribe - Offset
            Expr, Offset, FragmentSize);
            Var, *FragmentExpr, RegAndSize.first, false, DbgLoc, Order);
                        false, DbgLoc, Order, IsVariadic);

  for (auto &Pair : DanglingDebugInfoMap)
    for (auto &DDI : Pair.second)
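// getValue / getValueImpl: return (creating on demand) the SDValue for an IR
// value; the constant cases below build the node directly.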
  if (N.getNode()) return N;

  if (const Constant *C = dyn_cast<Constant>(V)) {
                       getValue(CPA->getAddrDiscriminator()),
                       getValue(CPA->getDiscriminator()));

    if (isa<ConstantPointerNull>(C)) {
      unsigned AS = V->getType()->getPointerAddressSpace();

    if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))

    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())

      visit(CE->getOpcode(), *CE);
      assert(N1.getNode() && "visit didn't populate the NodeMap!");

    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
      for (const Use &U : C->operands()) {
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Constants.push_back(SDValue(Val, i));

             dyn_cast<ConstantDataSequential>(C)) {
      for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
      if (isa<ArrayType>(CDS->getType()))

    if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
             "Unknown struct or array constant!");
      unsigned NumElts = ValueVTs.size();
      for (unsigned i = 0; i != NumElts; ++i) {
        EVT EltVT = ValueVTs[i];
        if (isa<UndefValue>(C))

    if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(C))
      return getValue(Equiv->getGlobalValue());

    if (const auto *NC = dyn_cast<NoCFIValue>(C))

    if (VT == MVT::aarch64svcount) {
      assert(C->isNullValue() && "Can only zero this target type!");
      assert(C->isNullValue() && "Can only zero this target type!");

    VectorType *VecTy = cast<VectorType>(V->getType());
      unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
      for (unsigned i = 0; i != NumElements; ++i)
    if (isa<ConstantAggregateZero>(C)) {

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {

  if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
                     Inst->getType(), std::nullopt);

  if (const auto *BB = dyn_cast<BasicBlock>(V))
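// Exception handling: catchpad/catchret/cleanuppad visitors and the
// computation of unwind destinations (including the WebAssembly EH variant).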
void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
  if (IsMSVCCXX || IsCoreCLR)

  Value *ParentPad = I.getCatchSwitchParentPad();
  if (isa<ConstantTokenNone>(ParentPad))
    SuccessorColor = cast<Instruction>(ParentPad)->getParent();
  assert(SuccessorColor && "No parent funclet for catchret!");
  assert(SuccessorColorMBB && "No MBB for SuccessorColor!");

void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {

    if (isa<CleanupPadInst>(Pad)) {
      UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob);
      UnwindDests.back().first->setIsEHScopeEntry();
    } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(FuncInfo.getMBB(CatchPadBB), Prob);
        UnwindDests.back().first->setIsEHScopeEntry();

    assert(UnwindDests.size() <= 1 &&
           "There should be at most one unwind destination for wasm");

    if (isa<LandingPadInst>(Pad)) {
      UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob);
    } else if (isa<CleanupPadInst>(Pad)) {
      UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob);
      UnwindDests.back().first->setIsEHScopeEntry();
      UnwindDests.back().first->setIsEHFuncletEntry();
    } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(FuncInfo.getMBB(CatchPadBB), Prob);
        if (IsMSVCCXX || IsCoreCLR)
          UnwindDests.back().first->setIsEHFuncletEntry();
          UnwindDests.back().first->setIsEHScopeEntry();
      NewEHPadBB = CatchSwitch->getUnwindDest();
    if (BPI && NewEHPadBB)
    EHPadBB = NewEHPadBB;

  auto UnwindDest = I.getUnwindDest();
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
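// visitCatchSwitch and visitRet: return lowering splits each returned value
// into legal parts and applies the sext/zext/inreg return attributes.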
void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {

void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
  if (I.getParent()->getTerminatingDeoptimizeCall()) {

    unsigned NumValues = ValueVTs.size();
    Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType());
    for (unsigned i = 0; i != NumValues; ++i) {
      if (MemVTs[i] != ValueVTs[i])
                        MVT::Other, Chains);
  } else if (I.getNumOperands() != 0) {
    unsigned NumValues = ValueVTs.size();
      const Function *F = I.getParent()->getParent();
          I.getOperand(0)->getType(), F->getCallingConv(),
      if (F->getAttributes().hasRetAttr(Attribute::SExt))
      else if (F->getAttributes().hasRetAttr(Attribute::ZExt))
      bool RetInReg = F->getAttributes().hasRetAttr(Attribute::InReg);

      for (unsigned j = 0; j != NumValues; ++j) {
        EVT VT = ValueVTs[j];
                       &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
        if (I.getOperand(0)->getType()->isPointerTy()) {
          Flags.setPointerAddrSpace(
              cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
        if (NeedsRegBlock) {
          Flags.setInConsecutiveRegs();
          if (j == NumValues - 1)
            Flags.setInConsecutiveRegsLast();
        else if (F->getAttributes().hasRetAttr(Attribute::NoExt))
        for (unsigned i = 0; i < NumParts; ++i) {

  const Function *F = I.getParent()->getParent();
      F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
    Flags.setSwiftError();
         "LowerReturn didn't return a valid chain!");
  if (V->getType()->isEmptyTy())
  assert((!V->use_empty() || isa<CallBrInst>(V)) &&
         "Unused value assigned virtual registers!");

  if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
  if (const Instruction *VI = dyn_cast<Instruction>(V)) {
    if (VI->getParent() == FromBB)
  if (isa<Argument>(V)) {

  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    Src->addSuccessorWithoutProb(Dst);
    Prob = getEdgeProbability(Src, Dst);
    Src->addSuccessor(Dst, Prob);
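// Branch-condition merging: EmitBranchForMergedCondition, the
// shouldKeepJumpConditionsTogether cost heuristic, and FindMergedConditions,
// which splits &&/|| chains into CaseBlocks with adjusted probabilities.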
  return I->getParent() == BB;

  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    if (CurBB == SwitchBB ||
          InvertCond ? IC->getInversePredicate() : IC->getPredicate();
          InvertCond ? FC->getInversePredicate() : FC->getPredicate();
      CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
      SL->SwitchCases.push_back(CB);
  SL->SwitchCases.push_back(CB);

                             unsigned Depth = 0) {
  auto *I = dyn_cast<Instruction>(V);
  if (Necessary != nullptr) {
    if (Necessary->contains(I))
  for (unsigned OpIdx = 0, E = I->getNumOperands(); OpIdx < E; ++OpIdx)

  if (I.getNumSuccessors() != 2)
  if (!I.isConditional())
  if (BPI != nullptr) {
    std::optional<bool> Likely;
    else if (BPI->isEdgeHot(I.getParent(), IfFalse))
      if (Opc == (*Likely ? Instruction::And : Instruction::Or))

  if (CostThresh <= 0)
  if (const auto *RhsI = dyn_cast<Instruction>(Rhs))
  Value *BrCond = I.getCondition();
  auto ShouldCountInsn = [&RhsDeps, &BrCond](const Instruction *Ins) {
    for (const auto *U : Ins->users()) {
      if (auto *UIns = dyn_cast<Instruction>(U))
        if (UIns != BrCond && !RhsDeps.contains(UIns))
  for (unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
    for (const auto &InsPair : RhsDeps) {
      if (!ShouldCountInsn(InsPair.first)) {
        ToDrop = InsPair.first;
    if (ToDrop == nullptr)
    RhsDeps.erase(ToDrop);
  for (const auto &InsPair : RhsDeps) {
  if (CostOfIncluding > CostThresh)

  const Value *BOpOp0, *BOpOp1;
    if (BOpc == Instruction::And)
      BOpc = Instruction::Or;
    else if (BOpc == Instruction::Or)
      BOpc = Instruction::And;
  bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
                             TProb, FProb, InvertCond);

  if (Opc == Instruction::Or) {
    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
                         NewFalseProb, InvertCond);
                         Probs[1], InvertCond);
    assert(Opc == Instruction::And && "Unknown merge op!");
    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
                         NewFalseProb, InvertCond);
                         Probs[1], InvertCond);
  if (Cases.size() != 2) return true;

  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {

  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].CC == Cases[1].CC &&
      isa<Constant>(Cases[0].CmpRHS) &&
      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
    if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
    if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
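// visitBr: lowers unconditional and conditional branches, emitting the
// merged-condition CaseBlocks produced above when profitable.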
void SelectionDAGBuilder::visitBr(const BranchInst &I) {
  if (I.isUnconditional()) {
    if (Succ0MBB != NextBlock(BrMBB) ||

  const Value *CondVal = I.getCondition();
  bool IsUnpredictable = I.hasMetadata(LLVMContext::MD_unpredictable);
  const Instruction *BOp = dyn_cast<Instruction>(CondVal);
    const Value *BOp0, *BOp1;
      Opcode = Instruction::And;
      Opcode = Instruction::Or;
                                      Opcode, BOp0, BOp1))) {
                           getEdgeProbability(BrMBB, Succ0MBB),
                           getEdgeProbability(BrMBB, Succ1MBB),
      assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
      for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
        SL->SwitchCases.erase(SL->SwitchCases.begin());
      for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
      SL->SwitchCases.clear();

               nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc(),
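// Switch lowering blocks: visitSwitchCase, the jump-table header/body, the
// stack-protector guard check, and the bit-test header/case blocks.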
  if (CB.TrueBB != NextBlock(SwitchBB)) {
    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
  if (CB.TrueBB == NextBlock(SwitchBB)) {

  assert(JT.SL && "Should set SDLoc for SelectionDAG!");
  assert(JT.Reg && "Should lower JT Header first!");
                          Index.getValue(1), Table, Index);

  assert(JT.SL && "Should set SDLoc for SelectionDAG!");
  const SDLoc &dl = *JT.SL;
  JT.Reg = JumpTableReg;
                               MVT::Other, CopyTo, CMP,
    if (JT.MBB != NextBlock(SwitchBB))
  if (JT.MBB != NextBlock(SwitchBB))

  if (PtrTy != PtrMemTy)
    Entry.Node = GuardVal;
    if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
      Entry.IsInReg = true;
    Args.push_back(Entry);
                getValue(GuardCheckFn), std::move(Args));
    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
    Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,

  bool UsePtrType = false;
    for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
  if (!B.FallthroughUnreachable)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);
  if (!B.FallthroughUnreachable) {
  if (MBB != NextBlock(SwitchBB))

  if (PopCount == 1) {
  } else if (PopCount == BB.Range) {
  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
  if (NextMBB != NextBlock(SwitchBB))
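// Call-like terminators: visitInvoke, visitCallBr, visitResume,
// visitLandingPad, visitIndirectBr and visitUnreachable.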
void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
  assert(!I.hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_gc_transition,
              LLVMContext::OB_gc_live, LLVMContext::OB_funclet,
              LLVMContext::OB_cfguardtarget, LLVMContext::OB_ptrauth,
              LLVMContext::OB_clang_arc_attachedcall}) &&
         "Cannot lower invokes with arbitrary operand bundles yet!");

  const Value *Callee(I.getCalledOperand());
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    visitInlineAsm(I, EHPadBB);
    case Intrinsic::donothing:
    case Intrinsic::seh_try_begin:
    case Intrinsic::seh_scope_begin:
    case Intrinsic::seh_try_end:
    case Intrinsic::seh_scope_end:
    case Intrinsic::experimental_patchpoint_void:
    case Intrinsic::experimental_patchpoint:
      visitPatchpoint(I, EHPadBB);
    case Intrinsic::experimental_gc_statepoint:
    case Intrinsic::wasm_rethrow: {
  } else if (I.hasDeoptState()) {
  if (!isa<GCStatepointInst>(I)) {
  addSuccessorWithProb(InvokeMBB, Return);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);

void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
  assert(!I.hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
         "Cannot lower callbrs with arbitrary operand bundles yet!");
  assert(I.isInlineAsm() && "Only know how to handle inlineasm callbr");
  Dests.insert(I.getDefaultDest());
  for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
    Target->setIsInlineAsmBrIndirectTarget();
    Target->setMachineBlockAddressTaken();
    Target->setLabelMustBeEmitted();
    if (Dests.insert(Dest).second)

void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");

void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
         "Call to landingpad not in landing pad!");
  assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");

    if (JTB.first.HeaderBB == First)
      JTB.first.HeaderBB = Last;

  for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
    bool Inserted = Done.insert(BB).second;
    addSuccessorWithProb(IndirectBrMBB, Succ);

  if (const CallInst *Call = dyn_cast_or_null<CallInst>(I.getPrevNode());
      Call && Call->doesNotReturn()) {
    if (Call->isNonContinuableTrap())
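// Generic operator visitors: visitUnary/visitBinary/visitShift copy IR flags
// (nuw/nsw/exact/disjoint/FMF) onto the node; visitICmp/visitFCmp/visitSelect
// lower comparisons and selects (including min/max/abs patterns).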
void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
  if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
    Flags.copyFMF(*FPOp);

void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
  if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
    Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
    Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
  if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
    Flags.setExact(ExactOp->isExact());
  if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&I))
    Flags.setDisjoint(DisjointOp->isDisjoint());
  if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
    Flags.copyFMF(*FPOp);

void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
  if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
           "Unexpected shift type");
          dyn_cast<const OverflowingBinaryOperator>(&I)) {
      nuw = OFBinOp->hasNoUnsignedWrap();
      nsw = OFBinOp->hasNoSignedWrap();
            dyn_cast<const PossiblyExactOperator>(&I))
      exact = ExactOp->isExact();
  Flags.setExact(exact);
  Flags.setNoSignedWrap(nsw);
  Flags.setNoUnsignedWrap(nuw);

void SelectionDAGBuilder::visitSDiv(const User &I) {
  Flags.setExact(isa<PossiblyExactOperator>(&I) &&
                 cast<PossiblyExactOperator>(&I)->isExact());

void SelectionDAGBuilder::visitICmp(const ICmpInst &I) {
  Flags.setSameSign(I.hasSameSign());

void SelectionDAGBuilder::visitFCmp(const FCmpInst &I) {
  auto *FPMO = cast<FPMathOperator>(&I);
  Flags.copyFMF(*FPMO);

  return isa<SelectInst>(V);

void SelectionDAGBuilder::visitSelect(const User &I) {
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  bool IsUnaryAbs = false;
  bool Negate = false;

  if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
    Flags.copyFMF(*FPOp);
  Flags.setUnpredictable(
      cast<SelectInst>(I).getMetadata(LLVMContext::MD_unpredictable));

    EVT VT = ValueVTs[0];
    bool UseScalarMinMax = VT.isVector() &&
    switch (SPR.Flavor) {
      switch (SPR.NaNBehavior) {
      switch (SPR.NaNBehavior) {
    for (unsigned i = 0; i != NumValues; ++i) {
    for (unsigned i = 0; i != NumValues; ++i) {
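// Cast visitors: trunc/zext/sext, the FP and pointer casts, bitcast and
// addrspacecast, forwarding nneg and wrap flags where the IR carries them.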
void SelectionDAGBuilder::visitTrunc(const User &I) {
  if (auto *Trunc = dyn_cast<TruncInst>(&I)) {
    Flags.setNoSignedWrap(Trunc->hasNoSignedWrap());
    Flags.setNoUnsignedWrap(Trunc->hasNoUnsignedWrap());

void SelectionDAGBuilder::visitZExt(const User &I) {
  if (auto *PNI = dyn_cast<PossiblyNonNegInst>(&I))
    Flags.setNonNeg(PNI->hasNonNeg());
  if (Flags.hasNonNeg() &&

void SelectionDAGBuilder::visitSExt(const User &I) {

void SelectionDAGBuilder::visitFPTrunc(const User &I) {

void SelectionDAGBuilder::visitFPExt(const User &I) {

void SelectionDAGBuilder::visitFPToUI(const User &I) {

void SelectionDAGBuilder::visitFPToSI(const User &I) {

void SelectionDAGBuilder::visitUIToFP(const User &I) {
  if (auto *PNI = dyn_cast<PossiblyNonNegInst>(&I))
    Flags.setNonNeg(PNI->hasNonNeg());

void SelectionDAGBuilder::visitSIToFP(const User &I) {

void SelectionDAGBuilder::visitPtrToInt(const User &I) {

void SelectionDAGBuilder::visitIntToPtr(const User &I) {

void SelectionDAGBuilder::visitBitCast(const User &I) {
  if (DestVT != N.getValueType())
  else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))

void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
  const Value *SV = I.getOperand(0);
  unsigned DestAS = I.getType()->getPointerAddressSpace();
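// Vector and aggregate visitors: insertelement, extractelement, shufflevector
// (splat, concat and extract-subvector special cases) and insertvalue.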
void SelectionDAGBuilder::visitInsertElement(const User &I) {
                           InVec, InVal, InIdx));

void SelectionDAGBuilder::visitExtractElement(const User &I) {

void SelectionDAGBuilder::visitShuffleVector(const User &I) {
  if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
    Mask = SVI->getShuffleMask();
    Mask = cast<ConstantExpr>(I).getShuffleMask();

  if (all_of(Mask, [](int Elem) { return Elem == 0; }) &&

  unsigned MaskNumElts = Mask.size();

  if (SrcNumElts == MaskNumElts) {

  if (SrcNumElts < MaskNumElts) {
    if (MaskNumElts % SrcNumElts == 0) {
      unsigned NumConcat = MaskNumElts / SrcNumElts;
      bool IsConcat = true;
      for (unsigned i = 0; i != MaskNumElts; ++i) {
        if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
            (ConcatSrcs[i / SrcNumElts] >= 0 &&
             ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
        ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
        for (auto Src : ConcatSrcs) {

    unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
    unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
    for (unsigned i = 0; i != MaskNumElts; ++i) {
      if (Idx >= (int)SrcNumElts)
        Idx -= SrcNumElts - PaddedMaskNumElts;
    if (MaskNumElts != PaddedMaskNumElts)

  assert(SrcNumElts > MaskNumElts);

  int StartIdx[2] = {-1, -1};
  bool CanExtract = true;
  for (int Idx : Mask) {
    if (Idx >= (int)SrcNumElts) {
    if (NewStartIdx + MaskNumElts > SrcNumElts ||
        (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
    StartIdx[Input] = NewStartIdx;
  if (StartIdx[0] < 0 && StartIdx[1] < 0) {
    for (unsigned Input = 0; Input < 2; ++Input) {
      SDValue &Src = Input == 0 ? Src1 : Src2;
      if (StartIdx[Input] < 0)
    for (int &Idx : MappedOps) {
      if (Idx >= (int)SrcNumElts)
        Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;

  for (int Idx : Mask) {
    SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
    if (Idx >= (int)SrcNumElts)
      Idx -= SrcNumElts;

  const Value *Op0 = I.getOperand(0);
  const Value *Op1 = I.getOperand(1);
  Type *AggTy = I.getType();
  bool IntoUndef = isa<UndefValue>(Op0);
  bool FromUndef = isa<UndefValue>(Op1);

  unsigned NumAggValues = AggValueVTs.size();
  unsigned NumValValues = ValValueVTs.size();
  if (!NumAggValues) {
  for (; i != LinearIndex; ++i)
    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
  for (; i != LinearIndex + NumValValues; ++i)