#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"

using namespace PatternMatch;
using namespace SwitchCG;

#define DEBUG_TYPE "isel"
    cl::desc("Insert the experimental `assertalign` node."),

    cl::desc("Generate low-precision inline sequences "
             "for some float libcalls"),

    cl::desc("Set the case probability threshold for peeling the case from a "
             "switch statement. A value greater than 100 will void this "
                                      const SDValue *Parts, unsigned NumParts,
                                      std::optional<CallingConv::ID> CC);
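// getCopyFromParts: reassemble a value of type ValueVT from NumParts
// legalized parts of type PartVT, optionally asserting sign/zero-extended
// bits via AssertOp; vector values are delegated to getCopyFromPartsVector.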
                 unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V,
                 std::optional<CallingConv::ID> CC = std::nullopt,
                 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
                                             PartVT, ValueVT, CC))

  assert(NumParts > 0 && "No parts to assemble!");

    unsigned RoundBits = PartBits * RoundParts;
    EVT RoundVT = RoundBits == ValueBits ?

    if (RoundParts > 2) {
                              PartVT, HalfVT, V, InChain);

    if (RoundParts < NumParts) {
      unsigned OddParts = NumParts - RoundParts;
                                    OddVT, V, InChain, CC);

    assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&

           !PartVT.isVector() && "Unexpected split");

  if (PartEVT == ValueVT)
      ValueVT.bitsLT(PartEVT)) {

    if (ValueVT.bitsLT(PartEVT)) {
        Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,

          llvm::Attribute::StrictFP)) {
                        DAG.getVTList(ValueVT, MVT::Other), InChain, Val,

  if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
      ValueVT.bitsLT(PartEVT)) {

                                      const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  const char *AsmError = ", possible invalid constraint for vector type";
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (CI->isInlineAsm())
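// getCopyFromPartsVector: reassemble a vector value from its register-sized
// parts, using the target's ABI vector-type breakdown when a calling
// convention is supplied.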
                                      const SDValue *Parts, unsigned NumParts,
                                      std::optional<CallingConv::ID> CallConv) {
  assert(NumParts > 0 && "No parts to assemble!");
  const bool IsABIRegCopy = CallConv.has_value();

  unsigned NumIntermediates;
        *DAG.getContext(), *CallConv, ValueVT, IntermediateVT,
        NumIntermediates, RegisterVT);
                                  NumIntermediates, RegisterVT);

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
         "Part type sizes don't match!");

  if (NumIntermediates == NumParts) {
    for (unsigned i = 0; i != NumParts; ++i)
                                V, InChain, CallConv);
  } else if (NumParts > 0) {
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
                                IntermediateVT, V, InChain, CallConv);

                    DL, BuiltVectorTy, Ops);

  if (PartEVT == ValueVT)
         "Cannot narrow, it would be a lossy transformation");
  if (PartEVT == ValueVT)
  } else if (ValueVT.bitsLT(PartEVT)) {

        *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");

                                 std::optional<CallingConv::ID> CallConv);
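// getCopyToParts: split Val of type ValueVT into NumParts parts of type
// PartVT, promoting, expanding, or bitcasting as needed; vectors are handled
// by getCopyToPartsVector.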
                           unsigned NumParts, MVT PartVT, const Value *V,
                           std::optional<CallingConv::ID> CallConv = std::nullopt,

  unsigned OrigNumParts = NumParts;
         "Copying to an illegal type!");

  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");

    assert(NumParts == 1 && "Do not know what to promote to!");
           "Unknown mismatch!");
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)

    assert(NumParts == 1 && PartEVT != ValueVT);
           "Unknown mismatch!");
    if (PartVT == MVT::x86mmx)

         "Failed to tile the value with PartVT!");

    if (PartEVT != ValueVT) {
             "scalar-to-vector conversion failed");

  if (NumParts & (NumParts - 1)) {
           "Do not know what to expand to!");
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    std::reverse(Parts + RoundParts, Parts + NumParts);
    NumParts = RoundParts;

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      SDValue &Part1 = Parts[i+StepSize/2];
      if (ThisBits == PartBits && ThisVT != PartVT) {

  std::reverse(Parts, Parts + OrigNumParts);

  if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
  if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
           "Cannot widen to illegal type");
  } else if (PartEVT != ValueEVT) {

  Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
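// getCopyToPartsVector: split a vector value into register-sized parts,
// widening or breaking it into intermediate vector pieces according to the
// target's vector type breakdown.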
                                std::optional<CallingConv::ID> CallConv) {
  const bool IsABIRegCopy = CallConv.has_value();

  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
               TargetLowering::TypeWidenVector) {
           "lossy conversion of vector to scalar type");

  unsigned NumIntermediates;
      *DAG.getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
                                NumIntermediates, RegisterVT);

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
         "Mixing scalable and fixed vectors when copying in parts");

  std::optional<ElementCount> DestEltCnt;
  if (ValueVT == BuiltVectorTy) {

  for (unsigned i = 0; i != NumIntermediates; ++i) {

  if (NumParts == NumIntermediates) {
    for (unsigned i = 0; i != NumParts; ++i)
  } else if (NumParts > 0) {
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)

                           EVT valuevt, std::optional<CallingConv::ID> CC)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
      RegCount(1, regs.size()), CallConv(CC) {}

                           std::optional<CallingConv::ID> CC) {
    for (unsigned i = 0; i != NumRegs; ++i)
    RegVTs.push_back(RegisterVT);
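// RegsForValue::getCopyFromRegs: emit one CopyFromReg per register backing
// the value, threading chain and optional glue, then recombine the pieces
// with getCopyFromParts; getCopyToRegs and AddInlineAsmOperands follow.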
  for (unsigned i = 0; i != NumRegs; ++i) {
      *Glue = P.getValue(2);
    Chain = P.getValue(1);

    EVT FromVT(MVT::Other);
    } else if (NumSignBits > 1) {
      assert(FromVT != MVT::Other);

                                     RegisterVT, ValueVT, V, Chain, CallConv);

                   NumParts, RegisterVT, V, CallConv, ExtendKind);

  for (unsigned i = 0; i != NumRegs; ++i) {
    if (NumRegs == 1 || Glue)
  Chain = Chains[NumRegs-1];

                                        unsigned MatchingIdx, const SDLoc &dl,
                                        std::vector<SDValue> &Ops) const {
    Flag.setMatchingOp(MatchingIdx);
      Flag.setRegClass(RC->getID());

         "No 1:1 mapping from clobbers to regs?");

  for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
             "If we clobbered the stack pointer, MFI should know about it.");

    for (unsigned i = 0; i != NumRegs; ++i) {
      unsigned TheReg = Regs[Reg++];

    unsigned RegCount = std::get<0>(CountAndVT);
    MVT RegisterVT = std::get<1>(CountAndVT);

  UnusedArgNodeMap.clear();
  PendingExports.clear();
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  DanglingDebugInfoMap.clear();

  if (Pending.empty())
  unsigned i = 0, e = Pending.size();
  for (; i != e; ++i) {
    if (Pending[i].getNode()->getOperand(0) == Root)

  if (Pending.size() == 1)

                       PendingConstrainedFP.size() +
                       PendingConstrainedFPStrict.size());
                        PendingConstrainedFP.end());
    PendingLoads.append(PendingConstrainedFPStrict.begin(),
                        PendingConstrainedFPStrict.end());
    PendingConstrainedFP.clear();
    PendingConstrainedFPStrict.clear();

  PendingExports.append(PendingConstrainedFPStrict.begin(),
                        PendingConstrainedFPStrict.end());
  PendingConstrainedFPStrict.clear();
  return updateRoot(PendingExports);
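// The fragments below come from the debug-info paths: lowering dbg.declare /
// dbg.value records, tracking dangling debug info until the referenced value
// is available, and salvaging expressions when it never becomes available.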
  assert(Variable && "Missing variable");

             << "dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");

  if (!N.getNode() && isa<Argument>(Address))

    auto *FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
    if (IsParameter && FINode) {
                                    true, DL, SDNodeOrder);
    } else if (isa<Argument>(Address)) {
                               FuncArgumentDbgValueKind::Declare, N);
                          true, DL, SDNodeOrder);

                                FuncArgumentDbgValueKind::Declare, N)) {
               << " (could not emit func-arg dbg_value)\n");

  for (auto It = FnVarLocs->locs_begin(&I), End = FnVarLocs->locs_end(&I);
    auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
    if (It->Values.isKillLocation(It->Expr)) {
                          It->Values.hasArgList())) {
        FnVarLocs->getDILocalVariable(It->VariableID),
        It->Expr, Vals.size() > 1, It->DL, SDNodeOrder);

  for (DbgRecord &DR : I.getDbgRecordRange()) {
      assert(DLR->getLabel() && "Missing label");
          DAG.getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);

    if (SkipDbgVariableRecords)
    LLVM_DEBUG(dbgs() << "SelectionDAG visiting dbg_declare: " << DVR

    if (Values.empty()) {
               [](Value *V) { return !V || isa<UndefValue>(V); })) {
                          SDNodeOrder, IsVariadic)) {

  if (I.isTerminator()) {
    HandlePHINodesInSuccessorBlocks(I.getParent());
  if (!isa<DbgInfoIntrinsic>(I))

  bool NodeInserted = false;
  std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
  MDNode *PCSectionsMD = I.getMetadata(LLVMContext::MD_pcsections);
  MDNode *MMRA = I.getMetadata(LLVMContext::MD_mmra);
  if (PCSectionsMD || MMRA) {
    InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
        DAG, [&](SDNode *) { NodeInserted = true; });

      !isa<GCStatepointInst>(I))

  if (PCSectionsMD || MMRA) {
    auto It = NodeMap.find(&I);
    if (It != NodeMap.end()) {
    } else if (NodeInserted) {
      errs() << "warning: losing !pcsections and/or !mmra metadata ["
             << I.getModule()->getName() << "]\n";
void SelectionDAGBuilder::visitPHI(const PHINode &) {

#define HANDLE_INST(NUM, OPCODE, CLASS) \
  case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/IR/Instruction.def"

  for (const Value *V : Values) {

  DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr, DL, Order);

  auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
    DIVariable *DanglingVariable = DDI.getVariable();
    if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
                        << printDDI(nullptr, DDI) << "\n");

  for (auto &DDIMI : DanglingDebugInfoMap) {
    DanglingDebugInfoVector &DDIV = DDIMI.second;
      for (auto &DDI : DDIV)
        if (isMatchingDbgValue(DDI))
      erase_if(DDIV, isMatchingDbgValue);

  auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
  if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
  DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
  for (auto &DDI : DDIV) {
    unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
           "Expected inlined-at fields to agree");
    if (!EmitFuncArgumentDbgValue(V, Variable, Expr, DL,
                                  FuncArgumentDbgValueKind::Value, Val)) {
                        << printDDI(V, DDI) << "\n");
                 << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
                 << ValSDNodeOrder << "\n");
      SDV = getDbgValue(Val, Variable, Expr, DL,
                        std::max(DbgSDNodeOrder, ValSDNodeOrder));
                        << " in EmitFuncArgumentDbgValue\n");
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << printDDI(V, DDI)

                                                DanglingDebugInfo &DDI) {
  const Value *OrigV = V;
  unsigned SDOrder = DDI.getSDNodeOrder();
  bool StackValue = true;

  while (isa<Instruction>(V)) {
    const Instruction &VAsInst = *cast<const Instruction>(V);
      if (!AdditionalValues.empty())
          dbgs() << "Salvaged debug location info for:\n " << *Var << "\n"
                 << *OrigV << "\nBy stripping back to:\n " << *V << "\n");

  assert(OrigV && "V shouldn't be null");
                    << printDDI(OrigV, DDI) << "\n");

                                           unsigned Order, bool IsVariadic) {
  if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))

  for (const Value *V : Values) {
    if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
        isa<ConstantPointerNull>(V)) {

    if (auto *CE = dyn_cast<ConstantExpr>(V))
      if (CE->getOpcode() == Instruction::IntToPtr) {

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {

    if (!N.getNode() && isa<Argument>(V))
      N = UnusedArgNodeMap[V];
        EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
                                 FuncArgumentDbgValueKind::Value, N))
      if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {

    bool IsParamOfFunc =
      unsigned Reg = VMI->second;
                                       V->getType(), std::nullopt);
      unsigned BitsToDescribe = 0;
        BitsToDescribe = *VarSize;
        BitsToDescribe = Fragment->SizeInBits;
        if (Offset >= BitsToDescribe)
        unsigned RegisterSize = RegAndSize.second;
        unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
                                    ? BitsToDescribe - Offset
            Expr, Offset, FragmentSize);
            Var, *FragmentExpr, RegAndSize.first, false, DbgLoc, Order);

                         false, DbgLoc, Order, IsVariadic);

  for (auto &Pair : DanglingDebugInfoMap)
    for (auto &DDI : Pair.second)
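// getValue / getValueImpl: translate an IR Value into an SDValue, covering
// constants (scalars, aggregates, vectors, and target-specific types),
// allocas, instructions, and basic-block addresses.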
  if (N.getNode())
    return N;

  if (const Constant *C = dyn_cast<Constant>(V)) {
                         getValue(CPA->getAddrDiscriminator()),
                         getValue(CPA->getDiscriminator()));

    if (isa<ConstantPointerNull>(C)) {
      unsigned AS = V->getType()->getPointerAddressSpace();

    if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))

    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())

      visit(CE->getOpcode(), *CE);
      assert(N1.getNode() && "visit didn't populate the NodeMap!");

    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
      for (const Use &U : C->operands()) {
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)

        dyn_cast<ConstantDataSequential>(C)) {
      for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)

      if (isa<ArrayType>(CDS->getType()))

    if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
             "Unknown struct or array constant!");

      unsigned NumElts = ValueVTs.size();
      for (unsigned i = 0; i != NumElts; ++i) {
        EVT EltVT = ValueVTs[i];
        if (isa<UndefValue>(C))

    if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(C))
      return getValue(Equiv->getGlobalValue());

    if (const auto *NC = dyn_cast<NoCFIValue>(C))

    if (VT == MVT::aarch64svcount) {
      assert(C->isNullValue() && "Can only zero this target type!");

    VectorType *VecTy = cast<VectorType>(V->getType());

      unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
      for (unsigned i = 0; i != NumElements; ++i)

    if (isa<ConstantAggregateZero>(C)) {

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {

  if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
                     Inst->getType(), std::nullopt);

  if (const auto *BB = dyn_cast<BasicBlock>(V))

void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
  if (IsMSVCCXX || IsCoreCLR)

  Value *ParentPad = I.getCatchSwitchParentPad();
  if (isa<ConstantTokenNone>(ParentPad))
    SuccessorColor = cast<Instruction>(ParentPad)->getParent();
  assert(SuccessorColor && "No parent funclet for catchret!");
  assert(SuccessorColorMBB && "No MBB for SuccessorColor!");

void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {

    if (isa<CleanupPadInst>(Pad)) {
      UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob);
      UnwindDests.back().first->setIsEHScopeEntry();
    } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(FuncInfo.getMBB(CatchPadBB), Prob);
        UnwindDests.back().first->setIsEHScopeEntry();

    assert(UnwindDests.size() <= 1 &&
           "There should be at most one unwind destination for wasm");

    if (isa<LandingPadInst>(Pad)) {
      UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob);
    } else if (isa<CleanupPadInst>(Pad)) {
      UnwindDests.emplace_back(FuncInfo.getMBB(EHPadBB), Prob);
      UnwindDests.back().first->setIsEHScopeEntry();
      UnwindDests.back().first->setIsEHFuncletEntry();
    } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(FuncInfo.getMBB(CatchPadBB), Prob);
        if (IsMSVCCXX || IsCoreCLR)
          UnwindDests.back().first->setIsEHFuncletEntry();
          UnwindDests.back().first->setIsEHScopeEntry();
      NewEHPadBB = CatchSwitch->getUnwindDest();

    if (BPI && NewEHPadBB)
    EHPadBB = NewEHPadBB;

  auto UnwindDest = I.getUnwindDest();
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);

void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {

void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
  if (I.getParent()->getTerminatingDeoptimizeCall()) {

  const Function *F = I.getParent()->getParent();
    unsigned NumValues = ValueVTs.size();
    Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType());
    for (unsigned i = 0; i != NumValues; ++i) {
      if (MemVTs[i] != ValueVTs[i])
                        MVT::Other, Chains);
  } else if (I.getNumOperands() != 0) {
    unsigned NumValues = ValueVTs.size();
      const Function *F = I.getParent()->getParent();
          I.getOperand(0)->getType(), F->getCallingConv(),
      if (F->getAttributes().hasRetAttr(Attribute::SExt))
      else if (F->getAttributes().hasRetAttr(Attribute::ZExt))
      bool RetInReg = F->getAttributes().hasRetAttr(Attribute::InReg);
      for (unsigned j = 0; j != NumValues; ++j) {
        EVT VT = ValueVTs[j];
                       &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
        if (I.getOperand(0)->getType()->isPointerTy()) {
          Flags.setPointerAddrSpace(
              cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
        if (NeedsRegBlock) {
          Flags.setInConsecutiveRegs();
          if (j == NumValues - 1)
            Flags.setInConsecutiveRegsLast();
        for (unsigned i = 0; i < NumParts; ++i) {

  const Function *F = I.getParent()->getParent();
      F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
    Flags.setSwiftError();

         "LowerReturn didn't return a valid chain!");

  if (V->getType()->isEmptyTy())
    assert((!V->use_empty() || isa<CallBrInst>(V)) &&
           "Unused value assigned virtual registers!");

  if (!isa<Instruction>(V) && !isa<Argument>(V))
    return;

  if (const Instruction *VI = dyn_cast<Instruction>(V)) {
    if (VI->getParent() == FromBB)
  if (isa<Argument>(V)) {

  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    Src->addSuccessorWithoutProb(Dst);
      Prob = getEdgeProbability(Src, Dst);
    Src->addSuccessor(Dst, Prob);

    return I->getParent() == BB;
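// The fragments below are from the conditional-branch lowering helpers
// (merged and/or condition emission and probability splitting) and from
// visitBr, visitSwitchCase, the jump-table headers, and the stack-protector
// and bit-test lowering.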
  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    if (CurBB == SwitchBB ||
          InvertCond ? IC->getInversePredicate() : IC->getPredicate();
          InvertCond ? FC->getInversePredicate() : FC->getPredicate();

      CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
      SL->SwitchCases.push_back(CB);

  SL->SwitchCases.push_back(CB);

                            unsigned Depth = 0) {
  auto *I = dyn_cast<Instruction>(V);
  if (Necessary != nullptr) {
    if (Necessary->contains(I))

  for (unsigned OpIdx = 0, E = I->getNumOperands(); OpIdx < E; ++OpIdx)

  if (I.getNumSuccessors() != 2)
  if (!I.isConditional())

  if (BPI != nullptr) {
    std::optional<bool> Likely;
    else if (BPI->isEdgeHot(I.getParent(), IfFalse))

      if (Opc == (*Likely ? Instruction::And : Instruction::Or))

  if (CostThresh <= 0)

  if (const auto *RhsI = dyn_cast<Instruction>(Rhs))

  Value *BrCond = I.getCondition();
  auto ShouldCountInsn = [&RhsDeps, &BrCond](const Instruction *Ins) {
    for (const auto *U : Ins->users()) {
      if (auto *UIns = dyn_cast<Instruction>(U))
        if (UIns != BrCond && !RhsDeps.contains(UIns))

  for (unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
    for (const auto &InsPair : RhsDeps) {
      if (!ShouldCountInsn(InsPair.first)) {
        ToDrop = InsPair.first;
    if (ToDrop == nullptr)
    RhsDeps.erase(ToDrop);

  for (const auto &InsPair : RhsDeps) {
  if (CostOfIncluding > CostThresh)

    const Value *BOpOp0, *BOpOp1;
      if (BOpc == Instruction::And)
        BOpc = Instruction::Or;
      else if (BOpc == Instruction::Or)
        BOpc = Instruction::And;

  bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
                             TProb, FProb, InvertCond);

  if (Opc == Instruction::Or) {
    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
                         NewFalseProb, InvertCond);
                         Probs[1], InvertCond);
    assert(Opc == Instruction::And && "Unknown merge op!");
    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
                         NewFalseProb, InvertCond);
                         Probs[1], InvertCond);

  if (Cases.size() != 2)
    return true;

  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {

  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].CC == Cases[1].CC &&
      isa<Constant>(Cases[0].CmpRHS) &&
      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
    if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
    if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)

void SelectionDAGBuilder::visitBr(const BranchInst &I) {
  if (I.isUnconditional()) {
    if (Succ0MBB != NextBlock(BrMBB) ||

  const Value *CondVal = I.getCondition();

  const Instruction *BOp = dyn_cast<Instruction>(CondVal);
      BOp->hasOneUse() && !I.hasMetadata(LLVMContext::MD_unpredictable)) {
    const Value *BOp0, *BOp1;
      Opcode = Instruction::And;
      Opcode = Instruction::Or;
                                    Opcode, BOp0, BOp1))) {
                           getEdgeProbability(BrMBB, Succ0MBB),
                           getEdgeProbability(BrMBB, Succ1MBB),
      assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");

      for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
        SL->SwitchCases.erase(SL->SwitchCases.begin());

      for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
      SL->SwitchCases.clear();

               nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());

  if (CB.TrueBB != NextBlock(SwitchBB)) {
      if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
  if (CB.TrueBB == NextBlock(SwitchBB)) {

  assert(JT.SL && "Should set SDLoc for SelectionDAG!");
  assert(JT.Reg != -1U && "Should lower JT Header first!");

  assert(JT.SL && "Should set SDLoc for SelectionDAG!");
  const SDLoc &dl = *JT.SL;
  unsigned JumpTableReg =
  JT.Reg = JumpTableReg;
                               MVT::Other, CopyTo, CMP,
    if (JT.MBB != NextBlock(SwitchBB))
    if (JT.MBB != NextBlock(SwitchBB))

  if (PtrTy != PtrMemTy)

    Entry.Node = GuardVal;
    if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
      Entry.IsInReg = true;
    Args.push_back(Entry);

        getValue(GuardCheckFn), std::move(Args));
    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);

  Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,

  TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,

  bool UsePtrType = false;
    for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)

  if (!B.FallthroughUnreachable)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);

  if (!B.FallthroughUnreachable) {
  if (MBB != NextBlock(SwitchBB))

  if (PopCount == 1) {
  } else if (PopCount == BB.Range) {

    addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);

  if (NextMBB != NextBlock(SwitchBB))
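// visitInvoke, visitCallBr, and visitLandingPad lower call-like terminators,
// wiring up normal and exceptional successors and rejecting operand bundles
// the builder cannot handle yet.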
void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
  assert(!I.hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_gc_transition,
              LLVMContext::OB_gc_live, LLVMContext::OB_funclet,
              LLVMContext::OB_cfguardtarget, LLVMContext::OB_ptrauth,
              LLVMContext::OB_clang_arc_attachedcall}) &&
         "Cannot lower invokes with arbitrary operand bundles yet!");

  const Value *Callee(I.getCalledOperand());
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    visitInlineAsm(I, EHPadBB);

    case Intrinsic::donothing:
    case Intrinsic::seh_try_begin:
    case Intrinsic::seh_scope_begin:
    case Intrinsic::seh_try_end:
    case Intrinsic::seh_scope_end:
    case Intrinsic::experimental_patchpoint_void:
    case Intrinsic::experimental_patchpoint:
      visitPatchpoint(I, EHPadBB);
    case Intrinsic::experimental_gc_statepoint:
    case Intrinsic::wasm_rethrow: {

  } else if (I.hasDeoptState()) {

  if (!isa<GCStatepointInst>(I)) {

  addSuccessorWithProb(InvokeMBB, Return);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);

void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
  assert(!I.hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
         "Cannot lower callbrs with arbitrary operand bundles yet!");
  assert(I.isInlineAsm() && "Only know how to handle inlineasm callbr");

  Dests.insert(I.getDefaultDest());
  for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
    Target->setIsInlineAsmBrIndirectTarget();
    Target->setMachineBlockAddressTaken();
    Target->setLabelMustBeEmitted();
    if (Dests.insert(Dest).second)

void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");

void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
         "Call to landingpad not in landing pad!");
  assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");

    if (JTB.first.HeaderBB == First)
      JTB.first.HeaderBB = Last;

  for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
    bool Inserted = Done.insert(BB).second;
    addSuccessorWithProb(IndirectBrMBB, Succ);

  if (const CallInst *Call = dyn_cast_or_null<CallInst>(I.getPrevNode());
      Call && Call->doesNotReturn()) {
    if (Call->isNonContinuableTrap())
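// Simple operator visitors: each builds the corresponding ISD node and
// transfers IR-level flags (nuw/nsw, exact, disjoint, nneg, fast-math) onto
// the resulting node where present.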
void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
  if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
    Flags.copyFMF(*FPOp);

void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
  if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
    Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
    Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
  if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
    Flags.setExact(ExactOp->isExact());
  if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&I))
    Flags.setDisjoint(DisjointOp->isDisjoint());
  if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
    Flags.copyFMF(*FPOp);

void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
  if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
           "Unexpected shift type");

          dyn_cast<const OverflowingBinaryOperator>(&I)) {
    nuw = OFBinOp->hasNoUnsignedWrap();
    nsw = OFBinOp->hasNoSignedWrap();
          dyn_cast<const PossiblyExactOperator>(&I))
    exact = ExactOp->isExact();
  Flags.setExact(exact);
  Flags.setNoSignedWrap(nsw);
  Flags.setNoUnsignedWrap(nuw);

void SelectionDAGBuilder::visitSDiv(const User &I) {
  Flags.setExact(isa<PossiblyExactOperator>(&I) &&
                 cast<PossiblyExactOperator>(&I)->isExact());

void SelectionDAGBuilder::visitICmp(const ICmpInst &I) {

void SelectionDAGBuilder::visitFCmp(const FCmpInst &I) {
  auto *FPMO = cast<FPMathOperator>(&I);
  Flags.copyFMF(*FPMO);

  return isa<SelectInst>(V);

void SelectionDAGBuilder::visitSelect(const User &I) {
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0)
    return;

  bool IsUnaryAbs = false;
  bool Negate = false;

  if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
    Flags.copyFMF(*FPOp);
  Flags.setUnpredictable(
      cast<SelectInst>(I).getMetadata(LLVMContext::MD_unpredictable));

    EVT VT = ValueVTs[0];
    bool UseScalarMinMax = VT.isVector() &&
    switch (SPR.Flavor) {
        switch (SPR.NaNBehavior) {
        switch (SPR.NaNBehavior) {

    for (unsigned i = 0; i != NumValues; ++i) {
    for (unsigned i = 0; i != NumValues; ++i) {

void SelectionDAGBuilder::visitTrunc(const User &I) {

void SelectionDAGBuilder::visitZExt(const User &I) {
  if (auto *PNI = dyn_cast<PossiblyNonNegInst>(&I))
    Flags.setNonNeg(PNI->hasNonNeg());
  if (Flags.hasNonNeg() &&

void SelectionDAGBuilder::visitSExt(const User &I) {

void SelectionDAGBuilder::visitFPTrunc(const User &I) {

void SelectionDAGBuilder::visitFPExt(const User &I) {

void SelectionDAGBuilder::visitFPToUI(const User &I) {

void SelectionDAGBuilder::visitFPToSI(const User &I) {

void SelectionDAGBuilder::visitUIToFP(const User &I) {
  if (auto *PNI = dyn_cast<PossiblyNonNegInst>(&I))
    Flags.setNonNeg(PNI->hasNonNeg());

void SelectionDAGBuilder::visitSIToFP(const User &I) {

void SelectionDAGBuilder::visitPtrToInt(const User &I) {

void SelectionDAGBuilder::visitIntToPtr(const User &I) {

void SelectionDAGBuilder::visitBitCast(const User &I) {
  if (DestVT != N.getValueType())
  else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))

void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
  const Value *SV = I.getOperand(0);
  unsigned DestAS = I.getType()->getPointerAddressSpace();

void SelectionDAGBuilder::visitInsertElement(const User &I) {
                           InVec, InVal, InIdx));

void SelectionDAGBuilder::visitExtractElement(const User &I) {

void SelectionDAGBuilder::visitShuffleVector(const User &I) {
  if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
    Mask = SVI->getShuffleMask();
    Mask = cast<ConstantExpr>(I).getShuffleMask();

  if (all_of(Mask, [](int Elem) { return Elem == 0; }) &&

  unsigned MaskNumElts = Mask.size();
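// The shuffle lowering below distinguishes three cases: mask and source have
// the same width; the mask is wider (lower as a concat, padding if needed);
// or the mask is narrower (try to extract a matching subvector, otherwise
// fall back to per-element extracts).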
  if (SrcNumElts == MaskNumElts) {

  if (SrcNumElts < MaskNumElts) {
    if (MaskNumElts % SrcNumElts == 0) {
      unsigned NumConcat = MaskNumElts / SrcNumElts;
      bool IsConcat = true;
      for (unsigned i = 0; i != MaskNumElts; ++i) {
        if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
            (ConcatSrcs[i / SrcNumElts] >= 0 &&
             ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
        ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;

      for (auto Src : ConcatSrcs) {

    unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
    unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;

    for (unsigned i = 0; i != MaskNumElts; ++i) {
      if (Idx >= (int)SrcNumElts)
        Idx -= SrcNumElts - PaddedMaskNumElts;

    if (MaskNumElts != PaddedMaskNumElts)

  if (SrcNumElts > MaskNumElts) {
    int StartIdx[2] = { -1, -1 };
    bool CanExtract = true;
    for (int Idx : Mask) {
      if (Idx >= (int)SrcNumElts) {
      if (NewStartIdx + MaskNumElts > SrcNumElts ||
          (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
      StartIdx[Input] = NewStartIdx;

    if (StartIdx[0] < 0 && StartIdx[1] < 0) {
      for (unsigned Input = 0; Input < 2; ++Input) {
        SDValue &Src = Input == 0 ? Src1 : Src2;
        if (StartIdx[Input] < 0)
      for (int &Idx : MappedOps) {
        if (Idx >= (int)SrcNumElts)
          Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;

  for (int Idx : Mask) {
    SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
    if (Idx >= (int)SrcNumElts)
      Idx -= SrcNumElts;

  const Value *Op0 = I.getOperand(0);
  const Value *Op1 = I.getOperand(1);
  Type *AggTy = I.getType();
  bool IntoUndef = isa<UndefValue>(Op0);
  bool FromUndef = isa<UndefValue>(Op1);

  unsigned NumAggValues = AggValueVTs.size();
  unsigned NumValValues = ValValueVTs.size();
  if (!NumAggValues) {

  for (; i != LinearIndex; ++i)
    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
  for (; i != LinearIndex + NumValValues; ++i)
    Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
  for (; i != NumAggValues; ++i)
    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :