79#include "llvm/IR/IntrinsicsAArch64.h"
80#include "llvm/IR/IntrinsicsAMDGPU.h"
81#include "llvm/IR/IntrinsicsWebAssembly.h"
114using namespace PatternMatch;
115using namespace SwitchCG;
117#define DEBUG_TYPE "isel"
125 cl::desc(
"Insert the experimental `assertalign` node."),
130 cl::desc(
"Generate low-precision inline sequences "
131 "for some float libcalls"),
137 cl::desc(
"Set the case probability threshold for peeling the case from a "
138 "switch statement. A value greater than 100 will void this "
158 const SDValue *Parts,
unsigned NumParts,
161 std::optional<CallingConv::ID>
CC);
170 unsigned NumParts,
MVT PartVT,
EVT ValueVT,
const Value *V,
172 std::optional<CallingConv::ID>
CC = std::nullopt,
173 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
177 PartVT, ValueVT,
CC))
184 assert(NumParts > 0 &&
"No parts to assemble!");
195 unsigned RoundBits = PartBits * RoundParts;
196 EVT RoundVT = RoundBits == ValueBits ?
202 if (RoundParts > 2) {
206 PartVT, HalfVT, V, InChain);
217 if (RoundParts < NumParts) {
219 unsigned OddParts = NumParts - RoundParts;
222 OddVT, V, InChain,
CC);
239 assert(ValueVT ==
EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
250 !PartVT.
isVector() &&
"Unexpected split");
262 if (PartEVT == ValueVT)
266 ValueVT.
bitsLT(PartEVT)) {
279 if (ValueVT.
bitsLT(PartEVT)) {
284 Val = DAG.
getNode(*AssertOp,
DL, PartEVT, Val,
299 llvm::Attribute::StrictFP)) {
301 DAG.
getVTList(ValueVT, MVT::Other), InChain, Val,
313 if (PartEVT == MVT::x86mmx && ValueVT.
isInteger() &&
314 ValueVT.
bitsLT(PartEVT)) {
323 const Twine &ErrMsg) {
324 const Instruction *
I = dyn_cast_or_null<Instruction>(V);
328 const char *AsmError =
", possible invalid constraint for vector type";
329 if (
const CallInst *CI = dyn_cast<CallInst>(
I))
330 if (CI->isInlineAsm())
342 const SDValue *Parts,
unsigned NumParts,
345 std::optional<CallingConv::ID> CallConv) {
347 assert(NumParts > 0 &&
"No parts to assemble!");
348 const bool IsABIRegCopy = CallConv.has_value();
357 unsigned NumIntermediates;
362 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT,
363 NumIntermediates, RegisterVT);
367 NumIntermediates, RegisterVT);
370 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
372 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
375 "Part type sizes don't match!");
379 if (NumIntermediates == NumParts) {
382 for (
unsigned i = 0; i != NumParts; ++i)
384 V, InChain, CallConv);
385 }
else if (NumParts > 0) {
388 assert(NumParts % NumIntermediates == 0 &&
389 "Must expand into a divisible number of parts!");
390 unsigned Factor = NumParts / NumIntermediates;
391 for (
unsigned i = 0; i != NumIntermediates; ++i)
393 IntermediateVT, V, InChain, CallConv);
408 DL, BuiltVectorTy, Ops);
414 if (PartEVT == ValueVT)
430 "Cannot narrow, it would be a lossy transformation");
436 if (PartEVT == ValueVT)
461 }
else if (ValueVT.
bitsLT(PartEVT)) {
470 *DAG.
getContext(), V,
"non-trivial scalar-to-vector conversion");
501 std::optional<CallingConv::ID> CallConv);
508 unsigned NumParts,
MVT PartVT,
const Value *V,
509 std::optional<CallingConv::ID> CallConv = std::nullopt,
523 unsigned OrigNumParts = NumParts;
525 "Copying to an illegal type!");
531 EVT PartEVT = PartVT;
532 if (PartEVT == ValueVT) {
533 assert(NumParts == 1 &&
"No-op copy with multiple parts!");
542 assert(NumParts == 1 &&
"Do not know what to promote to!");
553 "Unknown mismatch!");
555 Val = DAG.
getNode(ExtendKind,
DL, ValueVT, Val);
556 if (PartVT == MVT::x86mmx)
561 assert(NumParts == 1 && PartEVT != ValueVT);
567 "Unknown mismatch!");
570 if (PartVT == MVT::x86mmx)
577 "Failed to tile the value with PartVT!");
580 if (PartEVT != ValueVT) {
582 "scalar-to-vector conversion failed");
591 if (NumParts & (NumParts - 1)) {
594 "Do not know what to expand to!");
596 unsigned RoundBits = RoundParts * PartBits;
597 unsigned OddParts = NumParts - RoundParts;
606 std::reverse(Parts + RoundParts, Parts + NumParts);
608 NumParts = RoundParts;
620 for (
unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
621 for (
unsigned i = 0; i < NumParts; i += StepSize) {
622 unsigned ThisBits = StepSize * PartBits / 2;
625 SDValue &Part1 = Parts[i+StepSize/2];
632 if (ThisBits == PartBits && ThisVT != PartVT) {
640 std::reverse(Parts, Parts + OrigNumParts);
657 if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
662 if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
664 "Cannot widen to illegal type");
667 }
else if (PartEVT != ValueEVT) {
682 Ops.
append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
693 std::optional<CallingConv::ID> CallConv) {
697 const bool IsABIRegCopy = CallConv.has_value();
700 EVT PartEVT = PartVT;
701 if (PartEVT == ValueVT) {
720 TargetLowering::TypeWidenVector) {
747 "lossy conversion of vector to scalar type");
762 unsigned NumIntermediates;
766 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
771 NumIntermediates, RegisterVT);
774 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
776 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
779 "Mixing scalable and fixed vectors when copying in parts");
781 std::optional<ElementCount> DestEltCnt;
791 if (ValueVT == BuiltVectorTy) {
815 for (
unsigned i = 0; i != NumIntermediates; ++i) {
830 if (NumParts == NumIntermediates) {
833 for (
unsigned i = 0; i != NumParts; ++i)
835 }
else if (NumParts > 0) {
838 assert(NumIntermediates != 0 &&
"division by zero");
839 assert(NumParts % NumIntermediates == 0 &&
840 "Must expand into a divisible number of parts!");
841 unsigned Factor = NumParts / NumIntermediates;
842 for (
unsigned i = 0; i != NumIntermediates; ++i)
849 EVT valuevt, std::optional<CallingConv::ID>
CC)
850 : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
851 RegCount(1, regs.
size()), CallConv(
CC) {}
855 std::optional<CallingConv::ID>
CC) {
869 for (
unsigned i = 0; i != NumRegs; ++i)
871 RegVTs.push_back(RegisterVT);
900 for (
unsigned i = 0; i != NumRegs; ++i) {
906 *Glue =
P.getValue(2);
909 Chain =
P.getValue(1);
938 EVT FromVT(MVT::Other);
942 }
else if (NumSignBits > 1) {
950 assert(FromVT != MVT::Other);
956 RegisterVT, ValueVT, V, Chain,
CallConv);
986 NumParts, RegisterVT, V,
CallConv, ExtendKind);
992 for (
unsigned i = 0; i != NumRegs; ++i) {
1004 if (NumRegs == 1 || Glue)
1015 Chain = Chains[NumRegs-1];
1021 unsigned MatchingIdx,
const SDLoc &dl,
1023 std::vector<SDValue> &Ops)
const {
1028 Flag.setMatchingOp(MatchingIdx);
1037 Flag.setRegClass(RC->
getID());
1048 "No 1:1 mapping from clobbers to regs?");
1051 for (
unsigned I = 0, E =
ValueVTs.size();
I != E; ++
I) {
1056 "If we clobbered the stack pointer, MFI should know about it.");
1065 for (
unsigned i = 0; i != NumRegs; ++i) {
1067 unsigned TheReg =
Regs[Reg++];
1078 unsigned RegCount = std::get<0>(CountAndVT);
1079 MVT RegisterVT = std::get<1>(CountAndVT);
1103 UnusedArgNodeMap.clear();
1105 PendingExports.clear();
1106 PendingConstrainedFP.clear();
1107 PendingConstrainedFPStrict.clear();
1115 DanglingDebugInfoMap.clear();
1122 if (Pending.
empty())
1128 unsigned i = 0, e = Pending.
size();
1129 for (; i != e; ++i) {
1131 if (Pending[i].
getNode()->getOperand(0) == Root)
1139 if (Pending.
size() == 1)
1158 PendingConstrainedFP.size() +
1159 PendingConstrainedFPStrict.size());
1161 PendingConstrainedFP.end());
1162 PendingLoads.append(PendingConstrainedFPStrict.begin(),
1163 PendingConstrainedFPStrict.end());
1164 PendingConstrainedFP.clear();
1165 PendingConstrainedFPStrict.clear();
1172 PendingExports.append(PendingConstrainedFPStrict.begin(),
1173 PendingConstrainedFPStrict.end());
1174 PendingConstrainedFPStrict.clear();
1175 return updateRoot(PendingExports);
1182 assert(Variable &&
"Missing variable");
1189 <<
"dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
1196 if (!
N.getNode() && isa<Argument>(
Address))
1204 auto *FINode = dyn_cast<FrameIndexSDNode>(
N.getNode());
1205 if (IsParameter && FINode) {
1208 true,
DL, SDNodeOrder);
1209 }
else if (isa<Argument>(
Address)) {
1213 FuncArgumentDbgValueKind::Declare,
N);
1217 true,
DL, SDNodeOrder);
1224 FuncArgumentDbgValueKind::Declare,
N)) {
1226 <<
" (could not emit func-arg dbg_value)\n");
1238 for (
auto It = FnVarLocs->locs_begin(&
I),
End = FnVarLocs->locs_end(&
I);
1240 auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1242 if (It->Values.isKillLocation(It->Expr)) {
1248 It->Values.hasArgList())) {
1251 FnVarLocs->getDILocalVariable(It->VariableID),
1252 It->Expr, Vals.
size() > 1, It->DL, SDNodeOrder);
1268 for (
DbgRecord &DR :
I.getDbgRecordRange()) {
1270 assert(DLR->getLabel() &&
"Missing label");
1272 DAG.
getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);
1277 if (SkipDbgVariableRecords)
1287 LLVM_DEBUG(
dbgs() <<
"SelectionDAG visiting dbg_declare: " << DVR
1296 if (Values.
empty()) {
1305 [](
Value *V) {
return !V || isa<UndefValue>(V); })) {
1313 SDNodeOrder, IsVariadic)) {
1324 if (
I.isTerminator()) {
1325 HandlePHINodesInSuccessorBlocks(
I.getParent());
1329 if (!isa<DbgInfoIntrinsic>(
I))
1335 bool NodeInserted =
false;
1336 std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1337 MDNode *PCSectionsMD =
I.getMetadata(LLVMContext::MD_pcsections);
1338 MDNode *MMRA =
I.getMetadata(LLVMContext::MD_mmra);
1339 if (PCSectionsMD || MMRA) {
1340 InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1341 DAG, [&](
SDNode *) { NodeInserted =
true; });
1347 !isa<GCStatepointInst>(
I))
1351 if (PCSectionsMD || MMRA) {
1352 auto It = NodeMap.find(&
I);
1353 if (It != NodeMap.end()) {
1358 }
else if (NodeInserted) {
1361 errs() <<
"warning: loosing !pcsections and/or !mmra metadata ["
1362 <<
I.getModule()->getName() <<
"]\n";
1371void SelectionDAGBuilder::visitPHI(
const PHINode &) {
1381#define HANDLE_INST(NUM, OPCODE, CLASS) \
1382 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1383#include "llvm/IR/Instruction.def"
1395 for (
const Value *V : Values) {
1420 DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr,
DL, Order);
1425 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1426 DIVariable *DanglingVariable = DDI.getVariable();
1428 if (DanglingVariable == Variable && Expr->
fragmentsOverlap(DanglingExpr)) {
1430 << printDDI(
nullptr, DDI) <<
"\n");
1436 for (
auto &DDIMI : DanglingDebugInfoMap) {
1437 DanglingDebugInfoVector &DDIV = DDIMI.second;
1441 for (
auto &DDI : DDIV)
1442 if (isMatchingDbgValue(DDI))
1445 erase_if(DDIV, isMatchingDbgValue);
1453 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1454 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1457 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1458 for (
auto &DDI : DDIV) {
1461 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1465 "Expected inlined-at fields to agree");
1474 if (!EmitFuncArgumentDbgValue(V, Variable, Expr,
DL,
1475 FuncArgumentDbgValueKind::Value, Val)) {
1477 << printDDI(V, DDI) <<
"\n");
1484 <<
"changing SDNodeOrder from " << DbgSDNodeOrder <<
" to "
1485 << ValSDNodeOrder <<
"\n");
1486 SDV = getDbgValue(Val, Variable, Expr,
DL,
1487 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1492 <<
" in EmitFuncArgumentDbgValue\n");
1494 LLVM_DEBUG(
dbgs() <<
"Dropping debug info for " << printDDI(V, DDI)
1506 DanglingDebugInfo &DDI) {
1511 const Value *OrigV = V;
1515 unsigned SDOrder = DDI.getSDNodeOrder();
1519 bool StackValue =
true;
1528 while (isa<Instruction>(V)) {
1529 const Instruction &VAsInst = *cast<const Instruction>(V);
1544 if (!AdditionalValues.
empty())
1554 dbgs() <<
"Salvaged debug location info for:\n " << *Var <<
"\n"
1555 << *OrigV <<
"\nBy stripping back to:\n " << *V <<
"\n");
1563 assert(OrigV &&
"V shouldn't be null");
1568 << printDDI(OrigV, DDI) <<
"\n");
1585 unsigned Order,
bool IsVariadic) {
1590 if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
1595 for (
const Value *V : Values) {
1597 if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
1598 isa<ConstantPointerNull>(V)) {
1604 if (
auto *CE = dyn_cast<ConstantExpr>(V))
1605 if (CE->getOpcode() == Instruction::IntToPtr) {
1612 if (
const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1623 if (!
N.getNode() && isa<Argument>(V))
1624 N = UnusedArgNodeMap[V];
1628 EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1629 FuncArgumentDbgValueKind::Value,
N))
1631 if (
auto *FISDN = dyn_cast<FrameIndexSDNode>(
N.getNode())) {
1656 bool IsParamOfFunc =
1666 unsigned Reg = VMI->second;
1670 V->getType(), std::nullopt);
1676 unsigned BitsToDescribe = 0;
1678 BitsToDescribe = *VarSize;
1680 BitsToDescribe = Fragment->SizeInBits;
1683 if (
Offset >= BitsToDescribe)
1686 unsigned RegisterSize = RegAndSize.second;
1687 unsigned FragmentSize = (
Offset + RegisterSize > BitsToDescribe)
1688 ? BitsToDescribe -
Offset
1691 Expr,
Offset, FragmentSize);
1695 Var, *FragmentExpr, RegAndSize.first,
false, DbgLoc, Order);
1713 false, DbgLoc, Order, IsVariadic);
1720 for (
auto &Pair : DanglingDebugInfoMap)
1721 for (
auto &DDI : Pair.second)
1753 if (
N.getNode())
return N;
1795 if (
const Constant *
C = dyn_cast<Constant>(V)) {
1807 getValue(CPA->getAddrDiscriminator()),
1808 getValue(CPA->getDiscriminator()));
1811 if (isa<ConstantPointerNull>(
C)) {
1812 unsigned AS = V->getType()->getPointerAddressSpace();
1820 if (
const ConstantFP *CFP = dyn_cast<ConstantFP>(
C))
1823 if (isa<UndefValue>(
C) && !V->getType()->isAggregateType())
1827 visit(CE->getOpcode(), *CE);
1829 assert(N1.
getNode() &&
"visit didn't populate the NodeMap!");
1833 if (isa<ConstantStruct>(
C) || isa<ConstantArray>(
C)) {
1835 for (
const Use &U :
C->operands()) {
1841 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1849 dyn_cast<ConstantDataSequential>(
C)) {
1851 for (
unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1855 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1859 if (isa<ArrayType>(CDS->getType()))
1864 if (
C->getType()->isStructTy() ||
C->getType()->isArrayTy()) {
1865 assert((isa<ConstantAggregateZero>(
C) || isa<UndefValue>(
C)) &&
1866 "Unknown struct or array constant!");
1870 unsigned NumElts = ValueVTs.
size();
1874 for (
unsigned i = 0; i != NumElts; ++i) {
1875 EVT EltVT = ValueVTs[i];
1876 if (isa<UndefValue>(
C))
1890 if (
const auto *Equiv = dyn_cast<DSOLocalEquivalent>(
C))
1891 return getValue(Equiv->getGlobalValue());
1893 if (
const auto *
NC = dyn_cast<NoCFIValue>(
C))
1896 if (VT == MVT::aarch64svcount) {
1897 assert(
C->isNullValue() &&
"Can only zero this target type!");
1902 VectorType *VecTy = cast<VectorType>(V->getType());
1908 unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1909 for (
unsigned i = 0; i != NumElements; ++i)
1915 if (isa<ConstantAggregateZero>(
C)) {
1933 if (
const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1942 if (
const Instruction *Inst = dyn_cast<Instruction>(V)) {
1946 Inst->getType(), std::nullopt);
1954 if (
const auto *BB = dyn_cast<BasicBlock>(V))
1960void SelectionDAGBuilder::visitCatchPad(
const CatchPadInst &
I) {
1969 if (IsMSVCCXX || IsCoreCLR)
1996 Value *ParentPad =
I.getCatchSwitchParentPad();
1998 if (isa<ConstantTokenNone>(ParentPad))
2001 SuccessorColor = cast<Instruction>(ParentPad)->
getParent();
2002 assert(SuccessorColor &&
"No parent funclet for catchret!");
2004 assert(SuccessorColorMBB &&
"No MBB for SuccessorColor!");
2013void SelectionDAGBuilder::visitCleanupPad(
const CleanupPadInst &CPI) {
2057 if (isa<CleanupPadInst>(Pad)) {
2059 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2060 UnwindDests.back().first->setIsEHScopeEntry();
2062 }
else if (
const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2065 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2066 UnwindDests.emplace_back(FuncInfo.
getMBB(CatchPadBB), Prob);
2067 UnwindDests.back().first->setIsEHScopeEntry();
2098 assert(UnwindDests.size() <= 1 &&
2099 "There should be at most one unwind destination for wasm");
2106 if (isa<LandingPadInst>(Pad)) {
2108 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2110 }
else if (isa<CleanupPadInst>(Pad)) {
2113 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2114 UnwindDests.
back().first->setIsEHScopeEntry();
2115 UnwindDests.back().first->setIsEHFuncletEntry();
2117 }
else if (
const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2119 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2120 UnwindDests.emplace_back(FuncInfo.
getMBB(CatchPadBB), Prob);
2122 if (IsMSVCCXX || IsCoreCLR)
2123 UnwindDests.back().first->setIsEHFuncletEntry();
2125 UnwindDests.back().first->setIsEHScopeEntry();
2127 NewEHPadBB = CatchSwitch->getUnwindDest();
2133 if (BPI && NewEHPadBB)
2135 EHPadBB = NewEHPadBB;
2142 auto UnwindDest =
I.getUnwindDest();
2149 for (
auto &UnwindDest : UnwindDests) {
2150 UnwindDest.first->setIsEHPad();
2151 addSuccessorWithProb(
FuncInfo.
MBB, UnwindDest.first, UnwindDest.second);
2161void SelectionDAGBuilder::visitCatchSwitch(
const CatchSwitchInst &CSI) {
2165void SelectionDAGBuilder::visitRet(
const ReturnInst &
I) {
2179 if (
I.getParent()->getTerminatingDeoptimizeCall()) {
2186 const Function *
F =
I.getParent()->getParent();
2205 unsigned NumValues = ValueVTs.
size();
2208 Align BaseAlign =
DL.getPrefTypeAlign(
I.getOperand(0)->getType());
2209 for (
unsigned i = 0; i != NumValues; ++i) {
2216 if (MemVTs[i] != ValueVTs[i])
2226 MVT::Other, Chains);
2227 }
else if (
I.getNumOperands() != 0) {
2230 unsigned NumValues = ValueVTs.
size();
2234 const Function *
F =
I.getParent()->getParent();
2237 I.getOperand(0)->getType(),
F->getCallingConv(),
2241 if (
F->getAttributes().hasRetAttr(Attribute::SExt))
2243 else if (
F->getAttributes().hasRetAttr(Attribute::ZExt))
2247 bool RetInReg =
F->getAttributes().hasRetAttr(Attribute::InReg);
2249 for (
unsigned j = 0;
j != NumValues; ++
j) {
2250 EVT VT = ValueVTs[
j];
2262 &Parts[0], NumParts, PartVT, &
I,
CC, ExtendKind);
2269 if (
I.getOperand(0)->getType()->isPointerTy()) {
2271 Flags.setPointerAddrSpace(
2272 cast<PointerType>(
I.getOperand(0)->getType())->getAddressSpace());
2275 if (NeedsRegBlock) {
2276 Flags.setInConsecutiveRegs();
2277 if (j == NumValues - 1)
2278 Flags.setInConsecutiveRegsLast();
2287 for (
unsigned i = 0; i < NumParts; ++i) {
2300 const Function *
F =
I.getParent()->getParent();
2302 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2305 Flags.setSwiftError();
2324 "LowerReturn didn't return a valid chain!");
2335 if (V->getType()->isEmptyTy())
2340 assert((!V->use_empty() || isa<CallBrInst>(V)) &&
2341 "Unused value assigned virtual registers!");
2351 if (!isa<Instruction>(V) && !isa<Argument>(V))
return;
2364 if (
const Instruction *VI = dyn_cast<Instruction>(V)) {
2366 if (VI->getParent() == FromBB)
2375 if (isa<Argument>(V)) {
2392 const BasicBlock *SrcBB = Src->getBasicBlock();
2393 const BasicBlock *DstBB = Dst->getBasicBlock();
2397 auto SuccSize = std::max<uint32_t>(
succ_size(SrcBB), 1);
2407 Src->addSuccessorWithoutProb(Dst);
2410 Prob = getEdgeProbability(Src, Dst);
2411 Src->addSuccessor(Dst, Prob);
2417 return I->getParent() == BB;
2437 if (
const CmpInst *BOp = dyn_cast<CmpInst>(
Cond)) {
2441 if (CurBB == SwitchBB ||
2447 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2452 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2458 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1),
nullptr,
2460 SL->SwitchCases.push_back(CB);
2469 SL->SwitchCases.push_back(CB);
2477 unsigned Depth = 0) {
2482 auto *
I = dyn_cast<Instruction>(V);
2486 if (Necessary !=
nullptr) {
2489 if (Necessary->contains(
I))
2497 for (
unsigned OpIdx = 0, E =
I->getNumOperands(); OpIdx < E; ++OpIdx)
2508 if (
I.getNumSuccessors() != 2)
2511 if (!
I.isConditional())
2523 if (BPI !=
nullptr) {
2529 std::optional<bool> Likely;
2532 else if (BPI->
isEdgeHot(
I.getParent(), IfFalse))
2536 if (Opc == (*Likely ? Instruction::And : Instruction::Or))
2548 if (CostThresh <= 0)
2562 if (
const auto *RhsI = dyn_cast<Instruction>(Rhs))
2573 Value *BrCond =
I.getCondition();
2574 auto ShouldCountInsn = [&RhsDeps, &BrCond](
const Instruction *Ins) {
2575 for (
const auto *U : Ins->users()) {
2577 if (
auto *UIns = dyn_cast<Instruction>(U))
2578 if (UIns != BrCond && !RhsDeps.
contains(UIns))
2591 for (
unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
2593 for (
const auto &InsPair : RhsDeps) {
2594 if (!ShouldCountInsn(InsPair.first)) {
2595 ToDrop = InsPair.first;
2599 if (ToDrop ==
nullptr)
2601 RhsDeps.erase(ToDrop);
2604 for (
const auto &InsPair : RhsDeps) {
2612 if (CostOfIncluding > CostThresh)
2638 const Value *BOpOp0, *BOpOp1;
2652 if (BOpc == Instruction::And)
2653 BOpc = Instruction::Or;
2654 else if (BOpc == Instruction::Or)
2655 BOpc = Instruction::And;
2661 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->
hasOneUse();
2666 TProb, FProb, InvertCond);
2676 if (Opc == Instruction::Or) {
2697 auto NewTrueProb = TProb / 2;
2698 auto NewFalseProb = TProb / 2 + FProb;
2701 NewFalseProb, InvertCond);
2708 Probs[1], InvertCond);
2710 assert(Opc == Instruction::And &&
"Unknown merge op!");
2730 auto NewTrueProb = TProb + FProb / 2;
2731 auto NewFalseProb = FProb / 2;
2734 NewFalseProb, InvertCond);
2741 Probs[1], InvertCond);
2750 if (Cases.size() != 2)
return true;
2754 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2755 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2756 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2757 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2763 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2764 Cases[0].
CC == Cases[1].
CC &&
2765 isa<Constant>(Cases[0].CmpRHS) &&
2766 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2767 if (Cases[0].
CC ==
ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2769 if (Cases[0].
CC ==
ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2776void SelectionDAGBuilder::visitBr(
const BranchInst &
I) {
2782 if (
I.isUnconditional()) {
2788 if (Succ0MBB != NextBlock(BrMBB) ||
2801 const Value *CondVal =
I.getCondition();
2821 bool IsUnpredictable =
I.hasMetadata(LLVMContext::MD_unpredictable);
2822 const Instruction *BOp = dyn_cast<Instruction>(CondVal);
2826 const Value *BOp0, *BOp1;
2829 Opcode = Instruction::And;
2831 Opcode = Instruction::Or;
2839 Opcode, BOp0, BOp1))) {
2841 getEdgeProbability(BrMBB, Succ0MBB),
2842 getEdgeProbability(BrMBB, Succ1MBB),
2847 assert(
SL->SwitchCases[0].ThisBB == BrMBB &&
"Unexpected lowering!");
2851 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i) {
2858 SL->SwitchCases.erase(
SL->SwitchCases.begin());
2864 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i)
2867 SL->SwitchCases.clear();
2873 nullptr, Succ0MBB, Succ1MBB, BrMBB,
getCurSDLoc(),
2894 if (CB.
TrueBB != NextBlock(SwitchBB)) {
2936 if (cast<ConstantInt>(CB.
CmpLHS)->isMinValue(
true)) {
2957 if (CB.
TrueBB == NextBlock(SwitchBB)) {
2982 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
2983 assert(JT.Reg != -1U &&
"Should lower JT Header first!");
2997 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
2998 const SDLoc &dl = *JT.SL;
3015 unsigned JumpTableReg =
3019 JT.Reg = JumpTableReg;
3031 MVT::Other, CopyTo, CMP,
3035 if (JT.MBB != NextBlock(SwitchBB))
3042 if (JT.MBB != NextBlock(SwitchBB))
3070 if (PtrTy != PtrMemTy)
3118 Entry.Node = GuardVal;
3120 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
3121 Entry.IsInReg =
true;
3122 Args.push_back(Entry);
3128 getValue(GuardCheckFn), std::move(Args));
3130 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
3144 Guard =
DAG.
getLoad(PtrMemTy, dl, Chain, GuardPtr,
3181 TLI.
makeLibCall(
DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
3212 bool UsePtrType =
false;
3216 for (
unsigned i = 0, e =
B.Cases.size(); i != e; ++i)
3236 if (!
B.FallthroughUnreachable)
3237 addSuccessorWithProb(SwitchBB,
B.Default,
B.DefaultProb);
3238 addSuccessorWithProb(SwitchBB,
MBB,
B.Prob);
3242 if (!
B.FallthroughUnreachable) {
3255 if (
MBB != NextBlock(SwitchBB))
3274 if (PopCount == 1) {
3281 }
else if (PopCount == BB.
Range) {
3300 addSuccessorWithProb(SwitchBB,
B.TargetBB,
B.ExtraProb);
3302 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3313 if (NextMBB != NextBlock(SwitchBB))
3320void SelectionDAGBuilder::visitInvoke(
const InvokeInst &
I) {
3331 assert(!
I.hasOperandBundlesOtherThan(
3332 {LLVMContext::OB_deopt, LLVMContext::OB_gc_transition,
3333 LLVMContext::OB_gc_live, LLVMContext::OB_funclet,
3334 LLVMContext::OB_cfguardtarget, LLVMContext::OB_ptrauth,
3335 LLVMContext::OB_clang_arc_attachedcall}) &&
3336 "Cannot lower invokes with arbitrary operand bundles yet!");
3338 const Value *Callee(
I.getCalledOperand());
3339 const Function *Fn = dyn_cast<Function>(Callee);
3340 if (isa<InlineAsm>(Callee))
3341 visitInlineAsm(
I, EHPadBB);
3346 case Intrinsic::donothing:
3348 case Intrinsic::seh_try_begin:
3349 case Intrinsic::seh_scope_begin:
3350 case Intrinsic::seh_try_end:
3351 case Intrinsic::seh_scope_end:
3357 case Intrinsic::experimental_patchpoint_void:
3358 case Intrinsic::experimental_patchpoint:
3359 visitPatchpoint(
I, EHPadBB);
3361 case Intrinsic::experimental_gc_statepoint:
3364 case Intrinsic::wasm_rethrow: {
3379 }
else if (
I.hasDeoptState()) {
3395 if (!isa<GCStatepointInst>(
I)) {
3407 addSuccessorWithProb(InvokeMBB, Return);
3408 for (
auto &UnwindDest : UnwindDests) {
3409 UnwindDest.first->setIsEHPad();
3410 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3419void SelectionDAGBuilder::visitCallBr(
const CallBrInst &
I) {
3424 assert(!
I.hasOperandBundlesOtherThan(
3425 {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
3426 "Cannot lower callbrs with arbitrary operand bundles yet!");
3428 assert(
I.isInlineAsm() &&
"Only know how to handle inlineasm callbr");
3434 Dests.
insert(
I.getDefaultDest());
3439 for (
unsigned i = 0, e =
I.getNumIndirectDests(); i < e; ++i) {
3442 Target->setIsInlineAsmBrIndirectTarget();
3443 Target->setMachineBlockAddressTaken();
3444 Target->setLabelMustBeEmitted();
3446 if (Dests.
insert(Dest).second)
3457void SelectionDAGBuilder::visitResume(
const ResumeInst &RI) {
3458 llvm_unreachable(
"SelectionDAGBuilder shouldn't visit resume instructions!");
3461void SelectionDAGBuilder::visitLandingPad(
const LandingPadInst &LP) {
3463 "Call to landingpad not in landing pad!");
3483 assert(ValueVTs.
size() == 2 &&
"Only two-valued landingpads are supported");
3513 if (JTB.first.HeaderBB ==
First)
3514 JTB.first.HeaderBB =
Last;
3527 for (
unsigned i = 0, e =
I.getNumSuccessors(); i != e; ++i) {
3529 bool Inserted =
Done.insert(BB).second;
3534 addSuccessorWithProb(IndirectBrMBB, Succ);
3548 if (
const CallInst *Call = dyn_cast_or_null<CallInst>(
I.getPrevNode());
3549 Call &&
Call->doesNotReturn()) {
3553 if (
Call->isNonContinuableTrap())
3560void SelectionDAGBuilder::visitUnary(
const User &
I,
unsigned Opcode) {
3562 if (
auto *FPOp = dyn_cast<FPMathOperator>(&
I))
3563 Flags.copyFMF(*FPOp);
3571void SelectionDAGBuilder::visitBinary(
const User &
I,
unsigned Opcode) {
3573 if (
auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&
I)) {
3574 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3575 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3577 if (
auto *ExactOp = dyn_cast<PossiblyExactOperator>(&
I))
3578 Flags.setExact(ExactOp->isExact());
3579 if (
auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&
I))
3580 Flags.setDisjoint(DisjointOp->isDisjoint());
3581 if (
auto *FPOp = dyn_cast<FPMathOperator>(&
I))
3582 Flags.copyFMF(*FPOp);
3591void SelectionDAGBuilder::visitShift(
const User &
I,
unsigned Opcode) {
3600 if (!
I.getType()->isVectorTy() && Op2.
getValueType() != ShiftTy) {
3602 "Unexpected shift type");
3613 dyn_cast<const OverflowingBinaryOperator>(&
I)) {
3614 nuw = OFBinOp->hasNoUnsignedWrap();
3615 nsw = OFBinOp->hasNoSignedWrap();
3618 dyn_cast<const PossiblyExactOperator>(&
I))
3619 exact = ExactOp->isExact();
3622 Flags.setExact(exact);
3623 Flags.setNoSignedWrap(nsw);
3624 Flags.setNoUnsignedWrap(nuw);
3630void SelectionDAGBuilder::visitSDiv(
const User &
I) {
3635 Flags.setExact(isa<PossiblyExactOperator>(&
I) &&
3636 cast<PossiblyExactOperator>(&
I)->isExact());
3641void SelectionDAGBuilder::visitICmp(
const ICmpInst &
I) {
3664void SelectionDAGBuilder::visitFCmp(
const FCmpInst &
I) {
3670 auto *FPMO = cast<FPMathOperator>(&
I);
3675 Flags.copyFMF(*FPMO);
3687 return isa<SelectInst>(V);
3691void SelectionDAGBuilder::visitSelect(
const User &
I) {
3695 unsigned NumValues = ValueVTs.
size();
3696 if (NumValues == 0)
return;
3706 bool IsUnaryAbs =
false;
3707 bool Negate =
false;
3710 if (
auto *FPOp = dyn_cast<FPMathOperator>(&
I))
3711 Flags.copyFMF(*FPOp);
3713 Flags.setUnpredictable(
3714 cast<SelectInst>(
I).getMetadata(LLVMContext::MD_unpredictable));
3718 EVT VT = ValueVTs[0];
3730 bool UseScalarMinMax = VT.
isVector() &&
3739 switch (SPR.Flavor) {
3745 switch (SPR.NaNBehavior) {
3758 switch (SPR.NaNBehavior) {
3802 for (
unsigned i = 0; i != NumValues; ++i) {
3811 for (
unsigned i = 0; i != NumValues; ++i) {
3825void SelectionDAGBuilder::visitTrunc(
const User &
I) {
3833void SelectionDAGBuilder::visitZExt(
const User &
I) {
3841 if (
auto *PNI = dyn_cast<PossiblyNonNegInst>(&
I))
3842 Flags.setNonNeg(PNI->hasNonNeg());
3847 if (
Flags.hasNonNeg() &&
3856void SelectionDAGBuilder::visitSExt(
const User &
I) {
3865void SelectionDAGBuilder::visitFPTrunc(
const User &
I) {
3876void SelectionDAGBuilder::visitFPExt(
const User &
I) {
3884void SelectionDAGBuilder::visitFPToUI(
const User &
I) {
3892void SelectionDAGBuilder::visitFPToSI(
const User &
I) {
3900void SelectionDAGBuilder::visitUIToFP(
const User &
I) {
3906 if (
auto *PNI = dyn_cast<PossiblyNonNegInst>(&
I))
3907 Flags.setNonNeg(PNI->hasNonNeg());
3912void SelectionDAGBuilder::visitSIToFP(
const User &
I) {
3920void SelectionDAGBuilder::visitPtrToInt(
const User &
I) {
3934void SelectionDAGBuilder::visitIntToPtr(
const User &
I) {
3946void SelectionDAGBuilder::visitBitCast(
const User &
I) {
3954 if (DestVT !=
N.getValueType())
3961 else if(
ConstantInt *
C = dyn_cast<ConstantInt>(
I.getOperand(0)))
3968void SelectionDAGBuilder::visitAddrSpaceCast(
const User &
I) {
3970 const Value *SV =
I.getOperand(0);
3975 unsigned DestAS =
I.getType()->getPointerAddressSpace();
3983void SelectionDAGBuilder::visitInsertElement(
const User &
I) {
3991 InVec, InVal, InIdx));
3994void SelectionDAGBuilder::visitExtractElement(
const User &
I) {
4004void SelectionDAGBuilder::visitShuffleVector(
const User &
I) {
4008 if (
auto *SVI = dyn_cast<ShuffleVectorInst>(&
I))
4009 Mask = SVI->getShuffleMask();
4011 Mask = cast<ConstantExpr>(
I).getShuffleMask();
4017 if (
all_of(Mask, [](
int Elem) {
return Elem == 0; }) &&
4033 unsigned MaskNumElts =
Mask.size();
4035 if (SrcNumElts == MaskNumElts) {
4041 if (SrcNumElts < MaskNumElts) {
4045 if (MaskNumElts % SrcNumElts == 0) {
4049 unsigned NumConcat = MaskNumElts / SrcNumElts;
4050 bool IsConcat =
true;
4052 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4058 if ((
Idx % SrcNumElts != (i % SrcNumElts)) ||
4059 (ConcatSrcs[i / SrcNumElts] >= 0 &&
4060 ConcatSrcs[i / SrcNumElts] != (
int)(
Idx / SrcNumElts))) {
4065 ConcatSrcs[i / SrcNumElts] =
Idx / SrcNumElts;
4072 for (
auto Src : ConcatSrcs) {
4085 unsigned PaddedMaskNumElts =
alignTo(MaskNumElts, SrcNumElts);
4086 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
4103 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4105 if (
Idx >= (
int)SrcNumElts)
4106 Idx -= SrcNumElts - PaddedMaskNumElts;
4114 if (MaskNumElts != PaddedMaskNumElts)
4122 if (SrcNumElts > MaskNumElts) {
4125 int StartIdx[2] = { -1, -1 };
4126 bool CanExtract =
true;
4127 for (
int Idx : Mask) {
4132 if (
Idx >= (
int)SrcNumElts) {
4141 if (NewStartIdx + MaskNumElts > SrcNumElts ||
4142 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
4146 StartIdx[Input] = NewStartIdx;
4149 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
4155 for (
unsigned Input = 0; Input < 2; ++Input) {
4156 SDValue &Src = Input == 0 ? Src1 : Src2;
4157 if (StartIdx[Input] < 0)
4167 for (
int &
Idx : MappedOps) {
4168 if (
Idx >= (
int)SrcNumElts)
4169 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4184 for (
int Idx : Mask) {
4190 SDValue &Src =
Idx < (int)SrcNumElts ? Src1 : Src2;
4191 if (
Idx >= (
int)SrcNumElts)
Idx -= SrcNumElts;
4205 const Value *Op0 =
I.getOperand(0);
4206 const Value *Op1 =
I.getOperand(1);
4207 Type *AggTy =
I.getType();
4209 bool IntoUndef = isa<UndefValue>(Op0);
4210 bool FromUndef = isa<UndefValue>(Op1);
4220 unsigned NumAggValues = AggValueVTs.
size();
4221 unsigned NumValValues = ValValueVTs.
size();
4225 if (!NumAggValues) {
4233 for (; i != LinearIndex; ++i)
4234 Values[i] = IntoUndef ?
DAG.
getUNDEF(AggValueVTs[i]) :
4239 for (; i != LinearIndex + NumValValues; ++i)
4240 Values[i] = FromUndef ?
DAG.
getUNDEF(AggValueVTs[i]) :
4244 for (; i != NumAggValues; ++i)
4245 Values[i] = IntoUndef ?
DAG.
getUNDEF(AggValueVTs[i]) :
4254 const Value *Op0 =
I.getOperand(0);
4256 Type *ValTy =
I.getType();
4257 bool OutOfUndef = isa<UndefValue>(Op0);
4265 unsigned NumValValues = ValValueVTs.
size();
4268 if (!NumValValues) {
4277 for (
unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4278 Values[i - LinearIndex] =
4287void SelectionDAGBuilder::visitGetElementPtr(
const User &
I) {
4288 Value *Op0 =
I.getOperand(0);
4299 bool IsVectorGEP =
I.getType()->isVectorTy();
4301 IsVectorGEP ? cast<VectorType>(
I.getType())->getElementCount()
4304 if (IsVectorGEP && !
N.getValueType().isVector()) {
4312 const Value *
Idx = GTI.getOperand();
4313 if (
StructType *StTy = GTI.getStructTypeOrNull()) {
4314 unsigned Field = cast<Constant>(
Idx)->getUniqueInteger().getZExtValue();
4325 Flags.setNoUnsignedWrap(
true);
4341 bool ElementScalable = ElementSize.
isScalable();
4345 const auto *
C = dyn_cast<Constant>(
Idx);
4346 if (
C && isa<VectorType>(
C->getType()))
4347 C =
C->getSplatValue();
4349 const auto *CI = dyn_cast_or_null<ConstantInt>(
C);
4350 if (CI && CI->isZero())
4352 if (CI && !ElementScalable) {
4367 Flags.setNoUnsignedWrap(
true);
4380 VectorElementCount);
4388 if (ElementScalable) {
4389 EVT VScaleTy =
N.getValueType().getScalarType();
4399 if (ElementMul != 1) {
4400 if (ElementMul.isPowerOf2()) {
4401 unsigned Amt = ElementMul.logBase2();
4403 N.getValueType(), IdxN,
4409 N.getValueType(), IdxN, Scale);
4415 N.getValueType(),
N, IdxN);
4426 if (PtrMemTy != PtrTy && !cast<GEPOperator>(
I).isInBounds())
4432void SelectionDAGBuilder::visitAlloca(
const AllocaInst &
I) {
4439 Type *Ty =
I.getAllocatedType();
4443 MaybeAlign Alignment = std::max(
DL.getPrefTypeAlign(Ty),
I.getAlign());
4467 if (*Alignment <= StackAlign)
4468 Alignment = std::nullopt;
4475 Flags.setNoUnsignedWrap(
true);
4485 DAG.
getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4501 if (!
I.hasMetadata(LLVMContext::MD_noundef))
4503 return I.getMetadata(LLVMContext::MD_range);
4507 if (
const auto *CB = dyn_cast<CallBase>(&
I)) {
4509 if (CB->hasRetAttr(Attribute::NoUndef))
4510 return CB->getRange();
4514 return std::nullopt;
4517void SelectionDAGBuilder::visitLoad(
const LoadInst &
I) {
4519 return visitAtomicLoad(
I);
4522 const Value *SV =
I.getOperand(0);
4526 if (
const Argument *Arg = dyn_cast<Argument>(SV)) {
4527 if (Arg->hasSwiftErrorAttr())
4528 return visitLoadFromSwiftError(
I);
4531 if (
const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
4532 if (Alloca->isSwiftError())
4533 return visitLoadFromSwiftError(
I);
4539 Type *Ty =
I.getType();
4543 unsigned NumValues = ValueVTs.
size();
4547 Align Alignment =
I.getAlign();
4550 bool isVolatile =
I.isVolatile();
4555 bool ConstantMemory =
false;
4568 ConstantMemory =
true;
4583 unsigned ChainI = 0;
4584 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4607 MMOFlags, AAInfo, Ranges);
4608 Chains[ChainI] =
L.getValue(1);
4610 if (MemVTs[i] != ValueVTs[i])
4616 if (!ConstantMemory) {
4629void SelectionDAGBuilder::visitStoreToSwiftError(
const StoreInst &
I) {
4631 "call visitStoreToSwiftError when backend supports swifterror");
4635 const Value *SrcV =
I.getOperand(0);
4637 SrcV->
getType(), ValueVTs, &Offsets, 0);
4638 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4639 "expect a single EVT for swifterror");
4648 SDValue(Src.getNode(), Src.getResNo()));
4652void SelectionDAGBuilder::visitLoadFromSwiftError(
const LoadInst &
I) {
4654 "call visitLoadFromSwiftError when backend supports swifterror");
4657 !
I.hasMetadata(LLVMContext::MD_nontemporal) &&
4658 !
I.hasMetadata(LLVMContext::MD_invariant_load) &&
4659 "Support volatile, non temporal, invariant for load_from_swift_error");
4661 const Value *SV =
I.getOperand(0);
4662 Type *Ty =
I.getType();
4667 I.getAAMetadata()))) &&
4668 "load_from_swift_error should not be constant memory");
4673 ValueVTs, &Offsets, 0);
4674 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4675 "expect a single EVT for swifterror");
4685void SelectionDAGBuilder::visitStore(
const StoreInst &
I) {
4687 return visitAtomicStore(
I);
4689 const Value *SrcV =
I.getOperand(0);
4690 const Value *PtrV =
I.getOperand(1);
4696 if (
const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4697 if (Arg->hasSwiftErrorAttr())
4698 return visitStoreToSwiftError(
I);
4701 if (
const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4702 if (Alloca->isSwiftError())
4703 return visitStoreToSwiftError(
I);
4710 SrcV->
getType(), ValueVTs, &MemVTs, &Offsets);
4711 unsigned NumValues = ValueVTs.
size();
4724 Align Alignment =
I.getAlign();
4729 unsigned ChainI = 0;
4730 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4747 if (MemVTs[i] != ValueVTs[i])
4750 DAG.
getStore(Root, dl, Val,
Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4751 Chains[ChainI] = St;
4760void SelectionDAGBuilder::visitMaskedStore(
const CallInst &
I,
4761 bool IsCompressing) {
4767 Src0 =
I.getArgOperand(0);
4768 Ptr =
I.getArgOperand(1);
4769 Alignment = cast<ConstantInt>(
I.getArgOperand(2))->getAlignValue();
4770 Mask =
I.getArgOperand(3);
4775 Src0 =
I.getArgOperand(0);
4776 Ptr =
I.getArgOperand(1);
4777 Mask =
I.getArgOperand(2);
4778 Alignment =
I.getParamAlign(1).valueOrOne();
4781 Value *PtrOperand, *MaskOperand, *Src0Operand;
4784 getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4786 getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4796 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
4841 assert(
Ptr->getType()->isVectorTy() &&
"Unexpected pointer type");
4844 if (
auto *
C = dyn_cast<Constant>(
Ptr)) {
4845 C =
C->getSplatValue();
4851 ElementCount NumElts = cast<VectorType>(
Ptr->getType())->getElementCount();
4860 if (!
GEP ||
GEP->getParent() != CurBB)
4863 if (
GEP->getNumOperands() != 2)
4866 const Value *BasePtr =
GEP->getPointerOperand();
4867 const Value *IndexVal =
GEP->getOperand(
GEP->getNumOperands() - 1);
4873 TypeSize ScaleVal =
DL.getTypeAllocSize(
GEP->getResultElementType());
4878 if (ScaleVal != 1 &&
4891void SelectionDAGBuilder::visitMaskedScatter(
const CallInst &
I) {
4899 Align Alignment = cast<ConstantInt>(
I.getArgOperand(2))
4900 ->getMaybeAlignValue()
4911 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
4931 Ops, MMO, IndexType,
false);
4936void SelectionDAGBuilder::visitMaskedLoad(
const CallInst &
I,
bool IsExpanding) {
4942 Ptr =
I.getArgOperand(0);
4943 Alignment = cast<ConstantInt>(
I.getArgOperand(1))->getAlignValue();
4944 Mask =
I.getArgOperand(2);
4945 Src0 =
I.getArgOperand(3);
4950 Ptr =
I.getArgOperand(0);
4951 Alignment =
I.getParamAlign(0).valueOrOne();
4952 Mask =
I.getArgOperand(1);
4953 Src0 =
I.getArgOperand(2);
4956 Value *PtrOperand, *MaskOperand, *Src0Operand;
4959 getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4961 getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4979 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
5005void SelectionDAGBuilder::visitMaskedGather(
const CallInst &
I) {
5015 Align Alignment = cast<ConstantInt>(
I.getArgOperand(1))
5016 ->getMaybeAlignValue()
5028 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
5074 AAMDNodes(),
nullptr, SSID, SuccessOrdering, FailureOrdering);
5077 dl, MemVT, VTs, InChain,
5088void SelectionDAGBuilder::visitAtomicRMW(
const AtomicRMWInst &
I) {
5091 switch (
I.getOperation()) {
5141void SelectionDAGBuilder::visitFence(
const FenceInst &
I) {
5155void SelectionDAGBuilder::visitAtomicLoad(
const LoadInst &
I) {
5175 nullptr, SSID, Order);
5191void SelectionDAGBuilder::visitAtomicStore(
const StoreInst &
I) {
5213 nullptr, SSID, Ordering);
5229void SelectionDAGBuilder::visitTargetIntrinsic(
const CallInst &
I,
5230 unsigned Intrinsic) {
5235 bool HasChain = !
F->doesNotAccessMemory();
5237 HasChain &&
F->onlyReadsMemory() &&
F->willReturn() &&
F->doesNotThrow();
5264 for (
unsigned i = 0, e =
I.arg_size(); i != e; ++i) {
5265 const Value *Arg =
I.getArgOperand(i);
5266 if (!
I.paramHasAttr(i, Attribute::ImmArg)) {
5273 if (
const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
5274 assert(CI->getBitWidth() <= 64 &&
5275 "large intrinsic immediates not handled");
5293 if (
auto *FPMO = dyn_cast<FPMathOperator>(&
I))
5294 Flags.copyFMF(*FPMO);
5301 auto *Token = Bundle->Inputs[0].get();
5303 assert(Ops.
back().getValueType() != MVT::Glue &&
5304 "Did not expected another glue node here.");
5312 if (IsTgtIntrinsic) {
5320 else if (
Info.fallbackAddressSpace)
5324 Info.size,
I.getAAMetadata());
5325 }
else if (!HasChain) {
5327 }
else if (!
I.getType()->isVoidTy()) {
5341 if (!
I.getType()->isVoidTy()) {
5342 if (!isa<VectorType>(
I.getType()))
5414 SDValue TwoToFractionalPartOfX;
5491 if (
Op.getValueType() == MVT::f32 &&
5515 if (
Op.getValueType() == MVT::f32 &&
5614 if (
Op.getValueType() == MVT::f32 &&
5698 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5711 if (
Op.getValueType() == MVT::f32 &&
5788 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5799 if (
Op.getValueType() == MVT::f32 &&
5812 bool IsExp10 =
false;
5813 if (
LHS.getValueType() == MVT::f32 &&
RHS.getValueType() == MVT::f32 &&
5817 IsExp10 = LHSC->isExactlyValue(Ten);
5844 unsigned Val = RHSC->getSExtValue();
5873 CurSquare, CurSquare);
5878 if (RHSC->getSExtValue() < 0)
5892 EVT VT =
LHS.getValueType();
5915 if ((ScaleInt > 0 || (Saturating &&
Signed)) &&
5919 Opcode, VT, ScaleInt);
5954 switch (
N.getOpcode()) {
5957 Regs.emplace_back(cast<RegisterSDNode>(
Op)->
getReg(),
5958 Op.getValueType().getSizeInBits());
5983bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
5986 const Argument *Arg = dyn_cast<Argument>(V);
6000 auto &Inst =
TII->get(TargetOpcode::DBG_INSTR_REF);
6007 auto *NewDIExpr = FragExpr;
6014 return BuildMI(MF,
DL, Inst,
false, MOs, Variable, NewDIExpr);
6017 auto &Inst =
TII->get(TargetOpcode::DBG_VALUE);
6018 return BuildMI(MF,
DL, Inst, Indirect, Reg, Variable, FragExpr);
6022 if (Kind == FuncArgumentDbgValueKind::Value) {
6027 if (!IsInEntryBlock)
6043 bool VariableIsFunctionInputArg = Variable->
isParameter() &&
6044 !
DL->getInlinedAt();
6046 if (!IsInPrologue && !VariableIsFunctionInputArg)
6080 if (VariableIsFunctionInputArg) {
6085 return !NodeMap[
V].getNode();
6090 bool IsIndirect =
false;
6091 std::optional<MachineOperand>
Op;
6094 if (FI != std::numeric_limits<int>::max())
6098 if (!
Op &&
N.getNode()) {
6101 if (ArgRegsAndSizes.
size() == 1)
6102 Reg = ArgRegsAndSizes.
front().first;
6104 if (Reg &&
Reg.isVirtual()) {
6112 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6116 if (!
Op &&
N.getNode()) {
6121 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
6130 for (
const auto &RegAndSize : SplitRegs) {
6134 int RegFragmentSizeInBits = RegAndSize.second;
6136 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
6139 if (
Offset >= ExprFragmentSizeInBits)
6143 if (
Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
6144 RegFragmentSizeInBits = ExprFragmentSizeInBits -
Offset;
6149 Expr,
Offset, RegFragmentSizeInBits);
6150 Offset += RegAndSize.second;
6153 if (!FragmentExpr) {
6160 MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
6161 Kind != FuncArgumentDbgValueKind::Value);
6172 V->getType(), std::nullopt);
6173 if (RFV.occupiesMultipleRegs()) {
6174 splitMultiRegDbgValue(RFV.getRegsAndSizes());
6179 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6180 }
else if (ArgRegsAndSizes.
size() > 1) {
6183 splitMultiRegDbgValue(ArgRegsAndSizes);
6192 "Expected inlined-at fields to agree");
6196 NewMI = MakeVRegDbgValue(
Op->getReg(), Expr, IsIndirect);
6198 NewMI =
BuildMI(MF,
DL,
TII->get(TargetOpcode::DBG_VALUE),
true, *
Op,
6211 unsigned DbgSDNodeOrder) {
6212 if (
auto *FISDN = dyn_cast<FrameIndexSDNode>(
N.getNode())) {
6224 false, dl, DbgSDNodeOrder);
6227 false, dl, DbgSDNodeOrder);
6231 switch (Intrinsic) {
6232 case Intrinsic::smul_fix:
6234 case Intrinsic::umul_fix:
6236 case Intrinsic::smul_fix_sat:
6238 case Intrinsic::umul_fix_sat:
6240 case Intrinsic::sdiv_fix:
6242 case Intrinsic::udiv_fix:
6244 case Intrinsic::sdiv_fix_sat:
6246 case Intrinsic::udiv_fix_sat:
6253void SelectionDAGBuilder::lowerCallToExternalSymbol(
const CallInst &
I,
6254 const char *FunctionName) {
6255 assert(FunctionName &&
"FunctionName must not be nullptr");
6265 assert(cast<CallBase>(PreallocatedSetup)
6268 "expected call_preallocated_setup Value");
6269 for (
const auto *U : PreallocatedSetup->
users()) {
6270 auto *UseCall = cast<CallBase>(U);
6271 const Function *Fn = UseCall->getCalledFunction();
6272 if (!Fn || Fn->
getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6282bool SelectionDAGBuilder::visitEntryValueDbgValue(
6289 const Argument *Arg = cast<Argument>(Values[0]);
6295 dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6296 "couldn't find an associated register for the Argument\n");
6299 Register ArgVReg = ArgIt->getSecond();
6302 if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6304 Variable, Expr, PhysReg,
false , DbgLoc, SDNodeOrder);
6308 LLVM_DEBUG(
dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6309 "couldn't find a physical register\n");
6314void SelectionDAGBuilder::visitConvergenceControl(
const CallInst &
I,
6315 unsigned Intrinsic) {
6317 switch (Intrinsic) {
6318 case Intrinsic::experimental_convergence_anchor:
6321 case Intrinsic::experimental_convergence_entry:
6324 case Intrinsic::experimental_convergence_loop: {
6326 auto *Token = Bundle->Inputs[0].get();
6334void SelectionDAGBuilder::visitVectorHistogram(
const CallInst &
I,
6335 unsigned IntrinsicID) {
6338 assert(IntrinsicID == Intrinsic::experimental_vector_histogram_add &&
6339 "Tried to lower unsupported histogram type");
6360 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
6386 Ops, MMO, IndexType);
6393void SelectionDAGBuilder::visitIntrinsicCall(
const CallInst &
I,
6394 unsigned Intrinsic) {
6401 if (
auto *FPOp = dyn_cast<FPMathOperator>(&
I))
6402 Flags.copyFMF(*FPOp);
6404 switch (Intrinsic) {
6407 visitTargetIntrinsic(
I, Intrinsic);
6409 case Intrinsic::vscale: {
6414 case Intrinsic::vastart: visitVAStart(
I);
return;
6415 case Intrinsic::vaend: visitVAEnd(
I);
return;
6416 case Intrinsic::vacopy: visitVACopy(
I);
return;
6417 case Intrinsic::returnaddress:
6422 case Intrinsic::addressofreturnaddress:
6427 case Intrinsic::sponentry:
6432 case Intrinsic::frameaddress:
6437 case Intrinsic::read_volatile_register:
6438 case Intrinsic::read_register: {
6442 DAG.
getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6450 case Intrinsic::write_register: {
6452 Value *RegValue =
I.getArgOperand(1);
6455 DAG.
getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6460 case Intrinsic::memcpy: {
6461 const auto &MCI = cast<MemCpyInst>(
I);
6466 Align DstAlign = MCI.getDestAlign().valueOrOne();
6467 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6468 Align Alignment = std::min(DstAlign, SrcAlign);
6469 bool isVol = MCI.isVolatile();
6474 false, &
I, std::nullopt,
6477 I.getAAMetadata(),
AA);
6478 updateDAGForMaybeTailCall(MC);
6481 case Intrinsic::memcpy_inline: {
6482 const auto &MCI = cast<MemCpyInlineInst>(
I);
6486 assert(isa<ConstantSDNode>(
Size) &&
"memcpy_inline needs constant size");
6488 Align DstAlign = MCI.getDestAlign().valueOrOne();
6489 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6490 Align Alignment = std::min(DstAlign, SrcAlign);
6491 bool isVol = MCI.isVolatile();
6495 true, &
I, std::nullopt,
6498 I.getAAMetadata(),
AA);
6499 updateDAGForMaybeTailCall(MC);
6502 case Intrinsic::memset: {
6503 const auto &MSI = cast<MemSetInst>(
I);
6508 Align Alignment = MSI.getDestAlign().valueOrOne();
6509 bool isVol = MSI.isVolatile();
6512 Root, sdl, Op1, Op2, Op3, Alignment, isVol,
false,
6514 updateDAGForMaybeTailCall(MS);
6517 case Intrinsic::memset_inline: {
6518 const auto &MSII = cast<MemSetInlineInst>(
I);
6522 assert(isa<ConstantSDNode>(
Size) &&
"memset_inline needs constant size");
6524 Align DstAlign = MSII.getDestAlign().valueOrOne();
6525 bool isVol = MSII.isVolatile();
6531 updateDAGForMaybeTailCall(MC);
6534 case Intrinsic::memmove: {
6535 const auto &MMI = cast<MemMoveInst>(
I);
6540 Align DstAlign = MMI.getDestAlign().valueOrOne();
6541 Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6542 Align Alignment = std::min(DstAlign, SrcAlign);
6543 bool isVol = MMI.isVolatile();
6551 I.getAAMetadata(),
AA);
6552 updateDAGForMaybeTailCall(MM);
6555 case Intrinsic::memcpy_element_unordered_atomic: {
6561 Type *LengthTy =
MI.getLength()->getType();
6562 unsigned ElemSz =
MI.getElementSizeInBytes();
6568 updateDAGForMaybeTailCall(MC);
6571 case Intrinsic::memmove_element_unordered_atomic: {
6572 auto &
MI = cast<AtomicMemMoveInst>(
I);
6577 Type *LengthTy =
MI.getLength()->getType();
6578 unsigned ElemSz =
MI.getElementSizeInBytes();
6584 updateDAGForMaybeTailCall(MC);
6587 case Intrinsic::memset_element_unordered_atomic: {
6588 auto &
MI = cast<AtomicMemSetInst>(
I);
6593 Type *LengthTy =
MI.getLength()->getType();
6594 unsigned ElemSz =
MI.getElementSizeInBytes();
6599 updateDAGForMaybeTailCall(MC);
6602 case Intrinsic::call_preallocated_setup: {
6611 case Intrinsic::call_preallocated_arg: {
6626 case Intrinsic::dbg_declare: {
6627 const auto &DI = cast<DbgDeclareInst>(
I);
6630 if (AssignmentTrackingEnabled ||
6633 LLVM_DEBUG(
dbgs() <<
"SelectionDAG visiting dbg_declare: " << DI <<
"\n");
6639 assert(!DI.hasArgList() &&
"Only dbg.value should currently use DIArgList");
6644 case Intrinsic::dbg_label: {
6647 assert(Label &&
"Missing label");
6654 case Intrinsic::dbg_assign: {
6656 if (AssignmentTrackingEnabled)
6662 case Intrinsic::dbg_value: {
6664 if (AssignmentTrackingEnabled)
6684 SDNodeOrder, IsVariadic))
6690 case Intrinsic::eh_typeid_for: {
6699 case Intrinsic::eh_return_i32:
6700 case Intrinsic::eh_return_i64:
6708 case Intrinsic::eh_unwind_init:
6711 case Intrinsic::eh_dwarf_cfa:
6716 case Intrinsic::eh_sjlj_callsite: {
6717 ConstantInt *CI = cast<ConstantInt>(
I.getArgOperand(0));
6723 case Intrinsic::eh_sjlj_functioncontext: {
6727 cast<AllocaInst>(
I.getArgOperand(0)->stripPointerCasts());
6732 case Intrinsic::eh_sjlj_setjmp: {
6742 case Intrinsic::eh_sjlj_longjmp:
6746 case Intrinsic::eh_sjlj_setup_dispatch:
6750 case Intrinsic::masked_gather:
6751 visitMaskedGather(
I);
6753 case Intrinsic::masked_load:
6756 case Intrinsic::masked_scatter:
6757 visitMaskedScatter(
I);
6759 case Intrinsic::masked_store:
6760 visitMaskedStore(
I);
6762 case Intrinsic::masked_expandload:
6763 visitMaskedLoad(
I,
true );
6765 case Intrinsic::masked_compressstore:
6766 visitMaskedStore(
I,
true );
6768 case Intrinsic::powi:
6772 case Intrinsic::log:
6775 case Intrinsic::log2:
6779 case Intrinsic::log10:
6783 case Intrinsic::exp:
6786 case Intrinsic::exp2:
6790 case Intrinsic::pow:
6794 case Intrinsic::sqrt:
6795 case Intrinsic::fabs:
6796 case Intrinsic::sin:
6797 case Intrinsic::cos:
6798 case Intrinsic::tan:
6799 case Intrinsic::asin:
6800 case Intrinsic::acos:
6801 case Intrinsic::atan:
6802 case Intrinsic::sinh:
6803 case Intrinsic::cosh:
6804 case Intrinsic::tanh:
6805 case Intrinsic::exp10:
6806 case Intrinsic::floor:
6807 case Intrinsic::ceil:
6808 case Intrinsic::trunc:
6809 case Intrinsic::rint:
6810 case Intrinsic::nearbyint:
6811 case Intrinsic::round:
6812 case Intrinsic::roundeven:
6813 case Intrinsic::canonicalize: {
6816 switch (Intrinsic) {
6818 case Intrinsic::sqrt: Opcode =
ISD::FSQRT;
break;
6819 case Intrinsic::fabs: Opcode =
ISD::FABS;
break;
6820 case Intrinsic::sin: Opcode =
ISD::FSIN;
break;
6821 case Intrinsic::cos: Opcode =
ISD::FCOS;
break;
6822 case Intrinsic::tan: Opcode =
ISD::FTAN;
break;
6823 case Intrinsic::asin: Opcode =
ISD::FASIN;
break;
6824 case Intrinsic::acos: Opcode =
ISD::FACOS;
break;
6825 case Intrinsic::atan: Opcode =
ISD::FATAN;
break;
6826 case Intrinsic::sinh: Opcode =
ISD::FSINH;
break;
6827 case Intrinsic::cosh: Opcode =
ISD::FCOSH;
break;
6828 case Intrinsic::tanh: Opcode =
ISD::FTANH;
break;
6829 case Intrinsic::exp10: Opcode =
ISD::FEXP10;
break;
6830 case Intrinsic::floor: Opcode =
ISD::FFLOOR;
break;
6831 case Intrinsic::ceil: Opcode =
ISD::FCEIL;
break;
6832 case Intrinsic::trunc: Opcode =
ISD::FTRUNC;
break;
6833 case Intrinsic::rint: Opcode =
ISD::FRINT;
break;
6835 case Intrinsic::round: Opcode =
ISD::FROUND;
break;
6846 case Intrinsic::lround:
6847 case Intrinsic::llround:
6848 case Intrinsic::lrint:
6849 case Intrinsic::llrint: {
6852 switch (Intrinsic) {
6854 case Intrinsic::lround: Opcode =
ISD::LROUND;
break;
6856 case Intrinsic::lrint: Opcode =
ISD::LRINT;
break;
6857 case Intrinsic::llrint: Opcode =
ISD::LLRINT;
break;
6866 case Intrinsic::minnum:
6872 case Intrinsic::maxnum:
6878 case Intrinsic::minimum:
6884 case Intrinsic::maximum:
6890 case Intrinsic::minimumnum:
6896 case Intrinsic::maximumnum:
6902 case Intrinsic::copysign:
6908 case Intrinsic::ldexp:
6914 case Intrinsic::frexp: {
6922 case Intrinsic::arithmetic_fence: {
6928 case Intrinsic::fma:
6934#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
6935 case Intrinsic::INTRINSIC:
6936#include "llvm/IR/ConstrainedOps.def"
6937 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(
I));
6939#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6940#include "llvm/IR/VPIntrinsics.def"
6941 visitVectorPredicationIntrinsic(cast<VPIntrinsic>(
I));
6943 case Intrinsic::fptrunc_round: {
6946 Metadata *MD = cast<MetadataAsValue>(
I.getArgOperand(1))->getMetadata();
6947 std::optional<RoundingMode> RoundMode =
6954 Flags.copyFMF(*cast<FPMathOperator>(&
I));
6966 case Intrinsic::fmuladd: {
6987 case Intrinsic::convert_to_fp16:
6994 case Intrinsic::convert_from_fp16:
7000 case Intrinsic::fptosi_sat: {
7007 case Intrinsic::fptoui_sat: {
7014 case Intrinsic::set_rounding:
7020 case Intrinsic::is_fpclass: {
7025 cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue());
7030 Flags.setNoFPExcept(
7031 !
F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
7046 case Intrinsic::get_fpenv: {
7061 int SPFI = cast<FrameIndexSDNode>(Temp.
getNode())->getIndex();
7068 Res =
DAG.
getLoad(EnvVT, sdl, Chain, Temp, MPI);
7074 case Intrinsic::set_fpenv: {
7088 int SPFI = cast<FrameIndexSDNode>(Temp.
getNode())->getIndex();
7091 Chain =
DAG.
getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
7101 case Intrinsic::reset_fpenv:
7104 case Intrinsic::get_fpmode:
7113 case Intrinsic::set_fpmode:
7118 case Intrinsic::reset_fpmode: {
7123 case Intrinsic::pcmarker: {
7128 case Intrinsic::readcyclecounter: {
7136 case Intrinsic::readsteadycounter: {
7144 case Intrinsic::bitreverse:
7149 case Intrinsic::bswap:
7154 case Intrinsic::cttz: {
7156 ConstantInt *CI = cast<ConstantInt>(
I.getArgOperand(1));
7162 case Intrinsic::ctlz: {
7164 ConstantInt *CI = cast<ConstantInt>(
I.getArgOperand(1));
7170 case Intrinsic::ctpop: {
7176 case Intrinsic::fshl:
7177 case Intrinsic::fshr: {
7178 bool IsFSHL =
Intrinsic == Intrinsic::fshl;
7182 EVT VT =
X.getValueType();
7193 case Intrinsic::sadd_sat: {
7199 case Intrinsic::uadd_sat: {
7205 case Intrinsic::ssub_sat: {
7211 case Intrinsic::usub_sat: {
7217 case Intrinsic::sshl_sat: {
7223 case Intrinsic::ushl_sat: {
7229 case Intrinsic::smul_fix:
7230 case Intrinsic::umul_fix:
7231 case Intrinsic::smul_fix_sat:
7232 case Intrinsic::umul_fix_sat: {
7240 case Intrinsic::sdiv_fix:
7241 case Intrinsic::udiv_fix:
7242 case Intrinsic::sdiv_fix_sat:
7243 case Intrinsic::udiv_fix_sat: {
7248 Op1, Op2, Op3,
DAG, TLI));
7251 case Intrinsic::smax: {
7257 case Intrinsic::smin: {
7263 case Intrinsic::umax: {
7269 case Intrinsic::umin: {
7275 case Intrinsic::abs: {
7281 case Intrinsic::scmp: {
7288 case Intrinsic::ucmp: {
7295 case Intrinsic::stacksave: {
7303 case Intrinsic::stackrestore:
7307 case Intrinsic::get_dynamic_area_offset: {
7322 case Intrinsic::stackguard: {
7343 case Intrinsic::stackprotector: {
7364 Chain, sdl, Src, FIN,
7371 case Intrinsic::objectsize:
7374 case Intrinsic::is_constant:
7377 case Intrinsic::annotation:
7378 case Intrinsic::ptr_annotation:
7379 case Intrinsic::launder_invariant_group:
7380 case Intrinsic::strip_invariant_group:
7385 case Intrinsic::assume:
7386 case Intrinsic::experimental_noalias_scope_decl:
7387 case Intrinsic::var_annotation:
7388 case Intrinsic::sideeffect:
7393 case Intrinsic::codeview_annotation: {
7397 Metadata *MD = cast<MetadataAsValue>(
I.getArgOperand(0))->getMetadata();
7404 case Intrinsic::init_trampoline: {
7405 const Function *
F = cast<Function>(
I.getArgOperand(1)->stripPointerCasts());
7420 case Intrinsic::adjust_trampoline:
7425 case Intrinsic::gcroot: {
7427 "only valid in functions with gc specified, enforced by Verifier");
7429 const Value *Alloca =
I.getArgOperand(0)->stripPointerCasts();
7430 const Constant *TypeMap = cast<Constant>(
I.getArgOperand(1));
7436 case Intrinsic::gcread:
7437 case Intrinsic::gcwrite:
7439 case Intrinsic::get_rounding:
7445 case Intrinsic::expect:
7450 case Intrinsic::ubsantrap:
7451 case Intrinsic::debugtrap:
7452 case Intrinsic::trap: {
7454 I.getAttributes().getFnAttr(
"trap-func-name").getValueAsString();
7455 if (TrapFuncName.
empty()) {
7456 switch (Intrinsic) {
7457 case Intrinsic::trap:
7460 case Intrinsic::debugtrap:
7463 case Intrinsic::ubsantrap:
7467 cast<ConstantInt>(
I.getArgOperand(0))->getZExtValue(), sdl,
7473 I.hasFnAttr(Attribute::NoMerge));
7477 if (Intrinsic == Intrinsic::ubsantrap) {
7479 Args[0].Val =
I.getArgOperand(0);
7481 Args[0].Ty =
Args[0].Val->getType();
7485 CLI.setDebugLoc(sdl).setChain(
getRoot()).setLibCallee(
7490 CLI.NoMerge =
I.hasFnAttr(Attribute::NoMerge);
7496 case Intrinsic::allow_runtime_check:
7497 case Intrinsic::allow_ubsan_check:
7501 case Intrinsic::uadd_with_overflow:
7502 case Intrinsic::sadd_with_overflow:
7503 case Intrinsic::usub_with_overflow:
7504 case Intrinsic::ssub_with_overflow:
7505 case Intrinsic::umul_with_overflow:
7506 case Intrinsic::smul_with_overflow: {
7508 switch (Intrinsic) {
7510 case Intrinsic::uadd_with_overflow:
Op =
ISD::UADDO;
break;
7511 case Intrinsic::sadd_with_overflow:
Op =
ISD::SADDO;
break;
7512 case Intrinsic::usub_with_overflow:
Op =
ISD::USUBO;
break;
7513 case Intrinsic::ssub_with_overflow:
Op =
ISD::SSUBO;
break;
7514 case Intrinsic::umul_with_overflow:
Op =
ISD::UMULO;
break;
7515 case Intrinsic::smul_with_overflow:
Op =
ISD::SMULO;
break;
7521 EVT OverflowVT = MVT::i1;
7530 case Intrinsic::prefetch: {
7532 unsigned rw = cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue();
7545 std::nullopt, Flags);
7554 case Intrinsic::lifetime_start:
7555 case Intrinsic::lifetime_end: {
7556 bool IsStart = (
Intrinsic == Intrinsic::lifetime_start);
7561 const int64_t ObjectSize =
7562 cast<ConstantInt>(
I.getArgOperand(0))->getSExtValue();
7567 for (
const Value *Alloca : Allocas) {
7568 const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(Alloca);
7571 if (!LifetimeObject)
7591 case Intrinsic::pseudoprobe: {
7592 auto Guid = cast<ConstantInt>(
I.getArgOperand(0))->getZExtValue();
7593 auto Index = cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue();
7594 auto Attr = cast<ConstantInt>(
I.getArgOperand(2))->getZExtValue();
7599 case Intrinsic::invariant_start:
7604 case Intrinsic::invariant_end:
7607 case Intrinsic::clear_cache: {
7612 {InputChain, StartVal, EndVal});
7617 case Intrinsic::donothing:
7618 case Intrinsic::seh_try_begin:
7619 case Intrinsic::seh_scope_begin:
7620 case Intrinsic::seh_try_end:
7621 case Intrinsic::seh_scope_end:
7624 case Intrinsic::experimental_stackmap:
7627 case Intrinsic::experimental_patchpoint_void:
7628 case Intrinsic::experimental_patchpoint:
7631 case Intrinsic::experimental_gc_statepoint:
7634 case Intrinsic::experimental_gc_result:
7635 visitGCResult(cast<GCResultInst>(
I));
7637 case Intrinsic::experimental_gc_relocate:
7638 visitGCRelocate(cast<GCRelocateInst>(
I));
7640 case Intrinsic::instrprof_cover:
7642 case Intrinsic::instrprof_increment:
7644 case Intrinsic::instrprof_timestamp:
7646 case Intrinsic::instrprof_value_profile:
7648 case Intrinsic::instrprof_mcdc_parameters:
7650 case Intrinsic::instrprof_mcdc_tvbitmap_update:
7652 case Intrinsic::localescape: {
7658 for (
unsigned Idx = 0, E =
I.arg_size();
Idx < E; ++
Idx) {
7659 Value *Arg =
I.getArgOperand(
Idx)->stripPointerCasts();
7660 if (isa<ConstantPointerNull>(Arg))
7664 "can only escape static allocas");
7669 TII->get(TargetOpcode::LOCAL_ESCAPE))
7677 case Intrinsic::localrecover: {
7682 auto *Fn = cast<Function>(
I.getArgOperand(0)->stripPointerCasts());
7683 auto *
Idx = cast<ConstantInt>(
I.getArgOperand(2));
7685 unsigned(
Idx->getLimitedValue(std::numeric_limits<int>::max()));
7706 case Intrinsic::eh_exceptionpointer:
7707 case Intrinsic::eh_exceptioncode: {
7709 const auto *CPI = cast<CatchPadInst>(
I.getArgOperand(0));
7714 if (Intrinsic == Intrinsic::eh_exceptioncode)
7719 case Intrinsic::xray_customevent: {
7748 case Intrinsic::xray_typedevent: {
7775 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys, Ops);
7781 case Intrinsic::experimental_deoptimize:
7784 case Intrinsic::experimental_stepvector:
7787 case Intrinsic::vector_reduce_fadd:
7788 case Intrinsic::vector_reduce_fmul:
7789 case Intrinsic::vector_reduce_add:
7790 case Intrinsic::vector_reduce_mul:
7791 case Intrinsic::vector_reduce_and:
7792 case Intrinsic::vector_reduce_or:
7793 case Intrinsic::vector_reduce_xor:
7794 case Intrinsic::vector_reduce_smax:
7795 case Intrinsic::vector_reduce_smin:
7796 case Intrinsic::vector_reduce_umax:
7797 case Intrinsic::vector_reduce_umin:
7798 case Intrinsic::vector_reduce_fmax:
7799 case Intrinsic::vector_reduce_fmin:
7800 case Intrinsic::vector_reduce_fmaximum:
7801 case Intrinsic::vector_reduce_fminimum:
7802 visitVectorReduce(
I, Intrinsic);
7805 case Intrinsic::icall_branch_funnel: {
7814 "llvm.icall.branch.funnel operand must be a GlobalValue");
7817 struct BranchFunnelTarget {
7823 for (
unsigned Op = 1,
N =
I.arg_size();
Op !=
N;
Op += 2) {
7826 if (ElemBase !=
Base)
7828 "to the same GlobalValue");
7831 auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
7834 "llvm.icall.branch.funnel operand must be a GlobalValue");
7840 [](
const BranchFunnelTarget &T1,
const BranchFunnelTarget &T2) {
7841 return T1.Offset < T2.Offset;
7844 for (
auto &
T : Targets) {
7859 case Intrinsic::wasm_landingpad_index:
7865 case Intrinsic::aarch64_settag:
7866 case Intrinsic::aarch64_settag_zero: {
7868 bool ZeroMemory =
Intrinsic == Intrinsic::aarch64_settag_zero;
7877 case Intrinsic::amdgcn_cs_chain: {
7878 assert(
I.arg_size() == 5 &&
"Additional args not supported yet");
7879 assert(cast<ConstantInt>(
I.getOperand(4))->isZero() &&
7880 "Non-zero flags not supported yet");
7896 for (
unsigned Idx : {2, 3, 1}) {
7899 Arg.
Ty =
I.getOperand(
Idx)->getType();
7901 Args.push_back(Arg);
7904 assert(Args[0].IsInReg &&
"SGPR args should be marked inreg");
7905 assert(!Args[1].IsInReg &&
"VGPR args should not be marked inreg");
7906 Args[2].IsInReg =
true;
7911 .setCallee(
CC,
RetTy, Callee, std::move(Args))
7914 .setConvergent(
I.isConvergent());
7916 std::pair<SDValue, SDValue>
Result =
7920 "Should've lowered as tail call");
7925 case Intrinsic::ptrmask: {
7945 case Intrinsic::threadlocal_address: {
7949 case Intrinsic::get_active_lane_mask: {
7952 EVT ElementVT =
Index.getValueType();
7955 visitTargetIntrinsic(
I, Intrinsic);
7973 case Intrinsic::experimental_get_vector_length: {
7974 assert(cast<ConstantInt>(
I.getOperand(1))->getSExtValue() > 0 &&
7975 "Expected positive VF");
7976 unsigned VF = cast<ConstantInt>(
I.getOperand(1))->getZExtValue();
7977 bool IsScalable = cast<ConstantInt>(
I.getOperand(2))->isOne();
7983 visitTargetIntrinsic(
I, Intrinsic);
7992 if (CountVT.
bitsLT(VT)) {
8007 case Intrinsic::experimental_vector_partial_reduce_add: {
8016 std::deque<SDValue> Subvectors;
8017 Subvectors.push_back(
getValue(
I.getOperand(0)));
8018 for (
unsigned i = 0; i < ScaleFactor; i++) {
8021 {OpNode, SourceIndex}));
8025 while (Subvectors.size() > 1) {
8027 {Subvectors[0], Subvectors[1]}));
8028 Subvectors.pop_front();
8029 Subvectors.pop_front();
8032 assert(Subvectors.size() == 1 &&
8033 "There should only be one subvector after tree flattening");
8038 case Intrinsic::experimental_cttz_elts: {
8041 EVT OpVT =
Op.getValueType();
8044 visitTargetIntrinsic(
I, Intrinsic);
8059 !cast<ConstantSDNode>(
getValue(
I.getOperand(1)))->isZero();
8061 if (isa<ScalableVectorType>(
I.getOperand(0)->getType()))
8089 case Intrinsic::vector_insert: {
8097 if (
Index.getValueType() != VectorIdxTy)
8105 case Intrinsic::vector_extract: {
8113 if (
Index.getValueType() != VectorIdxTy)
8120 case Intrinsic::vector_reverse:
8121 visitVectorReverse(
I);
8123 case Intrinsic::vector_splice:
8124 visitVectorSplice(
I);
8126 case Intrinsic::callbr_landingpad:
8127 visitCallBrLandingPad(
I);
8129 case Intrinsic::vector_interleave2:
8130 visitVectorInterleave(
I);
8132 case Intrinsic::vector_deinterleave2:
8133 visitVectorDeinterleave(
I);
8135 case Intrinsic::experimental_vector_compress:
8142 case Intrinsic::experimental_convergence_anchor:
8143 case Intrinsic::experimental_convergence_entry:
8144 case Intrinsic::experimental_convergence_loop:
8145 visitConvergenceControl(
I, Intrinsic);
8147 case Intrinsic::experimental_vector_histogram_add: {
8148 visitVectorHistogram(
I, Intrinsic);
8154void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
8182 PendingConstrainedFP.push_back(OutChain);
8188 PendingConstrainedFPStrict.push_back(OutChain);
8200 Flags.setNoFPExcept(
true);
8202 if (
auto *FPOp = dyn_cast<FPMathOperator>(&FPI))
8203 Flags.copyFMF(*FPOp);
8208#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
8209 case Intrinsic::INTRINSIC: \
8210 Opcode = ISD::STRICT_##DAGN; \
8212#include "llvm/IR/ConstrainedOps.def"
8213 case Intrinsic::experimental_constrained_fmuladd: {
8220 pushOutChain(
Mul, EB);
8241 auto *
FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
8251 pushOutChain(Result, EB);
8258 std::optional<unsigned> ResOPC;
8260 case Intrinsic::vp_ctlz: {
8261 bool IsZeroUndef = cast<ConstantInt>(VPIntrin.
getArgOperand(1))->isOne();
8262 ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
8265 case Intrinsic::vp_cttz: {
8266 bool IsZeroUndef = cast<ConstantInt>(VPIntrin.
getArgOperand(1))->isOne();
8267 ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
8270 case Intrinsic::vp_cttz_elts: {
8271 bool IsZeroPoison = cast<ConstantInt>(VPIntrin.
getArgOperand(1))->isOne();
8272 ResOPC = IsZeroPoison ? ISD::VP_CTTZ_ELTS_ZERO_UNDEF : ISD::VP_CTTZ_ELTS;
8275#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \
8276 case Intrinsic::VPID: \
8277 ResOPC = ISD::VPSD; \
8279#include "llvm/IR/VPIntrinsics.def"
8284 "Inconsistency: no SDNode available for this VPIntrinsic!");
8286 if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
8287 *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
8289 return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
8290 : ISD::VP_REDUCE_FMUL;
8296void SelectionDAGBuilder::visitVPLoad(
8322void SelectionDAGBuilder::visitVPGather(
8358 {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
8364void SelectionDAGBuilder::visitVPStore(
8368 EVT VT = OpValues[0].getValueType();
8386void SelectionDAGBuilder::visitVPScatter(
8391 EVT VT = OpValues[0].getValueType();
8421 {getMemoryRoot(), OpValues[0], Base, Index, Scale,
8422 OpValues[2], OpValues[3]},
8428void SelectionDAGBuilder::visitVPStridedLoad(
8447 OpValues[2], OpValues[3], MMO,
8455void SelectionDAGBuilder::visitVPStridedStore(
8459 EVT VT = OpValues[0].getValueType();
8471 DAG.
getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8479void SelectionDAGBuilder::visitVPCmp(
const VPCmpIntrinsic &VPIntrin) {
8504 "Unexpected target EVL type");
8513void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8520 if (
const auto *CmpI = dyn_cast<VPCmpIntrinsic>(&VPIntrin))
8521 return visitVPCmp(*CmpI);
8532 "Unexpected target EVL type");
8536 for (
unsigned I = 0;
I < VPIntrin.
arg_size(); ++
I) {
8538 if (
I == EVLParamPos)
8546 if (
auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8553 visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8555 case ISD::VP_GATHER:
8556 visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8558 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8559 visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8562 visitVPStore(VPIntrin, OpValues);
8564 case ISD::VP_SCATTER:
8565 visitVPScatter(VPIntrin, OpValues);
8567 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8568 visitVPStridedStore(VPIntrin, OpValues);
8570 case ISD::VP_FMULADD: {
8571 assert(OpValues.
size() == 5 &&
"Unexpected number of operands");
8573 if (
auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8580 ISD::VP_FMUL,
DL, VTs,
8581 {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8584 {
Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8589 case ISD::VP_IS_FPCLASS: {
8592 auto Constant = OpValues[1]->getAsZExtVal();
8595 {OpValues[0],
Check, OpValues[2], OpValues[3]});
8599 case ISD::VP_INTTOPTR: {
8610 case ISD::VP_PTRTOINT: {
8625 case ISD::VP_CTLZ_ZERO_UNDEF:
8627 case ISD::VP_CTTZ_ZERO_UNDEF:
8628 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
8629 case ISD::VP_CTTZ_ELTS: {
8631 DAG.
getNode(Opcode,
DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8650 if (CallSiteIndex) {
8664 assert(BeginLabel &&
"BeginLabel should've been set");
8678 assert(
II &&
"II should've been set");
8689std::pair<SDValue, SDValue>
8703 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
8706 "Non-null chain expected with non-tail call!");
8707 assert((Result.second.getNode() || !Result.first.getNode()) &&
8708 "Null value expected with tail call!");
8710 if (!Result.second.getNode()) {
8717 PendingExports.clear();
8732 bool isTailCall,
bool isMustTailCall,
8742 const Value *SwiftErrorVal =
nullptr;
8748 auto *Caller = CB.
getParent()->getParent();
8749 if (Caller->getFnAttribute(
"disable-tail-calls").getValueAsString() ==
8750 "true" && !isMustTailCall)
8757 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
8766 if (V->getType()->isEmptyTy())
8770 Entry.Node = ArgNode; Entry.Ty = V->getType();
8772 Entry.setAttributes(&CB,
I - CB.
arg_begin());
8784 Args.push_back(Entry);
8788 if (Entry.IsSRet && isa<Instruction>(V))
8796 Value *V = Bundle->Inputs[0];
8798 Entry.Node = ArgNode;
8799 Entry.Ty = V->getType();
8800 Entry.IsCFGuardTarget =
true;
8801 Args.push_back(Entry);
8819 "Target doesn't support calls with kcfi operand bundles.");
8820 CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
8827 auto *Token = Bundle->Inputs[0].get();
8828 ConvControlToken =
getValue(Token);
8846 "This target doesn't support calls with ptrauth operand bundles.");
8850 std::pair<SDValue, SDValue> Result =
lowerInvokable(CLI, EHPadBB);
8852 if (Result.first.getNode()) {
8874 if (
const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
8893 bool ConstantMemory =
false;
8898 ConstantMemory =
true;
8909 if (!ConstantMemory)
8916void SelectionDAGBuilder::processIntegerCallValue(
const Instruction &
I,
8930bool SelectionDAGBuilder::visitMemCmpBCmpCall(
const CallInst &
I) {
8931 const Value *
LHS =
I.getArgOperand(0), *
RHS =
I.getArgOperand(1);
8945 if (Res.first.getNode()) {
8946 processIntegerCallValue(
I, Res.first,
true);
8960 auto hasFastLoadsAndCompare = [&](
unsigned NumBits) {
8983 switch (NumBitsToCompare) {
8995 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
9013 processIntegerCallValue(
I, Cmp,
false);
9022bool SelectionDAGBuilder::visitMemChrCall(
const CallInst &
I) {
9023 const Value *Src =
I.getArgOperand(0);
9028 std::pair<SDValue, SDValue> Res =
9032 if (Res.first.getNode()) {
9046bool SelectionDAGBuilder::visitMemPCpyCall(
const CallInst &
I) {
9054 Align Alignment = std::min(DstAlign, SrcAlign);
9063 Root, sdl, Dst, Src,
Size, Alignment,
false,
false,
nullptr,
9067 "** memcpy should not be lowered as TailCall in mempcpy context **");
9085bool SelectionDAGBuilder::visitStrCpyCall(
const CallInst &
I,
bool isStpcpy) {
9086 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9089 std::pair<SDValue, SDValue> Res =
9094 if (Res.first.getNode()) {
9108bool SelectionDAGBuilder::visitStrCmpCall(
const CallInst &
I) {
9109 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9112 std::pair<SDValue, SDValue> Res =
9117 if (Res.first.getNode()) {
9118 processIntegerCallValue(
I, Res.first,
true);
9131bool SelectionDAGBuilder::visitStrLenCall(
const CallInst &
I) {
9132 const Value *Arg0 =
I.getArgOperand(0);
9135 std::pair<SDValue, SDValue> Res =
9138 if (Res.first.getNode()) {
9139 processIntegerCallValue(
I, Res.first,
false);
9152bool SelectionDAGBuilder::visitStrNLenCall(
const CallInst &
I) {
9153 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9156 std::pair<SDValue, SDValue> Res =
9160 if (Res.first.getNode()) {
9161 processIntegerCallValue(
I, Res.first,
false);
9174bool SelectionDAGBuilder::visitUnaryFloatCall(
const CallInst &
I,
9177 if (!
I.onlyReadsMemory())
9181 Flags.copyFMF(cast<FPMathOperator>(
I));
9194bool SelectionDAGBuilder::visitBinaryFloatCall(
const CallInst &
I,
9197 if (!
I.onlyReadsMemory())
9201 Flags.copyFMF(cast<FPMathOperator>(
I));
9210void SelectionDAGBuilder::visitCall(
const CallInst &
I) {
9212 if (
I.isInlineAsm()) {
9220 if (
F->isDeclaration()) {
9222 unsigned IID =
F->getIntrinsicID();
9225 IID =
II->getIntrinsicID(
F);
9228 visitIntrinsicCall(
I, IID);
9237 if (!
I.isNoBuiltin() && !
I.isStrictFP() && !
F->hasLocalLinkage() &&
9243 if (visitMemCmpBCmpCall(
I))
9246 case LibFunc_copysign:
9247 case LibFunc_copysignf:
9248 case LibFunc_copysignl:
9251 if (
I.onlyReadsMemory()) {
9255 LHS.getValueType(), LHS, RHS));
9277 case LibFunc_fminimum_num:
9278 case LibFunc_fminimum_numf:
9279 case LibFunc_fminimum_numl:
9283 case LibFunc_fmaximum_num:
9284 case LibFunc_fmaximum_numf:
9285 case LibFunc_fmaximum_numl:
9346 case LibFunc_sqrt_finite:
9347 case LibFunc_sqrtf_finite:
9348 case LibFunc_sqrtl_finite:
9353 case LibFunc_floorf:
9354 case LibFunc_floorl:
9358 case LibFunc_nearbyint:
9359 case LibFunc_nearbyintf:
9360 case LibFunc_nearbyintl:
9377 case LibFunc_roundf:
9378 case LibFunc_roundl:
9383 case LibFunc_truncf:
9384 case LibFunc_truncl:
9401 case LibFunc_exp10f:
9402 case LibFunc_exp10l:
9407 case LibFunc_ldexpf:
9408 case LibFunc_ldexpl:
9412 case LibFunc_memcmp:
9413 if (visitMemCmpBCmpCall(
I))
9416 case LibFunc_mempcpy:
9417 if (visitMemPCpyCall(
I))
9420 case LibFunc_memchr:
9421 if (visitMemChrCall(
I))
9424 case LibFunc_strcpy:
9425 if (visitStrCpyCall(
I,
false))
9428 case LibFunc_stpcpy:
9429 if (visitStrCpyCall(
I,
true))
9432 case LibFunc_strcmp:
9433 if (visitStrCmpCall(
I))
9436 case LibFunc_strlen:
9437 if (visitStrLenCall(
I))
9440 case LibFunc_strnlen:
9441 if (visitStrNLenCall(
I))
9456 assert(!
I.hasOperandBundlesOtherThan(
9457 {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
9458 LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated,
9459 LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_kcfi,
9460 LLVMContext::OB_convergencectrl}) &&
9461 "Cannot lower calls with arbitrary operand bundles!");
9465 if (
I.hasDeoptState())
9481 const auto *Key = cast<ConstantInt>(PAB->Inputs[0]);
9482 const Value *Discriminator = PAB->Inputs[1];
9484 assert(Key->getType()->isIntegerTy(32) &&
"Invalid ptrauth key");
9485 assert(Discriminator->getType()->isIntegerTy(64) &&
9486 "Invalid ptrauth discriminator");
9490 if (
const auto *CalleeCPA = dyn_cast<ConstantPtrAuth>(CalleeV))
9491 if (CalleeCPA->isKnownCompatibleWith(Key, Discriminator,
9497 assert(!isa<Function>(CalleeV) &&
"invalid direct ptrauth call");
9532 for (
const auto &Code : Codes)
9547 SDISelAsmOperandInfo &MatchingOpInfo,
9549 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
9555 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9557 OpInfo.ConstraintVT);
9558 std::pair<unsigned, const TargetRegisterClass *> InputRC =
9560 MatchingOpInfo.ConstraintVT);
9561 if ((OpInfo.ConstraintVT.isInteger() !=
9562 MatchingOpInfo.ConstraintVT.isInteger()) ||
9563 (MatchRC.second != InputRC.second)) {
9566 " with a matching output constraint of"
9567 " incompatible type!");
9569 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9576 SDISelAsmOperandInfo &OpInfo,
9589 const Value *OpVal = OpInfo.CallOperandVal;
9590 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
9591 isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
9607 DL.getPrefTypeAlign(Ty),
false,
9610 Chain = DAG.
getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9613 OpInfo.CallOperand = StackSlot;
9626static std::optional<unsigned>
9628 SDISelAsmOperandInfo &OpInfo,
9629 SDISelAsmOperandInfo &RefOpInfo) {
9640 return std::nullopt;
9644 unsigned AssignedReg;
9647 &
TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
9650 return std::nullopt;
9655 const MVT RegVT = *
TRI.legalclasstypes_begin(*RC);
9657 if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
9666 !
TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
9671 if (RegVT.
getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
9676 OpInfo.CallOperand =
9678 OpInfo.ConstraintVT = RegVT;
9682 }
else if (RegVT.
isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
9685 OpInfo.CallOperand =
9687 OpInfo.ConstraintVT = VT;
9694 if (OpInfo.isMatchingInputConstraint())
9695 return std::nullopt;
9697 EVT ValueVT = OpInfo.ConstraintVT;
9698 if (OpInfo.ConstraintVT == MVT::Other)
9702 unsigned NumRegs = 1;
9703 if (OpInfo.ConstraintVT != MVT::Other)
9718 I = std::find(
I, RC->
end(), AssignedReg);
9719 if (
I == RC->
end()) {
9722 return {AssignedReg};
9726 for (; NumRegs; --NumRegs, ++
I) {
9727 assert(
I != RC->
end() &&
"Ran out of registers to allocate!");
9732 OpInfo.AssignedRegs =
RegsForValue(Regs, RegVT, ValueVT);
9733 return std::nullopt;
9738 const std::vector<SDValue> &AsmNodeOperands) {
9741 for (; OperandNo; --OperandNo) {
9743 unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
9746 (
F.isRegDefKind() ||
F.isRegDefEarlyClobberKind() ||
F.isMemKind()) &&
9747 "Skipped past definitions?");
9748 CurOp +=
F.getNumOperandRegisters() + 1;
9759 explicit ExtraFlags(
const CallBase &Call) {
9761 if (
IA->hasSideEffects())
9763 if (
IA->isAlignStack())
9765 if (
Call.isConvergent())
9786 unsigned get()
const {
return Flags; }
9793 if (
auto *GA = dyn_cast<GlobalAddressSDNode>(
Op)) {
9794 auto Fn = dyn_cast_or_null<Function>(GA->getGlobal());
9809void SelectionDAGBuilder::visitInlineAsm(
const CallBase &Call,
9822 bool HasSideEffect =
IA->hasSideEffects();
9823 ExtraFlags ExtraInfo(Call);
9825 for (
auto &
T : TargetConstraints) {
9826 ConstraintOperands.
push_back(SDISelAsmOperandInfo(
T));
9827 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.
back();
9829 if (OpInfo.CallOperandVal)
9830 OpInfo.CallOperand =
getValue(OpInfo.CallOperandVal);
9833 HasSideEffect = OpInfo.hasMemory(TLI);
9842 OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
9845 return emitInlineAsmError(Call,
"constraint '" +
Twine(
T.ConstraintCode) +
9846 "' expects an integer constant "
9849 ExtraInfo.update(
T);
9856 bool EmitEHLabels = isa<InvokeInst>(Call);
9858 assert(EHPadBB &&
"InvokeInst must have an EHPadBB");
9860 bool IsCallBr = isa<CallBrInst>(Call);
9862 if (IsCallBr || EmitEHLabels) {
9871 Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
9876 IA->collectAsmStrs(AsmStrs);
9879 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9887 if (OpInfo.hasMatchingInput()) {
9888 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
9919 if (OpInfo.isIndirect &&
isFunction(OpInfo.CallOperand) &&
9922 OpInfo.isIndirect =
false;
9929 !OpInfo.isIndirect) {
9930 assert((OpInfo.isMultipleAlternative ||
9932 "Can only indirectify direct input operands!");
9938 OpInfo.CallOperandVal =
nullptr;
9941 OpInfo.isIndirect =
true;
9947 std::vector<SDValue> AsmNodeOperands;
9948 AsmNodeOperands.push_back(
SDValue());
9955 const MDNode *SrcLoc =
Call.getMetadata(
"srcloc");
9965 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9967 SDISelAsmOperandInfo &RefOpInfo =
9968 OpInfo.isMatchingInputConstraint()
9969 ? ConstraintOperands[OpInfo.getMatchedOperand()]
9971 const auto RegError =
9976 const char *
RegName =
TRI.getName(*RegError);
9977 emitInlineAsmError(Call,
"register '" +
Twine(
RegName) +
9978 "' allocated for constraint '" +
9979 Twine(OpInfo.ConstraintCode) +
9980 "' does not match required type");
9984 auto DetectWriteToReservedRegister = [&]() {
9987 for (
unsigned Reg : OpInfo.AssignedRegs.Regs) {
9989 TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
9991 emitInlineAsmError(Call,
"write to reserved register '" +
10000 !OpInfo.isMatchingInputConstraint())) &&
10001 "Only address as input operand is allowed.");
10003 switch (OpInfo.Type) {
10009 "Failed to convert memory constraint code to constraint id.");
10013 OpFlags.setMemConstraint(ConstraintID);
10016 AsmNodeOperands.push_back(OpInfo.CallOperand);
10021 if (OpInfo.AssignedRegs.Regs.empty()) {
10022 emitInlineAsmError(
10023 Call,
"couldn't allocate output register for constraint '" +
10024 Twine(OpInfo.ConstraintCode) +
"'");
10028 if (DetectWriteToReservedRegister())
10033 OpInfo.AssignedRegs.AddInlineAsmOperands(
10042 SDValue InOperandVal = OpInfo.CallOperand;
10044 if (OpInfo.isMatchingInputConstraint()) {
10050 if (
Flag.isRegDefKind() ||
Flag.isRegDefEarlyClobberKind()) {
10051 if (OpInfo.isIndirect) {
10053 emitInlineAsmError(Call,
"inline asm not supported yet: "
10054 "don't know how to handle tied "
10055 "indirect register inputs");
10063 auto *
R = cast<RegisterSDNode>(AsmNodeOperands[CurOp+1]);
10065 MVT RegVT =
R->getSimpleValueType(0);
10069 :
TRI.getMinimalPhysRegClass(TiedReg);
10070 for (
unsigned i = 0, e =
Flag.getNumOperandRegisters(); i != e; ++i)
10077 MatchedRegs.getCopyToRegs(InOperandVal,
DAG, dl, Chain, &Glue, &Call);
10079 OpInfo.getMatchedOperand(), dl,
DAG,
10084 assert(
Flag.isMemKind() &&
"Unknown matching constraint!");
10085 assert(
Flag.getNumOperandRegisters() == 1 &&
10086 "Unexpected number of operands");
10089 Flag.clearMemConstraint();
10090 Flag.setMatchingOp(OpInfo.getMatchedOperand());
10093 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
10104 std::vector<SDValue> Ops;
10109 if (isa<ConstantSDNode>(InOperandVal)) {
10110 emitInlineAsmError(Call,
"value out of range for constraint '" +
10111 Twine(OpInfo.ConstraintCode) +
"'");
10115 emitInlineAsmError(Call,
10116 "invalid operand for inline asm constraint '" +
10117 Twine(OpInfo.ConstraintCode) +
"'");
10130 assert((OpInfo.isIndirect ||
10132 "Operand must be indirect to be a mem!");
10135 "Memory operands expect pointer values");
10140 "Failed to convert memory constraint code to constraint id.");
10144 ResOpType.setMemConstraint(ConstraintID);
10148 AsmNodeOperands.push_back(InOperandVal);
10156 "Failed to convert memory constraint code to constraint id.");
10160 SDValue AsmOp = InOperandVal;
10162 auto *GA = cast<GlobalAddressSDNode>(InOperandVal);
10170 ResOpType.setMemConstraint(ConstraintID);
10172 AsmNodeOperands.push_back(
10175 AsmNodeOperands.push_back(AsmOp);
10181 emitInlineAsmError(Call,
"unknown asm constraint '" +
10182 Twine(OpInfo.ConstraintCode) +
"'");
10187 if (OpInfo.isIndirect) {
10188 emitInlineAsmError(
10189 Call,
"Don't know how to handle indirect register inputs yet "
10190 "for constraint '" +
10191 Twine(OpInfo.ConstraintCode) +
"'");
10196 if (OpInfo.AssignedRegs.Regs.empty()) {
10197 emitInlineAsmError(Call,
10198 "couldn't allocate input reg for constraint '" +
10199 Twine(OpInfo.ConstraintCode) +
"'");
10203 if (DetectWriteToReservedRegister())
10208 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal,
DAG, dl, Chain, &Glue,
10212 0, dl,
DAG, AsmNodeOperands);
10218 if (!OpInfo.AssignedRegs.Regs.empty())
10228 if (Glue.
getNode()) AsmNodeOperands.push_back(Glue);
10232 DAG.
getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
10243 if (
StructType *StructResult = dyn_cast<StructType>(CallResultType))
10244 ResultTypes = StructResult->elements();
10245 else if (!CallResultType->
isVoidTy())
10246 ResultTypes =
ArrayRef(CallResultType);
10248 auto CurResultType = ResultTypes.
begin();
10249 auto handleRegAssign = [&](
SDValue V) {
10250 assert(CurResultType != ResultTypes.
end() &&
"Unexpected value");
10251 assert((*CurResultType)->isSized() &&
"Unexpected unsized type");
10264 if (ResultVT !=
V.getValueType() &&
10267 else if (ResultVT !=
V.getValueType() && ResultVT.
isInteger() &&
10268 V.getValueType().isInteger()) {
10274 assert(ResultVT ==
V.getValueType() &&
"Asm result value mismatch!");
10280 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10284 if (OpInfo.AssignedRegs.Regs.empty())
10287 switch (OpInfo.ConstraintType) {
10291 Chain, &Glue, &Call);
10303 assert(
false &&
"Unexpected unknown constraint");
10307 if (OpInfo.isIndirect) {
10308 const Value *
Ptr = OpInfo.CallOperandVal;
10309 assert(
Ptr &&
"Expected value CallOperandVal for indirect asm operand");
10315 assert(!
Call.getType()->isVoidTy() &&
"Bad inline asm!");
10318 handleRegAssign(V);
10320 handleRegAssign(Val);
10326 if (!ResultValues.
empty()) {
10327 assert(CurResultType == ResultTypes.
end() &&
10328 "Mismatch in number of ResultTypes");
10330 "Mismatch in number of output operands in asm result");
10338 if (!OutChains.
empty())
10341 if (EmitEHLabels) {
10342 Chain = lowerEndEH(Chain, cast<InvokeInst>(&Call), EHPadBB, BeginLabel);
10346 if (ResultValues.
empty() || HasSideEffect || !OutChains.
empty() || IsCallBr ||
10351void SelectionDAGBuilder::emitInlineAsmError(
const CallBase &Call,
10352 const Twine &Message) {
10361 if (ValueVTs.
empty())
10365 for (
const EVT &VT : ValueVTs)
10371void SelectionDAGBuilder::visitVAStart(
const CallInst &
I) {
10378void SelectionDAGBuilder::visitVAArg(
const VAArgInst &
I) {
10384 DL.getABITypeAlign(
I.getType()).value());
10387 if (
I.getType()->isPointerTy())
10393void SelectionDAGBuilder::visitVAEnd(
const CallInst &
I) {
10400void SelectionDAGBuilder::visitVACopy(
const CallInst &
I) {
10412 std::optional<ConstantRange> CR =
getRange(
I);
10414 if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped())
10417 APInt Lo = CR->getUnsignedMin();
10418 if (!
Lo.isMinValue())
10421 APInt Hi = CR->getUnsignedMax();
10422 unsigned Bits = std::max(
Hi.getActiveBits(),
10431 unsigned NumVals =
Op.getNode()->getNumValues();
10438 for (
unsigned I = 1;
I != NumVals; ++
I)
10452 unsigned ArgIdx,
unsigned NumArgs,
SDValue Callee,
Type *ReturnTy,
10455 Args.reserve(NumArgs);
10459 for (
unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
10460 ArgI != ArgE; ++ArgI) {
10461 const Value *V = Call->getOperand(ArgI);
10463 assert(!V->getType()->isEmptyTy() &&
"Empty type passed to intrinsic.");
10467 Entry.Ty = V->getType();
10468 Entry.setAttributes(Call, ArgI);
10469 Args.push_back(Entry);
10474 .
setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
10503 for (
unsigned I = StartIdx;
I < Call.arg_size();
I++) {
10518void SelectionDAGBuilder::visitStackmap(
const CallInst &CI) {
10552 assert(
ID.getValueType() == MVT::i64);
10583void SelectionDAGBuilder::visitPatchpoint(
const CallBase &CB,
10599 if (
auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
10602 else if (
auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
10604 SDLoc(SymbolicCallee),
10605 SymbolicCallee->getValueType(0));
10615 "Not enough arguments provided to the patchpoint intrinsic");
10618 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
10636 "Expected a callseq node.");
10638 bool HasGlue =
Call->getGluedNode();
10668 unsigned NumCallRegArgs =
Call->getNumOperands() - (HasGlue ? 4 : 3);
10669 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
10678 for (
unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i !=
e; ++i)
10689 if (IsAnyRegCC && HasDef) {
10694 assert(ValueVTs.
size() == 1 &&
"Expected only one return value type.");
10718 if (IsAnyRegCC && HasDef) {
10730void SelectionDAGBuilder::visitVectorReduce(
const CallInst &
I,
10731 unsigned Intrinsic) {
10735 if (
I.arg_size() > 1)
10741 if (
auto *FPMO = dyn_cast<FPMathOperator>(&
I))
10744 switch (Intrinsic) {
10745 case Intrinsic::vector_reduce_fadd:
10753 case Intrinsic::vector_reduce_fmul:
10761 case Intrinsic::vector_reduce_add:
10764 case Intrinsic::vector_reduce_mul:
10767 case Intrinsic::vector_reduce_and:
10770 case Intrinsic::vector_reduce_or:
10773 case Intrinsic::vector_reduce_xor:
10776 case Intrinsic::vector_reduce_smax:
10779 case Intrinsic::vector_reduce_smin:
10782 case Intrinsic::vector_reduce_umax:
10785 case Intrinsic::vector_reduce_umin:
10788 case Intrinsic::vector_reduce_fmax:
10791 case Intrinsic::vector_reduce_fmin:
10794 case Intrinsic::vector_reduce_fmaximum:
10797 case Intrinsic::vector_reduce_fminimum:
10811 Attrs.push_back(Attribute::SExt);
10813 Attrs.push_back(Attribute::ZExt);
10815 Attrs.push_back(Attribute::InReg);
10825std::pair<SDValue, SDValue>
10839 RetTys.
swap(OldRetTys);
10840 Offsets.swap(OldOffsets);
10842 for (
size_t i = 0, e = OldRetTys.
size(); i != e; ++i) {
10843 EVT RetVT = OldRetTys[i];
10847 unsigned RegisterVTByteSZ = RegisterVT.
getSizeInBits() / 8;
10848 RetTys.
append(NumRegs, RegisterVT);
10849 for (
unsigned j = 0; j != NumRegs; ++j)
10862 int DemoteStackIdx = -100;
10873 DL.getAllocaAddrSpace());
10877 Entry.Node = DemoteStackSlot;
10878 Entry.Ty = StackSlotPtrType;
10879 Entry.IsSExt =
false;
10880 Entry.IsZExt =
false;
10881 Entry.IsInReg =
false;
10882 Entry.IsSRet =
true;
10883 Entry.IsNest =
false;
10884 Entry.IsByVal =
false;
10885 Entry.IsByRef =
false;
10886 Entry.IsReturned =
false;
10887 Entry.IsSwiftSelf =
false;
10888 Entry.IsSwiftAsync =
false;
10889 Entry.IsSwiftError =
false;
10890 Entry.IsCFGuardTarget =
false;
10891 Entry.Alignment = Alignment;
10903 for (
unsigned I = 0, E = RetTys.
size();
I != E; ++
I) {
10905 if (NeedsRegBlock) {
10906 Flags.setInConsecutiveRegs();
10907 if (
I == RetTys.
size() - 1)
10908 Flags.setInConsecutiveRegsLast();
10910 EVT VT = RetTys[
I];
10915 for (
unsigned i = 0; i != NumRegs; ++i) {
10917 MyFlags.
Flags = Flags;
10918 MyFlags.
VT = RegisterVT;
10919 MyFlags.
ArgVT = VT;
10924 cast<PointerType>(CLI.
RetTy)->getAddressSpace());
10932 CLI.
Ins.push_back(MyFlags);
10946 CLI.
Ins.push_back(MyFlags);
10954 for (
unsigned i = 0, e = Args.size(); i != e; ++i) {
10958 Type *FinalType = Args[i].Ty;
10959 if (Args[i].IsByVal)
10960 FinalType = Args[i].IndirectType;
10963 for (
unsigned Value = 0, NumValues = ValueVTs.
size();
Value != NumValues;
10968 Args[i].Node.getResNo() +
Value);
10975 Flags.setOrigAlign(OriginalAlignment);
10977 if (Args[i].Ty->isPointerTy()) {
10978 Flags.setPointer();
10979 Flags.setPointerAddrSpace(
10980 cast<PointerType>(Args[i].Ty)->getAddressSpace());
10982 if (Args[i].IsZExt)
10984 if (Args[i].IsSExt)
10986 if (Args[i].IsInReg) {
10990 isa<StructType>(FinalType)) {
10993 Flags.setHvaStart();
10999 if (Args[i].IsSRet)
11001 if (Args[i].IsSwiftSelf)
11002 Flags.setSwiftSelf();
11003 if (Args[i].IsSwiftAsync)
11004 Flags.setSwiftAsync();
11005 if (Args[i].IsSwiftError)
11006 Flags.setSwiftError();
11007 if (Args[i].IsCFGuardTarget)
11008 Flags.setCFGuardTarget();
11009 if (Args[i].IsByVal)
11011 if (Args[i].IsByRef)
11013 if (Args[i].IsPreallocated) {
11014 Flags.setPreallocated();
11022 if (Args[i].IsInAlloca) {
11023 Flags.setInAlloca();
11032 if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
11033 unsigned FrameSize =
DL.getTypeAllocSize(Args[i].IndirectType);
11034 Flags.setByValSize(FrameSize);
11037 if (
auto MA = Args[i].Alignment)
11041 }
else if (
auto MA = Args[i].Alignment) {
11044 MemAlign = OriginalAlignment;
11046 Flags.setMemAlign(MemAlign);
11047 if (Args[i].IsNest)
11050 Flags.setInConsecutiveRegs();
11059 if (Args[i].IsSExt)
11061 else if (Args[i].IsZExt)
11066 if (Args[i].IsReturned && !
Op.getValueType().isVector() &&
11071 Args[i].Ty->getPointerAddressSpace())) &&
11072 RetTys.
size() == NumValues &&
"unexpected use of 'returned'");
11085 CLI.
RetZExt == Args[i].IsZExt))
11086 Flags.setReturned();
11092 for (
unsigned j = 0; j != NumParts; ++j) {
11099 j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
11100 if (NumParts > 1 && j == 0)
11104 if (j == NumParts - 1)
11108 CLI.
Outs.push_back(MyFlags);
11109 CLI.
OutVals.push_back(Parts[j]);
11112 if (NeedsRegBlock &&
Value == NumValues - 1)
11113 CLI.
Outs[CLI.
Outs.size() - 1].Flags.setInConsecutiveRegsLast();
11125 "LowerCall didn't return a valid chain!");
11127 "LowerCall emitted a return value for a tail call!");
11129 "LowerCall didn't emit the correct number of values!");
11141 for (
unsigned i = 0, e = CLI.
Ins.size(); i != e; ++i) {
11142 assert(InVals[i].
getNode() &&
"LowerCall emitted a null value!");
11143 assert(
EVT(CLI.
Ins[i].VT) == InVals[i].getValueType() &&
11144 "LowerCall emitted a value with the wrong type!");
11157 assert(PVTs.
size() == 1 &&
"Pointers should fit in one register");
11158 EVT PtrVT = PVTs[0];
11160 unsigned NumValues = RetTys.
size();
11161 ReturnValues.
resize(NumValues);
11167 Flags.setNoUnsignedWrap(
true);
11171 for (
unsigned i = 0; i < NumValues; ++i) {
11178 DemoteStackIdx, Offsets[i]),
11180 ReturnValues[i] = L;
11181 Chains[i] = L.getValue(1);
11188 std::optional<ISD::NodeType> AssertOp;
11193 unsigned CurReg = 0;
11194 for (
EVT VT : RetTys) {
11201 CLI.
DAG, CLI.
DL, &InVals[CurReg], NumRegs, RegisterVT, VT,
nullptr,
11209 if (ReturnValues.
empty())
11215 return std::make_pair(Res, CLI.
Chain);
11232 if (
N->getNumValues() == 1) {
11240 "Lowering returned the wrong number of results!");
11243 for (
unsigned I = 0, E =
N->getNumValues();
I != E; ++
I)
11256 cast<RegisterSDNode>(
Op.getOperand(1))->getReg() != Reg) &&
11257 "Copy from a reg to the same reg!");
11271 ExtendType = PreferredExtendIt->second;
11274 PendingExports.push_back(Chain);
11286 return A->use_empty();
11288 const BasicBlock &Entry =
A->getParent()->front();
11289 for (
const User *U :
A->users())
11290 if (cast<Instruction>(U)->
getParent() != &Entry || isa<SwitchInst>(U))
11298 std::pair<const AllocaInst *, const StoreInst *>>;
11310 enum StaticAllocaInfo {
Unknown, Clobbered, Elidable };
11312 unsigned NumArgs = FuncInfo->
Fn->
arg_size();
11313 StaticAllocas.
reserve(NumArgs * 2);
11315 auto GetInfoIfStaticAlloca = [&](
const Value *V) -> StaticAllocaInfo * {
11318 V = V->stripPointerCasts();
11319 const auto *AI = dyn_cast<AllocaInst>(V);
11320 if (!AI || !AI->isStaticAlloca() || !FuncInfo->
StaticAllocaMap.count(AI))
11323 return &Iter.first->second;
11333 const auto *SI = dyn_cast<StoreInst>(&
I);
11340 if (
I.isDebugOrPseudoInst())
11344 for (
const Use &U :
I.operands()) {
11345 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(U))
11346 *
Info = StaticAllocaInfo::Clobbered;
11352 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
11353 *
Info = StaticAllocaInfo::Clobbered;
11356 const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
11357 StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(Dst);
11360 const AllocaInst *AI = cast<AllocaInst>(Dst);
11363 if (*
Info != StaticAllocaInfo::Unknown)
11371 const Value *Val = SI->getValueOperand()->stripPointerCasts();
11372 const auto *Arg = dyn_cast<Argument>(Val);
11373 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
11374 Arg->getType()->isEmptyTy() ||
11375 DL.getTypeStoreSize(Arg->getType()) !=
11377 !
DL.typeSizeEqualsStoreSize(Arg->getType()) ||
11378 ArgCopyElisionCandidates.
count(Arg)) {
11379 *
Info = StaticAllocaInfo::Clobbered;
11383 LLVM_DEBUG(
dbgs() <<
"Found argument copy elision candidate: " << *AI
11387 *
Info = StaticAllocaInfo::Elidable;
11388 ArgCopyElisionCandidates.
insert({Arg, {AI, SI}});
11393 if (ArgCopyElisionCandidates.
size() == NumArgs)
11407 auto *LNode = dyn_cast<LoadSDNode>(ArgVals[0]);
11410 auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
11417 auto ArgCopyIter = ArgCopyElisionCandidates.
find(&Arg);
11418 assert(ArgCopyIter != ArgCopyElisionCandidates.
end());
11419 const AllocaInst *AI = ArgCopyIter->second.first;
11420 int FixedIndex = FINode->getIndex();
11422 int OldIndex = AllocaIndex;
11426 dbgs() <<
" argument copy elision failed due to bad fixed stack "
11432 LLVM_DEBUG(
dbgs() <<
" argument copy elision failed: alignment of alloca "
11433 "greater than stack argument alignment ("
11434 <<
DebugStr(RequiredAlignment) <<
" vs "
11442 dbgs() <<
"Eliding argument copy from " << Arg <<
" to " << *AI <<
'\n'
11443 <<
" Replacing frame index " << OldIndex <<
" with " << FixedIndex
11449 AllocaIndex = FixedIndex;
11450 ArgCopyElisionFrameIndexMap.
insert({OldIndex, FixedIndex});
11451 for (
SDValue ArgVal : ArgVals)
11455 const StoreInst *SI = ArgCopyIter->second.second;
11456 ElidedArgCopyInstrs.
insert(SI);
11468void SelectionDAGISel::LowerArguments(
const Function &
F) {
11475 if (
F.hasFnAttribute(Attribute::Naked))
11493 Ins.push_back(RetArg);
11501 ArgCopyElisionCandidates);
11505 unsigned ArgNo = Arg.getArgNo();
11508 bool isArgValueUsed = !Arg.use_empty();
11509 unsigned PartBase = 0;
11510 Type *FinalType = Arg.getType();
11511 if (Arg.hasAttribute(Attribute::ByVal))
11512 FinalType = Arg.getParamByValType();
11514 FinalType,
F.getCallingConv(),
F.isVarArg(),
DL);
11515 for (
unsigned Value = 0, NumValues = ValueVTs.
size();
11522 if (Arg.getType()->isPointerTy()) {
11523 Flags.setPointer();
11524 Flags.setPointerAddrSpace(
11525 cast<PointerType>(Arg.getType())->getAddressSpace());
11527 if (Arg.hasAttribute(Attribute::ZExt))
11529 if (Arg.hasAttribute(Attribute::SExt))
11531 if (Arg.hasAttribute(Attribute::InReg)) {
11535 isa<StructType>(Arg.getType())) {
11538 Flags.setHvaStart();
11544 if (Arg.hasAttribute(Attribute::StructRet))
11546 if (Arg.hasAttribute(Attribute::SwiftSelf))
11547 Flags.setSwiftSelf();
11548 if (Arg.hasAttribute(Attribute::SwiftAsync))
11549 Flags.setSwiftAsync();
11550 if (Arg.hasAttribute(Attribute::SwiftError))
11551 Flags.setSwiftError();
11552 if (Arg.hasAttribute(Attribute::ByVal))
11554 if (Arg.hasAttribute(Attribute::ByRef))
11556 if (Arg.hasAttribute(Attribute::InAlloca)) {
11557 Flags.setInAlloca();
11565 if (Arg.hasAttribute(Attribute::Preallocated)) {
11566 Flags.setPreallocated();
11578 const Align OriginalAlignment(
11580 Flags.setOrigAlign(OriginalAlignment);
11583 Type *ArgMemTy =
nullptr;
11584 if (
Flags.isByVal() ||
Flags.isInAlloca() ||
Flags.isPreallocated() ||
11587 ArgMemTy = Arg.getPointeeInMemoryValueType();
11589 uint64_t MemSize =
DL.getTypeAllocSize(ArgMemTy);
11594 if (
auto ParamAlign = Arg.getParamStackAlign())
11595 MemAlign = *ParamAlign;
11596 else if ((ParamAlign = Arg.getParamAlign()))
11597 MemAlign = *ParamAlign;
11600 if (
Flags.isByRef())
11601 Flags.setByRefSize(MemSize);
11603 Flags.setByValSize(MemSize);
11604 }
else if (
auto ParamAlign = Arg.getParamStackAlign()) {
11605 MemAlign = *ParamAlign;
11607 MemAlign = OriginalAlignment;
11609 Flags.setMemAlign(MemAlign);
11611 if (Arg.hasAttribute(Attribute::Nest))
11614 Flags.setInConsecutiveRegs();
11615 if (ArgCopyElisionCandidates.
count(&Arg))
11616 Flags.setCopyElisionCandidate();
11617 if (Arg.hasAttribute(Attribute::Returned))
11618 Flags.setReturned();
11624 for (
unsigned i = 0; i != NumRegs; ++i) {
11629 Flags, RegisterVT, VT, isArgValueUsed, ArgNo,
11631 if (NumRegs > 1 && i == 0)
11632 MyFlags.Flags.setSplit();
11635 MyFlags.Flags.setOrigAlign(
Align(1));
11636 if (i == NumRegs - 1)
11637 MyFlags.Flags.setSplitEnd();
11639 Ins.push_back(MyFlags);
11641 if (NeedsRegBlock &&
Value == NumValues - 1)
11642 Ins[
Ins.size() - 1].Flags.setInConsecutiveRegsLast();
11650 DAG.
getRoot(),
F.getCallingConv(),
F.isVarArg(), Ins, dl, DAG, InVals);
11654 "LowerFormalArguments didn't return a valid chain!");
11656 "LowerFormalArguments didn't emit the correct number of values!");
11658 for (
unsigned i = 0, e =
Ins.size(); i != e; ++i) {
11659 assert(InVals[i].getNode() &&
11660 "LowerFormalArguments emitted a null value!");
11661 assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
11662 "LowerFormalArguments emitted a value with the wrong type!");
11679 MVT VT = ValueVTs[0].getSimpleVT();
11681 std::optional<ISD::NodeType> AssertOp;
11684 F.getCallingConv(), AssertOp);
11690 FuncInfo->DemoteRegister = SRetReg;
11692 SDB->DAG.getCopyToReg(NewRoot,
SDB->getCurSDLoc(), SRetReg, ArgValue);
11705 unsigned NumValues = ValueVTs.
size();
11706 if (NumValues == 0)
11709 bool ArgHasUses = !Arg.use_empty();
11713 if (Ins[i].
Flags.isCopyElisionCandidate()) {
11714 unsigned NumParts = 0;
11715 for (
EVT VT : ValueVTs)
11717 F.getCallingConv(), VT);
11721 ArrayRef(&InVals[i], NumParts), ArgHasUses);
11726 bool isSwiftErrorArg =
11728 Arg.hasAttribute(Attribute::SwiftError);
11729 if (!ArgHasUses && !isSwiftErrorArg) {
11730 SDB->setUnusedArgValue(&Arg, InVals[i]);
11734 dyn_cast<FrameIndexSDNode>(InVals[i].
getNode()))
11735 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11738 for (
unsigned Val = 0; Val != NumValues; ++Val) {
11739 EVT VT = ValueVTs[Val];
11741 F.getCallingConv(), VT);
11748 if (ArgHasUses || isSwiftErrorArg) {
11749 std::optional<ISD::NodeType> AssertOp;
11750 if (Arg.hasAttribute(Attribute::SExt))
11752 else if (Arg.hasAttribute(Attribute::ZExt))
11756 PartVT, VT,
nullptr, NewRoot,
11757 F.getCallingConv(), AssertOp));
11764 if (ArgValues.
empty())
11769 dyn_cast<FrameIndexSDNode>(ArgValues[0].
getNode()))
11770 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11773 SDB->getCurSDLoc());
11775 SDB->setValue(&Arg, Res);
11788 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
11789 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11798 unsigned Reg = cast<RegisterSDNode>(Res.
getOperand(1))->getReg();
11810 unsigned Reg = cast<RegisterSDNode>(Res.
getOperand(1))->getReg();
11817 FuncInfo->InitializeRegForValue(&Arg);
11818 SDB->CopyToExportRegsIfNeeded(&Arg);
11822 if (!Chains.
empty()) {
11829 assert(i == InVals.
size() &&
"Argument register count mismatch!");
11833 if (!ArgCopyElisionFrameIndexMap.
empty()) {
11836 auto I = ArgCopyElisionFrameIndexMap.
find(
VI.getStackSlot());
11837 if (
I != ArgCopyElisionFrameIndexMap.
end())
11838 VI.updateStackSlot(
I->second);
11853SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(
const BasicBlock *LLVMBB) {
11861 if (!isa<PHINode>(SuccBB->begin()))
continue;
11866 if (!SuccsHandled.
insert(SuccMBB).second)
11874 for (
const PHINode &PN : SuccBB->phis()) {
11876 if (PN.use_empty())
11880 if (PN.getType()->isEmptyTy())
11884 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
11886 if (
const auto *
C = dyn_cast<Constant>(PHIOp)) {
11893 if (
auto *CI = dyn_cast<ConstantInt>(
C))
11905 assert(isa<AllocaInst>(PHIOp) &&
11907 "Didn't codegen value into a register!??");
11917 for (
EVT VT : ValueVTs) {
11919 for (
unsigned i = 0; i != NumRegisters; ++i)
11921 std::make_pair(&*
MBBI++, Reg + i));
11922 Reg += NumRegisters;
11942void SelectionDAGBuilder::updateDAGForMaybeTailCall(
SDValue MaybeTC) {
11944 if (MaybeTC.
getNode() !=
nullptr)
11959 unsigned Size =
W.LastCluster -
W.FirstCluster + 1;
11963 if (
Size == 2 &&
W.MBB == SwitchMBB) {
11976 const APInt &SmallValue =
Small.Low->getValue();
11977 const APInt &BigValue =
Big.Low->getValue();
11980 APInt CommonBit = BigValue ^ SmallValue;
11995 addSuccessorWithProb(SwitchMBB,
Small.MBB,
Small.Prob +
Big.Prob);
11997 addSuccessorWithProb(
11998 SwitchMBB, DefaultMBB,
12002 addSuccessorWithProb(SwitchMBB, DefaultMBB);
12025 return a.Prob != b.Prob ?
12027 a.Low->getValue().slt(b.Low->getValue());
12034 if (
I->Prob >
W.LastCluster->Prob)
12036 if (
I->Kind ==
CC_Range &&
I->MBB == NextMBB) {
12047 UnhandledProbs +=
I->Prob;
12051 bool FallthroughUnreachable =
false;
12053 if (
I ==
W.LastCluster) {
12055 Fallthrough = DefaultMBB;
12056 FallthroughUnreachable = isa<UnreachableInst>(
12060 CurMF->
insert(BBI, Fallthrough);
12064 UnhandledProbs -=
I->Prob;
12074 CurMF->
insert(BBI, JumpMBB);
12076 auto JumpProb =
I->Prob;
12077 auto FallthroughProb = UnhandledProbs;
12085 if (*SI == DefaultMBB) {
12086 JumpProb += DefaultProb / 2;
12087 FallthroughProb -= DefaultProb / 2;
12105 if (FallthroughUnreachable) {
12112 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
12113 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
12119 JT->Default = Fallthrough;
12122 if (CurMBB == SwitchMBB) {
12145 BTB->
Prob += DefaultProb / 2;
12149 if (FallthroughUnreachable)
12153 if (CurMBB == SwitchMBB) {
12162 if (
I->Low ==
I->High) {
12177 if (FallthroughUnreachable)
12181 CaseBlock CB(
CC, LHS, RHS, MHS,
I->MBB, Fallthrough, CurMBB,
12184 if (CurMBB == SwitchMBB)
12187 SL->SwitchCases.push_back(CB);
12192 CurMBB = Fallthrough;
12196void SelectionDAGBuilder::splitWorkItem(
SwitchWorkList &WorkList,
12200 assert(
W.FirstCluster->Low->getValue().slt(
W.LastCluster->Low->getValue()) &&
12201 "Clusters not sorted?");
12202 assert(
W.LastCluster -
W.FirstCluster + 1 >= 2 &&
"Too small to split!");
12204 auto [LastLeft, FirstRight, LeftProb, RightProb] =
12205 SL->computeSplitWorkItemInfo(W);
12210 assert(PivotCluster >
W.FirstCluster);
12211 assert(PivotCluster <=
W.LastCluster);
12226 if (FirstLeft == LastLeft && FirstLeft->Kind ==
CC_Range &&
12227 FirstLeft->Low ==
W.GE &&
12228 (FirstLeft->High->getValue() + 1LL) == Pivot->
getValue()) {
12229 LeftMBB = FirstLeft->MBB;
12234 {LeftMBB, FirstLeft, LastLeft,
W.GE, Pivot,
W.DefaultProb / 2});
12243 if (FirstRight == LastRight && FirstRight->Kind ==
CC_Range &&
12244 W.LT && (FirstRight->High->getValue() + 1ULL) ==
W.LT->getValue()) {
12245 RightMBB = FirstRight->MBB;
12250 {RightMBB, FirstRight, LastRight, Pivot,
W.LT,
W.DefaultProb / 2});
12259 if (
W.MBB == SwitchMBB)
12262 SL->SwitchCases.push_back(CB);
12295 unsigned PeeledCaseIndex = 0;
12296 bool SwitchPeeled =
false;
12299 if (
CC.Prob < TopCaseProb)
12301 TopCaseProb =
CC.Prob;
12302 PeeledCaseIndex =
Index;
12303 SwitchPeeled =
true;
12308 LLVM_DEBUG(
dbgs() <<
"Peeled one top case in switch stmt, prob: "
12309 << TopCaseProb <<
"\n");
12319 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
12321 nullptr,
nullptr, TopCaseProb.
getCompl()};
12322 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
12324 Clusters.erase(PeeledCaseIt);
12327 dbgs() <<
"Scale the probablity for one cluster, before scaling: "
12328 <<
CC.Prob <<
"\n");
12332 PeeledCaseProb = TopCaseProb;
12333 return PeeledSwitchMBB;
12336void SelectionDAGBuilder::visitSwitch(
const SwitchInst &SI) {
12340 Clusters.reserve(
SI.getNumCases());
12341 for (
auto I :
SI.cases()) {
12360 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
12364 if (Clusters.empty()) {
12365 assert(PeeledSwitchMBB == SwitchMBB);
12367 if (DefaultMBB != NextBlock(SwitchMBB)) {
12376 SL->findBitTestClusters(Clusters, &SI);
12379 dbgs() <<
"Case clusters: ";
12386 C.Low->getValue().print(
dbgs(),
true);
12387 if (
C.Low !=
C.High) {
12389 C.High->getValue().print(
dbgs(),
true);
12396 assert(!Clusters.empty());
12400 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
12407 {PeeledSwitchMBB,
First,
Last,
nullptr,
nullptr, DefaultProb});
12409 while (!WorkList.
empty()) {
12411 unsigned NumClusters =
W.LastCluster -
W.FirstCluster + 1;
12416 splitWorkItem(WorkList, W,
SI.getCondition(), SwitchMBB);
12420 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, DefaultMBB);
12424void SelectionDAGBuilder::visitStepVector(
const CallInst &
I) {
12431void SelectionDAGBuilder::visitVectorReverse(
const CallInst &
I) {
12437 assert(VT ==
V.getValueType() &&
"Malformed vector.reverse!");
12448 for (
unsigned i = 0; i != NumElts; ++i)
12449 Mask.push_back(NumElts - 1 - i);
12454void SelectionDAGBuilder::visitVectorDeinterleave(
const CallInst &
I) {
12485void SelectionDAGBuilder::visitVectorInterleave(
const CallInst &
I) {
12510void SelectionDAGBuilder::visitFreeze(
const FreezeInst &
I) {
12514 unsigned NumValues = ValueVTs.
size();
12515 if (NumValues == 0)
return;
12520 for (
unsigned i = 0; i != NumValues; ++i)
12528void SelectionDAGBuilder::visitVectorSplice(
const CallInst &
I) {
12535 int64_t
Imm = cast<ConstantInt>(
I.getOperand(2))->getSExtValue();
12552 for (
unsigned i = 0; i < NumElts; ++i)
12581 assert(
MI->getOpcode() == TargetOpcode::COPY &&
12582 "start of copy chain MUST be COPY");
12583 Reg =
MI->getOperand(1).getReg();
12584 MI =
MRI.def_begin(Reg)->getParent();
12586 if (
MI->getOpcode() == TargetOpcode::COPY) {
12587 assert(Reg.isVirtual() &&
"expected COPY of virtual register");
12588 Reg =
MI->getOperand(1).getReg();
12589 assert(Reg.isPhysical() &&
"expected COPY of physical register");
12590 MI =
MRI.def_begin(Reg)->getParent();
12593 assert(
MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
12594 "end of copy chain MUST be INLINEASM_BR");
12602void SelectionDAGBuilder::visitCallBrLandingPad(
const CallInst &
I) {
12606 cast<CallBrInst>(
I.getParent()->getUniquePredecessor()->getTerminator());
12618 for (
auto &
T : TargetConstraints) {
12619 SDISelAsmOperandInfo OpInfo(
T);
12627 switch (OpInfo.ConstraintType) {
12635 for (
unsigned &Reg : OpInfo.AssignedRegs.Regs) {
12643 SDValue V = OpInfo.AssignedRegs.getCopyFromRegs(
12646 ResultVTs.
push_back(OpInfo.ConstraintVT);
12655 ResultVTs.
push_back(OpInfo.ConstraintVT);
unsigned const MachineRegisterInfo * MRI
static unsigned getIntrinsicID(const SDNode *N)
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file implements the BitVector class.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
const HexagonInstrInfo * TII
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
static void getRegistersForValue(MachineFunction &MF, MachineIRBuilder &MIRBuilder, GISelAsmOperandInfo &OpInfo, GISelAsmOperandInfo &RefOpInfo)
Assign virtual/physical registers for the specified register operand.
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
static std::optional< ConstantRange > getRange(Value *V, const InstrInfoQuery &IIQ)
Helper method to get range from metadata or attribute.
unsigned const TargetRegisterInfo * TRI
static const Function * getCalledFunction(const Value *V)
This file provides utility analysis objects describing memory locations.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
Module.h This file contains the declarations for the Module class.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool hasOnlySelectUsers(const Value *Cond)
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL, SDValue &Chain)
Create a LOAD_STACK_GUARD node, and let it carry the target specific global variable if there exists ...
static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx, const SDLoc &DL, SmallVectorImpl< SDValue > &Ops, SelectionDAGBuilder &Builder)
Add a stack map intrinsic call's live variable operands to a stackmap or patchpoint target node's ope...
static const unsigned MaxParallelChains
static void getUnderlyingArgRegs(SmallVectorImpl< std::pair< unsigned, TypeSize > > &Regs, const SDValue &N)
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
visitPow - Lower a pow intrinsic.
static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index, ISD::MemIndexType &IndexType, SDValue &Scale, SelectionDAGBuilder *SDB, const BasicBlock *CurBB, uint64_t ElemSize)
static const CallBase * FindPreallocatedCall(const Value *PreallocatedSetup)
Given a @llvm.call.preallocated.setup, return the corresponding preallocated call.
static cl::opt< unsigned > SwitchPeelThreshold("switch-peel-threshold", cl::Hidden, cl::init(66), cl::desc("Set the case probability threshold for peeling the case from a " "switch statement. A value greater than 100 will void this " "optimization"))
static cl::opt< bool > InsertAssertAlign("insert-assert-align", cl::init(true), cl::desc("Insert the experimental `assertalign` node."), cl::ReallyHidden)
static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin)
static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG, DILocalVariable *Variable, DebugLoc DL, unsigned Order, SmallVectorImpl< Value * > &Values, DIExpression *Expression)
static unsigned findMatchingInlineAsmOperand(unsigned OperandNo, const std::vector< SDValue > &AsmNodeOperands)
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo, SDISelAsmOperandInfo &MatchingOpInfo, SelectionDAG &DAG)
Make sure that the output operand OpInfo and its corresponding input operand MatchingOpInfo have comp...
static void findUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
When an invoke or a cleanupret unwinds to the next EH pad, there are many places it could ultimately ...
static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic)
static BranchProbability scaleCaseProbality(BranchProbability CaseProb, BranchProbability PeeledCaseProb)
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp2 - Lower an exp2 intrinsic.
static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue Scale, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt, const SDLoc &dl)
getF32Constant - Get 32-bit floating point constant.
static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val, const SDLoc &DL, EVT PartVT)
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog10 - Lower a log10 intrinsic.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv)
getCopyToPartsVector - Create a series of nodes that contain the specified value split into legal par...
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv=std::nullopt, ISD::NodeType ExtendKind=ISD::ANY_EXTEND)
getCopyToParts - Create a series of nodes that contain the specified value split into legal parts.
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, SelectionDAGBuilder &Builder)
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog2 - Lower a log2 intrinsic.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location, SDISelAsmOperandInfo &OpInfo, SelectionDAG &DAG)
Get a direct memory input to behave well as an indirect operand.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel)
isOnlyUsedInEntryBlock - If the specified argument is only used in the entry block,...
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V, const Twine &ErrMsg)
static bool collectInstructionDeps(SmallMapVector< const Instruction *, bool, 8 > *Deps, const Value *V, SmallMapVector< const Instruction *, bool, 8 > *Necessary=nullptr, unsigned Depth=0)
static void findArgumentCopyElisionCandidates(const DataLayout &DL, FunctionLoweringInfo *FuncInfo, ArgCopyElisionMapTy &ArgCopyElisionCandidates)
Scan the entry block of the function in FuncInfo for arguments that look like copies into a local all...
static bool isFunction(SDValue Op)
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI, const SDLoc &dl)
GetExponent - Get the exponent:
static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg)
static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, SelectionDAG &DAG)
ExpandPowI - Expand a llvm.powi intrinsic.
static void findWasmUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog - Lower a log intrinsic.
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC=std::nullopt, std::optional< ISD::NodeType > AssertOp=std::nullopt)
getCopyFromParts - Create a value that contains the specified legal parts combined into the value the...
static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl, SelectionDAG &DAG)
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl)
GetSignificand - Get the significand and build it into a floating-point number with exponent of 1:
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp - Lower an exp intrinsic.
static const MDNode * getRangeMetadata(const Instruction &I)
static cl::opt< unsigned, true > LimitFPPrecision("limit-float-precision", cl::desc("Generate low-precision inline sequences " "for some float libcalls"), cl::location(LimitFloatPrecision), cl::Hidden, cl::init(0))
static void tryToElideArgumentCopy(FunctionLoweringInfo &FuncInfo, SmallVectorImpl< SDValue > &Chains, DenseMap< int, int > &ArgCopyElisionFrameIndexMap, SmallPtrSetImpl< const Instruction * > &ElidedArgCopyInstrs, ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg, ArrayRef< SDValue > ArgVals, bool &ArgHasUses)
Try to elide argument copies from memory into a local alloca.
static unsigned LimitFloatPrecision
LimitFloatPrecision - Generate low-precision inline sequences for some float libcalls (6,...
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC)
getCopyFromPartsVector - Create a value that contains the specified legal parts combined into the val...
static bool InBlock(const Value *V, const BasicBlock *BB)
static LLVM_ATTRIBUTE_ALWAYS_INLINE MVT::SimpleValueType getSimpleVT(const unsigned char *MatcherTable, unsigned &MatcherIndex)
getSimpleVT - Decode a value in MatcherTable, if it's a VBR encoded value, use GetVBR to decode it.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
static SymbolRef::Type getType(const Symbol *Sym)
support::ulittle16_t & Lo
support::ulittle16_t & Hi
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)
Checks whether the given location points to constant memory, or if OrLocal is true whether it points ...
Class for arbitrary precision integers.
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
an instruction to allocate memory on the stack
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
This class represents an incoming formal argument to a Function.
bool hasAttribute(Attribute::AttrKind Kind) const
Check if an argument has a given attribute.
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
This class represents the atomic memcpy intrinsic i.e.
an instruction that atomically reads a memory location, combines it with another value,...
@ Min
*p = old <signed v ? old : v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
AttributeSet getRetAttrs() const
The attributes for the ret value are returned.
bool hasFnAttr(Attribute::AttrKind Kind) const
Return true if the attribute exists for the function.
LLVM Basic Block Representation.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Function * getParent() const
Return the enclosing method, or null if none.
const Instruction * getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
const Instruction & back() const
This class represents a no-op cast from one type to another.
bool test(unsigned Idx) const
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
size_type size() const
size - Returns the number of bits in this bitvector.
The address of a basic block.
Conditional or Unconditional Branch instruction.
Analysis providing branch probability information.
BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const
Test if an edge is hot relative to other out-edges of the Src.
static uint32_t getDenominator()
static BranchProbability getOne()
static BranchProbability getUnknown()
uint32_t getNumerator() const
uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
CallingConv::ID getCallingConv() const
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
bool isIndirectCall() const
Return true if the callsite is an indirect call.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
unsigned arg_size() const
AttributeList getAttributes() const
Return the parameter attributes for this call.
bool isTailCall() const
Tests if this call site is marked as a tail call.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
A constant value that is initialized with an expression using other constant values.
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
A signed pointer, in the ptrauth sense.
This class represents a range of values.
uint64_t getZExtValue() const
Constant Vector Declarations.
This is an important base class in LLVM.
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
unsigned getNonMetadataArgCount() const
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static bool fragmentsOverlap(const FragmentInfo &A, const FragmentInfo &B)
Check if fragments overlap between a pair of FragmentInfos.
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
static const DIExpression * convertToUndefExpression(const DIExpression *Expr)
Removes all elements from Expr that do not apply to an undef debug value, which includes every operat...
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
Base class for variables.
std::optional< uint64_t > getSizeInBits() const
Determines the size of the variable's type.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
unsigned getAllocaAddrSpace() const
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
This represents the llvm.dbg.label instruction.
DILabel * getLabel() const
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
This represents the llvm.dbg.value instruction.
iterator_range< location_op_iterator > getValues() const
DILocalVariable * getVariable() const
DIExpression * getExpression() const
bool isKillLocation() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LocationType getType() const
DIExpression * getExpression() const
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
DILocation * getInlinedAt() const
iterator find(const_arg_type_t< KeyT > Val)
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
static constexpr ElementCount getFixed(ScalarTy MinVal)
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
constexpr bool isScalar() const
Exactly one element.
Class representing an expression and its matching format.
This instruction compares its operands according to the predicate given to the constructor.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
bool allowReassoc() const
Flag queries.
An instruction for ordering other memory operations.
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
BranchProbabilityInfo * BPI
Register CreateRegs(const Value *V)
SmallPtrSet< const DbgVariableRecord *, 8 > PreprocessedDVRDeclares
MachineBasicBlock * getMBB(const BasicBlock *BB) const
Register DemoteRegister
DemoteRegister - if CanLowerReturn is false, DemoteRegister is a vreg allocated to hold a pointer to ...
BitVector DescribedArgs
Bitvector with a bit set if corresponding argument is described in ArgDbgValues.
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block.
int getArgumentFrameIndex(const Argument *A)
getArgumentFrameIndex - Get frame index for the byval argument.
bool isExportedInst(const Value *V) const
isExportedInst - Return true if the specified value is an instruction exported from its block.
const LiveOutInfo * GetLiveOutRegInfo(Register Reg)
GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the register is a PHI destinat...
Register InitializeRegForValue(const Value *V)
unsigned ExceptionPointerVirtReg
If the current MBB is a landing pad, the exception pointer and exception selector registers are copie...
SmallPtrSet< const DbgDeclareInst *, 8 > PreprocessedDbgDeclares
Collection of dbg.declare instructions handled after argument lowering and before ISel proper.
DenseMap< const Value *, Register > ValueMap
ValueMap - Since we emit code for the function a basic block at a time, we must remember which virtua...
MachineBasicBlock::iterator InsertPt
InsertPt - The current insert position inside the current block.
MachineBasicBlock * MBB
MBB - The current block.
std::vector< std::pair< MachineInstr *, unsigned > > PHINodesToUpdate
PHINodesToUpdate - A list of phi instructions whose operand list will be updated after processing the...
unsigned ExceptionSelectorVirtReg
SmallVector< MachineInstr *, 8 > ArgDbgValues
ArgDbgValues - A list of DBG_VALUE instructions created during isel for function arguments that are i...
unsigned getCurrentCallSite()
Get the call site currently being processed, if any. Return zero if none.
void setCurrentCallSite(unsigned Site)
Set the call site currently being processed.
MachineRegisterInfo * RegInfo
Register CreateReg(MVT VT, bool isDivergent=false)
CreateReg - Allocate a single virtual register for the given type.
bool CanLowerReturn
CanLowerReturn - true iff the function's return value can be lowered to registers.
DenseMap< const Value *, ISD::NodeType > PreferredExtendType
Record the preferred extend type (ISD::SIGN_EXTEND or ISD::ZERO_EXTEND) for a value.
Register getCatchPadExceptionPointerVReg(const Value *CPI, const TargetRegisterClass *RC)
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
Type * getReturnType() const
Data structure describing the variable locations in a function.
const BasicBlock & getEntryBlock() const
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generatio...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Constant * getPersonalityFn() const
Get the personality function associated with this function.
AttributeList getAttributes() const
Return the attribute list for this Function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Garbage collection metadata for a single function.
void addStackRoot(int Num, const Constant *Metadata)
addStackRoot - Registers a root that lives on the stack.
Represents flags for the getelementptr instruction/expression.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
This instruction inserts a struct field of array element value into an aggregate value.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
@ MIN_INT_BITS
Minimum number of bits that can be specified.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
void emitError(uint64_t LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
The landingpad instruction holds all of the information necessary to generate correct exception handl...
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
@ INVALID_SIMPLE_VALUE_TYPE
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool bitsGE(MVT VT) const
Return true if this has no less bits than VT.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
bool isEHPad() const
Returns true if the block is a landing pad.
void setIsEHCatchretTarget(bool V=true)
Indicates if this is a target block of a catchret.
void setIsCleanupFuncletEntry(bool V=true)
Indicates if this is the entry block of a cleanup funclet.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
succ_iterator succ_begin()
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void setIsEHFuncletEntry(bool V=true)
Indicates if this is the entry block of an EH funclet.
void setIsEHScopeEntry(bool V=true)
Indicates if this is the entry block of an EH scope, i.e., the block that used to have a catchpa...
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable)
Marks the immutability of an object.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setHasPatchPoint(bool s=true)
void setHasStackMap(bool s=true)
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
void setIsAliasedObjectIndex(int ObjectIdx, bool IsAliased)
Set "maybe pointed to by an LLVM IR value" for an object.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
void RemoveStackObject(int ObjectIdx)
Remove or mark dead a statically sized stack object.
void setFunctionContextIndex(int I)
Description of the location of a variable whose Address is valid and unchanging during function execu...
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
void setCallsUnwindInit(bool b)
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site)
Map the begin label for a call site.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
void setHasEHCatchret(bool V)
void setCallsEHReturn(bool b)
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
unsigned getTypeIDFor(const GlobalValue *TI)
Return the type id for the specified typeinfo. This is function wide.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
auto getInStackSlotVariableDbgInfo()
Returns the collection of variables for which we have debug info and that have been assigned a stack ...
void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD)
Record annotations associated with a particular label.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineBasicBlock & front() const
bool hasEHFunclets() const
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void erase(iterator MBBI)
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
static MachineOperand CreateFI(int Idx)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
ArrayRef< std::pair< MCRegister, Register > > liveins() const
An SDNode that represents everything that will be needed to construct a MachineInstr.
bool contains(const KeyT &Key) const
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
Representation for a specific memory location.
static MemoryLocation getAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location after Ptr, while remaining within the underlying objec...
A Module instance is used to store all the information related to an LLVM module.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A udiv or sdiv instruction, which can be marked as "exact", indicating that no bits are destroyed.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
Holds the information from a dbg_label node through SDISel.
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(unsigned VReg)
static SDDbgOperand fromConst(const Value *Const)
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
SelectionDAGBuilder - This is the common target-independent lowering implementation that is parameter...
SDValue getValue(const Value *V)
getValue - Return an SDValue for the given Value.
void addDanglingDebugInfo(SmallVectorImpl< Value * > &Values, DILocalVariable *Var, DIExpression *Expr, bool IsVariadic, DebugLoc DL, unsigned Order)
Register a dbg_value which relies on a Value which we have not yet seen.
void visitDbgInfo(const Instruction &I)
void clearDanglingDebugInfo()
Clear the dangling debug information map.
void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall, bool IsMustTailCall, const BasicBlock *EHPadBB=nullptr, const TargetLowering::PtrAuthInfo *PAI=nullptr)
void clear()
Clear out the current SelectionDAG and the associated state and prepare this SelectionDAGBuilder obje...
void visitBitTestHeader(SwitchCG::BitTestBlock &B, MachineBasicBlock *SwitchBB)
visitBitTestHeader - This function emits necessary code to produce value suitable for "bit tests"
void LowerStatepoint(const GCStatepointInst &I, const BasicBlock *EHPadBB=nullptr)
std::unique_ptr< SDAGSwitchLowering > SL
SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I, SDValue Op)
bool HasTailCall
This is set to true if a call in the current block has been translated as a tail call.
bool ShouldEmitAsBranches(const std::vector< SwitchCG::CaseBlock > &Cases)
If the set of cases should be emitted as a series of branches, return true.
void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
EmitBranchForMergedCondition - Helper method for FindMergedConditions.
void LowerDeoptimizeCall(const CallInst *CI)
void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee, const BasicBlock *EHPadBB)
SwiftErrorValueTracking & SwiftError
Information about the swifterror values used throughout the function.
SDValue getNonRegisterValue(const Value *V)
getNonRegisterValue - Return an SDValue for the given Value, but don't look in FuncInfo....
void CopyValueToVirtualRegister(const Value *V, unsigned Reg, ISD::NodeType ExtendType=ISD::ANY_EXTEND)
DenseMap< MachineBasicBlock *, SmallVector< unsigned, 4 > > LPadToCallSiteMap
Map a landing pad to the call site indexes.
void handleDebugDeclare(Value *Address, DILocalVariable *Variable, DIExpression *Expression, DebugLoc DL)
void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB, BranchProbability BranchProbToNext, unsigned Reg, SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB)
visitBitTestCase - this function produces one "bit test"
bool shouldKeepJumpConditionsTogether(const FunctionLoweringInfo &FuncInfo, const BranchInst &I, Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs, TargetLoweringBase::CondMergingParams Params) const
StatepointLoweringState StatepointLowering
State used while lowering a statepoint sequence (gc_statepoint, gc_relocate, and gc_result).
void init(GCFunctionInfo *gfi, AAResults *AA, AssumptionCache *AC, const TargetLibraryInfo *li)
DenseMap< const Constant *, unsigned > ConstantsOut
void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI, const CallBase *Call, unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy, AttributeSet RetAttrs, bool IsPatchPoint)
Populate a CallLoweringInfo (into CLI) based on the properties of the call being lowered.
void salvageUnresolvedDbgValue(const Value *V, DanglingDebugInfo &DDI)
For the given dangling debuginfo record, perform last-ditch efforts to resolve the debuginfo to somet...
SmallVector< SDValue, 8 > PendingLoads
Loads are not emitted to the program immediately.
GCFunctionInfo * GFI
Garbage collection metadata for the function.
SDValue getRoot()
Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict) items.
void ExportFromCurrentBlock(const Value *V)
ExportFromCurrentBlock - If this condition isn't known to be exported from the current basic block,...
DebugLoc getCurDebugLoc() const
void resolveOrClearDbgInfo()
Evict any dangling debug information, attempting to salvage it first.
std::pair< SDValue, SDValue > lowerInvokable(TargetLowering::CallLoweringInfo &CLI, const BasicBlock *EHPadBB=nullptr)
SDValue getMemoryRoot()
Return the current virtual root of the Selection DAG, flushing any PendingLoad items.
void resolveDanglingDebugInfo(const Value *V, SDValue Val)
If we saw an earlier dbg_value referring to V, generate the debug data structures now that we've seen...
SDLoc getCurSDLoc() const
void visit(const Instruction &I)
void dropDanglingDebugInfo(const DILocalVariable *Variable, const DIExpression *Expr)
If we have dangling debug info that describes Variable, or an overlapping part of variable considerin...
SDValue getCopyFromRegs(const Value *V, Type *Ty)
If there was virtual register allocated for the value V emit CopyFromReg of the specified type Ty.
void CopyToExportRegsIfNeeded(const Value *V)
CopyToExportRegsIfNeeded - If the given value has virtual registers created for it,...
void handleKillDebugValue(DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order)
Create a record for a kill location debug intrinsic.
void visitJumpTable(SwitchCG::JumpTable &JT)
visitJumpTable - Emit JumpTable node in the current MBB
void visitJumpTableHeader(SwitchCG::JumpTable &JT, SwitchCG::JumpTableHeader &JTH, MachineBasicBlock *SwitchBB)
visitJumpTableHeader - This function emits necessary code to produce index in the JumpTable from swit...
void LowerCallSiteWithPtrAuthBundle(const CallBase &CB, const BasicBlock *EHPadBB)
static const unsigned LowestSDNodeOrder
Lowest valid SDNodeOrder.
void LowerDeoptimizingReturn()
FunctionLoweringInfo & FuncInfo
Information about the function as a whole.
void setValue(const Value *V, SDValue NewN)
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, Instruction::BinaryOps Opc, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
const TargetLibraryInfo * LibInfo
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB)
void visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineBasicBlock *ParentBB)
Codegen a new tail for a stack protector check ParentMBB which has had its tail spliced into a stack ...
bool handleDebugValue(ArrayRef< const Value * > Values, DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order, bool IsVariadic)
For a given list of Values, attempt to create and record a SDDbgValue in the SelectionDAG.
SDValue getControlRoot()
Similar to getRoot, but instead of flushing all the PendingLoad items, flush all the PendingExports (...
void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last)
When an MBB was split during scheduling, update the references that need to refer to the last resulti...
SDValue getValueImpl(const Value *V)
getValueImpl - Helper function for getValue and getNonRegisterValue.
void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB)
visitSwitchCase - Emits the necessary code to represent a single node in the binary search tree resul...
void visitSPDescriptorFailure(StackProtectorDescriptor &SPD)
Codegen the failure basic block for a stack protector check.
std::unique_ptr< FunctionLoweringInfo > FuncInfo
SmallPtrSet< const Instruction *, 4 > ElidedArgCopyInstrs
const TargetLowering * TLI
virtual void emitFunctionEntryCode()
SwiftErrorValueTracking * SwiftError
std::unique_ptr< SelectionDAGBuilder > SDB
Targets can subclass this to parameterize the SelectionDAG lowering and instruction selection process...
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, SDValue MaxLength, MachinePointerInfo SrcPtrInfo) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a memcmp/bcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dest, SDValue Src, MachinePointerInfo DestPtrInfo, MachinePointerInfo SrcPtrInfo, bool isStpcpy) const
Emit target-specific code that performs a strcpy or stpcpy, in cases where that is faster than a libc...
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Src, SDValue Char, SDValue Length, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memchr, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a strcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, MachinePointerInfo SrcPtrInfo) const
virtual SDValue EmitTargetCodeForSetTag(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Addr, SDValue Size, MachinePointerInfo DstPtrInfo, bool ZeroData) const
Help to insert SDNodeFlags automatically in transforming.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root, MCSymbol *Label)
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
const TargetSubtargetInfo & getSubtarget() const
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
BlockFrequencyInfo * getBFI() const
SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Value, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo)
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid, uint64_t Index, uint32_t Attr)
Creates a PseudoProbeSDNode with function GUID Guid and the index of the block Index it is probing,...
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, MachineMemOperand *MMO)
Gets a node for an atomic cmpxchg op.
SDDbgValue * getVRegDbgValue(DIVariable *Var, DIExpression *Expr, unsigned VReg, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a VReg SDDbgValue node.
void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To, unsigned Num)
Like ReplaceAllUsesOfValueWith, but for multiple values at once.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
void addMMRAMetadata(const SDNode *Node, MDNode *MMRA)
Set MMRAMetadata to be associated with Node.
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getElementCount(const SDLoc &DL, EVT VT, ElementCount EC, bool ConstantFold=true)
SDValue getGetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A)
Return an AssertAlignSDNode.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
bool shouldOptForSize() const
SDValue getVPZExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be an integer vector, to the vector-type VT,...
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
SDValue getStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
void DeleteNode(SDNode *N)
Remove the specified node from the system.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
ProfileSummaryInfo * getPSI() const
SDValue getTargetFrameIndex(int FI, EVT VT)
SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
const SelectionDAGTargetInfo & getSelectionDAGInfo() const
SDValue getMaskedHistogram(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDDbgLabel * getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O)
Creates a SDDbgLabel node.
SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue SV, unsigned Align)
VAArg produces a result and token chain, and takes a pointer and a source value as input.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getSrcValue(const Value *v)
Construct a node to track a Value* through the backend.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
MaybeAlign InferPtrAlign(SDValue Ptr) const
Infer alignment of a load / store address.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getRegister(unsigned Reg, EVT VT)
void AddDbgLabel(SDDbgLabel *DB)
Add a dbg_label SDNode.
SDValue getBasicBlock(MachineBasicBlock *MBB)
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label)
SDValue getSetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, bool isTarget=false, unsigned TargetFlags=0)
SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDDbgValue * getConstantDbgValue(DIVariable *Var, DIExpression *Expr, const Value *C, const DebugLoc &DL, unsigned O)
Creates a constant SDDbgValue node.
SDValue getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getValueType(EVT)
SDValue getTargetConstantFP(double Val, const SDLoc &DL, EVT VT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDDbgValue * getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr, unsigned FI, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a FrameIndex SDDbgValue node.
SDValue getJumpTable(int JTI, EVT VT, bool isTarget=false, unsigned TargetFlags=0)
SDValue getVPPtrExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be of integer type, to the vector-type integer type VT,...
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to extend the Op as a pointer value assuming it was the smaller SrcTy ...
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
const FunctionVarLocs * getFunctionVarLocs() const
Returns the result of the AssignmentTrackingAnalysis pass if it's available, otherwise return nullptr...
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getCondCode(ISD::CondCode Cond)
SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain, int FrameIndex, int64_t Size, int64_t Offset=-1)
Creates a LifetimeSDNode that starts (IsStart==true) or ends (IsStart==false) the lifetime of the por...
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
void addPCSections(const SDNode *Node, MDNode *MD)
Set PCSections to be associated with Node.
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
SDValue getSetCCVP(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Mask, SDValue EVL)
Helper function to make it easier to build VP_SETCCs if you just have an ISD::CondCode instead of an ...
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDDbgValue * getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N, unsigned R, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a SDDbgValue node.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, bool IsTruncating=false)
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void swap(SmallVectorImpl &RHS)
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
MachineBasicBlock * getSuccessMBB()
MachineBasicBlock * getFailureMBB()
void clear()
Clear the memory usage of this object.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
TypeSize getElementOffset(unsigned Idx) const
Class to represent struct types.
void setCurrentVReg(const MachineBasicBlock *MBB, const Value *, Register)
Set the swifterror virtual register in the VRegDefMap for this basic block.
Register getOrCreateVRegUseAt(const Instruction *, const MachineBasicBlock *, const Value *)
Get or create the swifterror value virtual register for a use of a swifterror by an instruction.
Register getOrCreateVRegDefAt(const Instruction *, const MachineBasicBlock *, const Value *)
Get or create the swifterror value virtual register for a def of a swifterror by an instruction.
const Value * getFunctionArg() const
Get the (unique) function argument that was marked swifterror, or nullptr if this function has no swi...
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
TargetIntrinsicInfo - Interface to description of target-specific intrinsic functions.
Provides information about what library functions are available for the current target.
bool hasOptimizedCodeGen(LibFunc F) const
Tests if the function is both available and a candidate for optimized code generation.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
void setAttributes(const CallBase *Call, unsigned ArgIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes.
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual bool isLegalScaleForGatherScatter(uint64_t Scale, uint64_t ElemSize) const
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual CondMergingParams getJumpConditionMergingParams(Instruction::BinaryOps, const Value *, const Value *) const
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
bool isJumpExpensive() const
Return true if Flow Control is an expensive operation that should be avoided.
unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vecto...
virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const
Returns true if the index type for a masked gather/scatter requires extending.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
virtual Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?...
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const
Certain targets have context sensitive alignment requirements, where one type has the alignment requi...
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
virtual MVT getFenceOperandTy(const DataLayout &DL) const
Return the type for operands of fence.
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF, bool IsScalable) const
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual uint64_t getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Return the desired alignment for ByVal or InAlloca aggregate function arguments in the caller paramet...
virtual MVT hasFastEqualityCompare(unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the give...
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
virtual bool shouldExpandCttzElements(EVT VT) const
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
virtual bool signExtendConstant(const ConstantInt *C) const
Return true if this constant should be sign extended when promoting to a larger type.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
bool supportsUnalignedAtomics() const
Whether the target supports unaligned atomic operations.
std::vector< ArgListEntry > ArgListTy
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool supportKCFIBundles() const
Return true if the target supports kcfi operand bundles.
virtual bool supportPtrAuthBundles() const
Return true if the target supports ptrauth operand bundles.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
virtual SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue &NewLoad, SDValue Ptr, SDValue PassThru, SDValue Mask) const
virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const
Target-specific splitting of values into parts that fit a register storing a legal type.
virtual SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const
Target-specific combining of register parts into its original value.
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual SDValue LowerFormalArguments(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::InputArg > &, const SDLoc &, SelectionDAG &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to u...
virtual SDValue LowerReturn(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::OutputArg > &, const SmallVectorImpl< SDValue > &, const SDLoc &, SelectionDAG &) const
This hook must be implemented to lower outgoing return values, described by the Outs array,...
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
virtual SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue Ptr, SDValue Val, SDValue Mask) const
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
virtual bool isInlineAsmTargetBranch(const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use eithe...
virtual MVT getJumpTableRegTy(const DataLayout &DL) const
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
virtual const TargetIntrinsicInfo * getIntrinsicInfo() const
If intrinsic information is available, return it. If not, return null.
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
const Triple & getTargetTriple() const
virtual TargetTransformInfo getTargetTransformInfo(const Function &F) const
Return a TargetTransformInfo for a given function.
CodeModel::Model getCodeModel() const
Returns the code model.
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
unsigned EnableFastISel
EnableFastISel - This flag enables fast-path instruction selection which trades away generated code q...
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.
unsigned getID() const
Return the register class ID number.
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetInstrInfo * getInstrInfo() const
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
ArchType getArch() const
Get the parsed architecture type of this triple.
bool isPS() const
Tests whether the target is the PS4 or PS5 platform.
bool isWasm() const
Tests whether the target is wasm (32- and 64-bit).
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
TypeID
Definitions of all of the base types for the Type system.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
static Type * getVoidTy(LLVMContext &C)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
bool isVoidTy() const
Return true if this is 'void'.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
CmpInst::Predicate getPredicate() const
This is the common base class for vector predication intrinsics.
static std::optional< unsigned > getVectorLengthParamPos(Intrinsic::ID IntrinsicID)
MaybeAlign getPointerAlignment() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
LLVMContext & getContext() const
All values hold a context through their type.
StringRef getName() const
Return a constant reference to the value's name.
Base class of all SIMD vector types.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
const ParentTy * getParent() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ AnyReg
Used for dynamic register based calls (e.g. stackmap and patchpoint intrinsics).
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ X86_VectorCall
MSVC calling convention that passes vectors and vector aggregates in SSE registers.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ SET_FPENV
Sets the current floating-point environment.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
@ RESET_FPENV
Set floating-point environment to default state.
@ ADD
Simple integer binary arithmetic operators.
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ SET_FPMODE
Sets the current dynamic floating-point control modes.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ EH_SJLJ_SETUP_DISPATCH
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here.
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ RESET_FPMODE
Sets default dynamic floating-point control modes.
@ FPTRUNC_ROUND
FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ INIT_TRAMPOLINE
INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
@ ANNOTATION_LABEL
ANNOTATION_LABEL - Represents a mid basic block label used by annotations.
@ SET_ROUNDING
Set rounding mode.
@ SIGN_EXTEND
Conversion operators.
@ PREALLOCATED_SETUP
PREALLOCATED_SETUP - This has 2 operands: an input chain and a SRCVALUE with the preallocated call Va...
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readsteadycounter intrinsic.
@ ADDROFRETURNADDR
ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
@ BR
Control flow instructions. These all have token chains.
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ SSUBO
Same for subtraction.
@ PREALLOCATED_ARG
PREALLOCATED_ARG - This has 3 operands: an input chain, a SRCVALUE with the preallocated call Value,...
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the same...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ ARITH_FENCE
ARITH_FENCE - This corresponds to an arithmetic fence intrinsic.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ CLEANUPRET
CLEANUPRET - Represents a return from a cleanup block funclet.
@ GET_FPMODE
Reads the current dynamic floating-point control modes.
@ GET_FPENV
Gets the current floating-point environment.
@ SHL
Shift and rotation operations.
@ PtrAuthGlobalAddress
A ptrauth constant.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a sc...
@ LOCAL_RECOVER
LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ UBSANTRAP
UBSANTRAP - Trap with an immediate describing the kind of sanitizer failure.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ PCMARKER
PCMARKER - This corresponds to the pcmarker intrinsic.
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
@ SPONENTRY
SPONENTRY - Represents the llvm.sponentry intrinsic.
@ INLINEASM
INLINEASM - Represents an inline asm block.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ BRCOND
BRCOND - Conditional branch.
@ CATCHRET
CATCHRET - Represents a return from a catch block funclet.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the sa...
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is the same as FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ ADJUST_TRAMPOLINE
ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Flag
These should be considered private to the implementation of the MCInstrDesc class.
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
OneUse_match< T > m_OneUse(const T &SubPattern)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
VScaleVal_match m_VScale()
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
std::vector< CaseCluster > CaseClusterVector
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
CaseClusterVector::iterator CaseClusterIt
std::pair< JumpTableHeader, JumpTable > JumpTableBlock
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebMayTrap
This corresponds to "fpexcept.maytrap".
@ ebIgnore
This corresponds to "fpexcept.ignore".
NodeAddr< FuncNode * > Func
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
int popcount(T Value) noexcept
Count the number of set bits in a value.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
gep_type_iterator gep_type_end(const User *GEP)
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
detail::zippy< detail::zip_first, T, U, Args... > zip_first(T &&t, U &&u, Args &&...args)
zip iterator that, for the sake of efficiency, assumes the first iteratee to be the shortest.
void sort(IteratorTy Start, IteratorTy End)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
bool hasSingleElement(ContainerTy &&C)
Returns true if the given container only contains a single element.
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Global
Append to llvm.global_dtors.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ And
Bitwise or logical AND of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, can return either (or both operands are known non-NaN).
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
DWARFExpression::Operation Op
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if w...
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
gep_type_iterator gep_type_begin(const User *GEP)
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
unsigned succ_size(const MachineBasicBlock *BB)
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
static const fltSemantics & IEEEsingle() LLVM_READNONE
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
uint64_t getScalarStoreSize() const
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
void setPointerAddrSpace(unsigned AS)
void setOrigAlign(Align A)
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
This struct represents the registers (physical or virtual) that a particular set of values is assigne...
SmallVector< unsigned, 4 > Regs
This list holds the registers assigned to the values.
SmallVector< unsigned, 4 > RegCount
This list holds the number of registers for each value.
bool isABIMangled() const
SmallVector< EVT, 4 > ValueVTs
The value types of the values, which may not be legal, and may need be promoted or synthesized from o...
SmallVector< std::pair< unsigned, TypeSize >, 4 > getRegsAndSizes() const
Return a list of registers and their sizes.
void AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching, unsigned MatchingIdx, const SDLoc &dl, SelectionDAG &DAG, std::vector< SDValue > &Ops) const
Add this value to the specified inlineasm node operand list.
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr) const
Emit a series of CopyFromReg nodes that copies from this value and returns the result as a ValueVTs v...
SmallVector< MVT, 4 > RegVTs
The value types of the registers.
void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr, ISD::NodeType PreferredExtendType=ISD::ANY_EXTEND) const
Emit a series of CopyToReg nodes that copies the specified value into the registers specified by this...
std::optional< CallingConv::ID > CallConv
Records if this value needs to be treated in an ABI dependant manner, different to normal type legali...
bool occupiesMultipleRegs() const
Check if the total RegCount is greater than one.
These are IR-level optimization flags that may be propagated to SDNodes.
void copyFMF(const FPMathOperator &FPMO)
Propagate the fast-math-flags from an IR FPMathOperator.
bool hasAllowReassociation() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
A MapVector that performs no allocations if smaller than a certain size.
MachineBasicBlock * Default
BranchProbability DefaultProb
MachineBasicBlock * Parent
bool FallthroughUnreachable
MachineBasicBlock * ThisBB
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation of additional basic blocks needed by multi-case switch statements.
BranchProbability TrueProb
BranchProbability FalseProb
MachineBasicBlock * TrueBB
MachineBasicBlock * FalseBB
SDLoc DL
The debug location of the instruction this CaseBlock was produced from.
A cluster of case labels.
static CaseCluster range(const ConstantInt *Low, const ConstantInt *High, MachineBasicBlock *MBB, BranchProbability Prob)
This contains information for each constraint that we are lowering.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setConvergent(bool Value=true)
CallLoweringInfo & setCFIType(const ConstantInt *Type)
SmallVector< ISD::InputArg, 32 > Ins
bool IsPostTypeLegalization
SmallVector< SDValue, 4 > InVals
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setIsPreallocated(bool Value=true)
CallLoweringInfo & setConvergenceControlToken(SDValue Token)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setPtrAuth(PtrAuthInfo Value)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setDiscardResult(bool Value=true)
This structure contains the information necessary for lowering pointer-authenticating indirect calls.
void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin, MCSymbol *InvokeEnd)