79#include "llvm/IR/IntrinsicsAArch64.h"
80#include "llvm/IR/IntrinsicsAMDGPU.h"
81#include "llvm/IR/IntrinsicsWebAssembly.h"
112using namespace PatternMatch;
113using namespace SwitchCG;
115#define DEBUG_TYPE "isel"
123 cl::desc(
"Insert the experimental `assertalign` node."),
128 cl::desc(
"Generate low-precision inline sequences "
129 "for some float libcalls"),
135 cl::desc(
"Set the case probability threshold for peeling the case from a "
136 "switch statement. A value greater than 100 will void this "
156 const SDValue *Parts,
unsigned NumParts,
159 std::optional<CallingConv::ID>
CC);
168 unsigned NumParts,
MVT PartVT,
EVT ValueVT,
const Value *V,
170 std::optional<CallingConv::ID>
CC = std::nullopt,
171 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
175 PartVT, ValueVT,
CC))
182 assert(NumParts > 0 &&
"No parts to assemble!");
193 unsigned RoundBits = PartBits * RoundParts;
194 EVT RoundVT = RoundBits == ValueBits ?
200 if (RoundParts > 2) {
204 PartVT, HalfVT, V, InChain);
215 if (RoundParts < NumParts) {
217 unsigned OddParts = NumParts - RoundParts;
220 OddVT, V, InChain,
CC);
237 assert(ValueVT ==
EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
248 !PartVT.
isVector() &&
"Unexpected split");
260 if (PartEVT == ValueVT)
264 ValueVT.
bitsLT(PartEVT)) {
277 if (ValueVT.
bitsLT(PartEVT)) {
282 Val = DAG.
getNode(*AssertOp,
DL, PartEVT, Val,
297 llvm::Attribute::StrictFP)) {
299 DAG.
getVTList(ValueVT, MVT::Other), InChain, Val,
311 if (PartEVT == MVT::x86mmx && ValueVT.
isInteger() &&
312 ValueVT.
bitsLT(PartEVT)) {
321 const Twine &ErrMsg) {
322 const Instruction *
I = dyn_cast_or_null<Instruction>(V);
326 const char *AsmError =
", possible invalid constraint for vector type";
327 if (
const CallInst *CI = dyn_cast<CallInst>(
I))
328 if (CI->isInlineAsm())
340 const SDValue *Parts,
unsigned NumParts,
343 std::optional<CallingConv::ID> CallConv) {
345 assert(NumParts > 0 &&
"No parts to assemble!");
346 const bool IsABIRegCopy = CallConv.has_value();
355 unsigned NumIntermediates;
360 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT,
361 NumIntermediates, RegisterVT);
365 NumIntermediates, RegisterVT);
368 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
370 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
373 "Part type sizes don't match!");
377 if (NumIntermediates == NumParts) {
380 for (
unsigned i = 0; i != NumParts; ++i)
382 V, InChain, CallConv);
383 }
else if (NumParts > 0) {
386 assert(NumParts % NumIntermediates == 0 &&
387 "Must expand into a divisible number of parts!");
388 unsigned Factor = NumParts / NumIntermediates;
389 for (
unsigned i = 0; i != NumIntermediates; ++i)
391 IntermediateVT, V, InChain, CallConv);
406 DL, BuiltVectorTy, Ops);
412 if (PartEVT == ValueVT)
428 "Cannot narrow, it would be a lossy transformation");
434 if (PartEVT == ValueVT)
459 }
else if (ValueVT.
bitsLT(PartEVT)) {
468 *DAG.
getContext(), V,
"non-trivial scalar-to-vector conversion");
499 std::optional<CallingConv::ID> CallConv);
506 unsigned NumParts,
MVT PartVT,
const Value *V,
507 std::optional<CallingConv::ID> CallConv = std::nullopt,
521 unsigned OrigNumParts = NumParts;
523 "Copying to an illegal type!");
529 EVT PartEVT = PartVT;
530 if (PartEVT == ValueVT) {
531 assert(NumParts == 1 &&
"No-op copy with multiple parts!");
540 assert(NumParts == 1 &&
"Do not know what to promote to!");
551 "Unknown mismatch!");
553 Val = DAG.
getNode(ExtendKind,
DL, ValueVT, Val);
554 if (PartVT == MVT::x86mmx)
559 assert(NumParts == 1 && PartEVT != ValueVT);
565 "Unknown mismatch!");
568 if (PartVT == MVT::x86mmx)
575 "Failed to tile the value with PartVT!");
578 if (PartEVT != ValueVT) {
580 "scalar-to-vector conversion failed");
589 if (NumParts & (NumParts - 1)) {
592 "Do not know what to expand to!");
594 unsigned RoundBits = RoundParts * PartBits;
595 unsigned OddParts = NumParts - RoundParts;
604 std::reverse(Parts + RoundParts, Parts + NumParts);
606 NumParts = RoundParts;
618 for (
unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
619 for (
unsigned i = 0; i < NumParts; i += StepSize) {
620 unsigned ThisBits = StepSize * PartBits / 2;
623 SDValue &Part1 = Parts[i+StepSize/2];
630 if (ThisBits == PartBits && ThisVT != PartVT) {
638 std::reverse(Parts, Parts + OrigNumParts);
655 if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
660 if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
662 "Cannot widen to illegal type");
665 }
else if (PartEVT != ValueEVT) {
680 Ops.
append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
691 std::optional<CallingConv::ID> CallConv) {
695 const bool IsABIRegCopy = CallConv.has_value();
698 EVT PartEVT = PartVT;
699 if (PartEVT == ValueVT) {
718 TargetLowering::TypeWidenVector) {
736 "lossy conversion of vector to scalar type");
751 unsigned NumIntermediates;
755 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
760 NumIntermediates, RegisterVT);
763 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
765 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
768 "Mixing scalable and fixed vectors when copying in parts");
770 std::optional<ElementCount> DestEltCnt;
780 if (ValueVT == BuiltVectorTy) {
804 for (
unsigned i = 0; i != NumIntermediates; ++i) {
819 if (NumParts == NumIntermediates) {
822 for (
unsigned i = 0; i != NumParts; ++i)
824 }
else if (NumParts > 0) {
827 assert(NumIntermediates != 0 &&
"division by zero");
828 assert(NumParts % NumIntermediates == 0 &&
829 "Must expand into a divisible number of parts!");
830 unsigned Factor = NumParts / NumIntermediates;
831 for (
unsigned i = 0; i != NumIntermediates; ++i)
838 EVT valuevt, std::optional<CallingConv::ID>
CC)
839 : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
840 RegCount(1, regs.
size()), CallConv(
CC) {}
844 std::optional<CallingConv::ID>
CC) {
858 for (
unsigned i = 0; i != NumRegs; ++i)
860 RegVTs.push_back(RegisterVT);
889 for (
unsigned i = 0; i != NumRegs; ++i) {
895 *Glue =
P.getValue(2);
898 Chain =
P.getValue(1);
927 EVT FromVT(MVT::Other);
931 }
else if (NumSignBits > 1) {
939 assert(FromVT != MVT::Other);
945 RegisterVT, ValueVT, V, Chain,
CallConv);
975 NumParts, RegisterVT, V,
CallConv, ExtendKind);
981 for (
unsigned i = 0; i != NumRegs; ++i) {
993 if (NumRegs == 1 || Glue)
1004 Chain = Chains[NumRegs-1];
1010 unsigned MatchingIdx,
const SDLoc &dl,
1012 std::vector<SDValue> &Ops)
const {
1017 Flag.setMatchingOp(MatchingIdx);
1026 Flag.setRegClass(RC->
getID());
1037 "No 1:1 mapping from clobbers to regs?");
1045 "If we clobbered the stack pointer, MFI should know about it.");
1054 for (
unsigned i = 0; i != NumRegs; ++i) {
1056 unsigned TheReg =
Regs[Reg++];
1067 unsigned RegCount = std::get<0>(CountAndVT);
1068 MVT RegisterVT = std::get<1>(CountAndVT);
1092 UnusedArgNodeMap.clear();
1094 PendingExports.clear();
1095 PendingConstrainedFP.clear();
1096 PendingConstrainedFPStrict.clear();
1104 DanglingDebugInfoMap.clear();
1111 if (Pending.
empty())
1117 unsigned i = 0, e = Pending.
size();
1118 for (; i != e; ++i) {
1119 assert(Pending[i].getNode()->getNumOperands() > 1);
1120 if (Pending[i].getNode()->getOperand(0) == Root)
1128 if (Pending.
size() == 1)
1147 PendingConstrainedFP.size() +
1148 PendingConstrainedFPStrict.size());
1150 PendingConstrainedFP.end());
1151 PendingLoads.append(PendingConstrainedFPStrict.begin(),
1152 PendingConstrainedFPStrict.end());
1153 PendingConstrainedFP.clear();
1154 PendingConstrainedFPStrict.clear();
1161 PendingExports.append(PendingConstrainedFPStrict.begin(),
1162 PendingConstrainedFPStrict.end());
1163 PendingConstrainedFPStrict.clear();
1164 return updateRoot(PendingExports);
1171 assert(Variable &&
"Missing variable");
1178 <<
"dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
1185 if (!
N.getNode() && isa<Argument>(
Address))
1193 auto *FINode = dyn_cast<FrameIndexSDNode>(
N.getNode());
1194 if (IsParameter && FINode) {
1197 true,
DL, SDNodeOrder);
1198 }
else if (isa<Argument>(
Address)) {
1202 FuncArgumentDbgValueKind::Declare,
N);
1206 true,
DL, SDNodeOrder);
1213 FuncArgumentDbgValueKind::Declare,
N)) {
1215 <<
" (could not emit func-arg dbg_value)\n");
1227 for (
auto It = FnVarLocs->locs_begin(&
I),
End = FnVarLocs->locs_end(&
I);
1229 auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1231 if (It->Values.isKillLocation(It->Expr)) {
1237 It->Values.hasArgList())) {
1239 for (
Value *V : It->Values.location_ops())
1242 FnVarLocs->getDILocalVariable(It->VariableID),
1243 It->Expr, Vals.
size() > 1, It->DL, SDNodeOrder);
1258 for (
DbgRecord &DR :
I.getDbgRecordRange()) {
1259 if (
DPLabel *DPL = dyn_cast<DPLabel>(&DR)) {
1260 assert(DPL->getLabel() &&
"Missing label");
1262 DAG.
getDbgLabel(DPL->getLabel(), DPL->getDebugLoc(), SDNodeOrder);
1269 DPValue &DPV = cast<DPValue>(DR);
1277 LLVM_DEBUG(
dbgs() <<
"SelectionDAG visiting dbg_declare: " << DPV
1286 if (Values.
empty()) {
1294 [](
Value *V) {
return !V || isa<UndefValue>(V); })) {
1302 SDNodeOrder, IsVariadic)) {
1313 if (
I.isTerminator()) {
1314 HandlePHINodesInSuccessorBlocks(
I.getParent());
1318 if (!isa<DbgInfoIntrinsic>(
I))
1324 bool NodeInserted =
false;
1325 std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1326 MDNode *PCSectionsMD =
I.getMetadata(LLVMContext::MD_pcsections);
1328 InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1329 DAG, [&](
SDNode *) { NodeInserted =
true; });
1335 !isa<GCStatepointInst>(
I))
1340 auto It = NodeMap.find(&
I);
1341 if (It != NodeMap.end()) {
1343 }
else if (NodeInserted) {
1346 errs() <<
"warning: loosing !pcsections metadata ["
1347 <<
I.getModule()->getName() <<
"]\n";
1356void SelectionDAGBuilder::visitPHI(
const PHINode &) {
1366#define HANDLE_INST(NUM, OPCODE, CLASS) \
1367 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1368#include "llvm/IR/Instruction.def"
1380 for (
const Value *V : Values) {
1405 DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr,
DL, Order);
1410 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1411 DIVariable *DanglingVariable = DDI.getVariable();
1413 if (DanglingVariable == Variable && Expr->
fragmentsOverlap(DanglingExpr)) {
1415 << printDDI(
nullptr, DDI) <<
"\n");
1421 for (
auto &DDIMI : DanglingDebugInfoMap) {
1422 DanglingDebugInfoVector &DDIV = DDIMI.second;
1426 for (
auto &DDI : DDIV)
1427 if (isMatchingDbgValue(DDI))
1430 erase_if(DDIV, isMatchingDbgValue);
1438 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1439 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1442 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1443 for (
auto &DDI : DDIV) {
1446 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1450 "Expected inlined-at fields to agree");
1459 if (!EmitFuncArgumentDbgValue(V, Variable, Expr,
DL,
1460 FuncArgumentDbgValueKind::Value, Val)) {
1462 << printDDI(V, DDI) <<
"\n");
1469 <<
"changing SDNodeOrder from " << DbgSDNodeOrder <<
" to "
1470 << ValSDNodeOrder <<
"\n");
1471 SDV = getDbgValue(Val, Variable, Expr,
DL,
1472 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1477 <<
" in EmitFuncArgumentDbgValue\n");
1479 LLVM_DEBUG(
dbgs() <<
"Dropping debug info for " << printDDI(V, DDI)
1491 DanglingDebugInfo &DDI) {
1496 const Value *OrigV = V;
1500 unsigned SDOrder = DDI.getSDNodeOrder();
1504 bool StackValue =
true;
1513 while (isa<Instruction>(V)) {
1514 const Instruction &VAsInst = *cast<const Instruction>(V);
1529 if (!AdditionalValues.
empty())
1539 dbgs() <<
"Salvaged debug location info for:\n " << *Var <<
"\n"
1540 << *OrigV <<
"\nBy stripping back to:\n " << *V <<
"\n");
1548 assert(OrigV &&
"V shouldn't be null");
1553 << printDDI(OrigV, DDI) <<
"\n");
1570 unsigned Order,
bool IsVariadic) {
1575 if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
1580 for (
const Value *V : Values) {
1582 if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
1583 isa<ConstantPointerNull>(V)) {
1589 if (
auto *CE = dyn_cast<ConstantExpr>(V))
1590 if (CE->getOpcode() == Instruction::IntToPtr) {
1597 if (
const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1608 if (!
N.getNode() && isa<Argument>(V))
1609 N = UnusedArgNodeMap[V];
1613 EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1614 FuncArgumentDbgValueKind::Value,
N))
1616 if (
auto *FISDN = dyn_cast<FrameIndexSDNode>(
N.getNode())) {
1641 bool IsParamOfFunc =
1651 unsigned Reg = VMI->second;
1655 V->getType(), std::nullopt);
1661 unsigned BitsToDescribe = 0;
1663 BitsToDescribe = *VarSize;
1665 BitsToDescribe = Fragment->SizeInBits;
1668 if (
Offset >= BitsToDescribe)
1671 unsigned RegisterSize = RegAndSize.second;
1672 unsigned FragmentSize = (
Offset + RegisterSize > BitsToDescribe)
1673 ? BitsToDescribe -
Offset
1676 Expr,
Offset, FragmentSize);
1680 Var, *FragmentExpr, RegAndSize.first,
false, DbgLoc, SDNodeOrder);
1699 SDNodeOrder, IsVariadic);
1706 for (
auto &Pair : DanglingDebugInfoMap)
1707 for (
auto &DDI : Pair.second)
1739 if (
N.getNode())
return N;
1781 if (
const Constant *
C = dyn_cast<Constant>(V)) {
1790 if (isa<ConstantPointerNull>(
C)) {
1791 unsigned AS = V->getType()->getPointerAddressSpace();
1799 if (
const ConstantFP *CFP = dyn_cast<ConstantFP>(
C))
1802 if (isa<UndefValue>(
C) && !V->getType()->isAggregateType())
1806 visit(CE->getOpcode(), *CE);
1808 assert(N1.
getNode() &&
"visit didn't populate the NodeMap!");
1812 if (isa<ConstantStruct>(
C) || isa<ConstantArray>(
C)) {
1814 for (
const Use &U :
C->operands()) {
1820 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1828 dyn_cast<ConstantDataSequential>(
C)) {
1830 for (
unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1834 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1838 if (isa<ArrayType>(CDS->getType()))
1843 if (
C->getType()->isStructTy() ||
C->getType()->isArrayTy()) {
1844 assert((isa<ConstantAggregateZero>(
C) || isa<UndefValue>(
C)) &&
1845 "Unknown struct or array constant!");
1849 unsigned NumElts = ValueVTs.
size();
1853 for (
unsigned i = 0; i != NumElts; ++i) {
1854 EVT EltVT = ValueVTs[i];
1855 if (isa<UndefValue>(
C))
1869 if (
const auto *Equiv = dyn_cast<DSOLocalEquivalent>(
C))
1870 return getValue(Equiv->getGlobalValue());
1872 if (
const auto *
NC = dyn_cast<NoCFIValue>(
C))
1875 if (VT == MVT::aarch64svcount) {
1876 assert(
C->isNullValue() &&
"Can only zero this target type!");
1881 VectorType *VecTy = cast<VectorType>(V->getType());
1887 unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1888 for (
unsigned i = 0; i != NumElements; ++i)
1894 if (isa<ConstantAggregateZero>(
C)) {
1912 if (
const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1921 if (
const Instruction *Inst = dyn_cast<Instruction>(V)) {
1925 Inst->getType(), std::nullopt);
1933 if (
const auto *BB = dyn_cast<BasicBlock>(V))
1939void SelectionDAGBuilder::visitCatchPad(
const CatchPadInst &
I) {
1948 if (IsMSVCCXX || IsCoreCLR)
1975 Value *ParentPad =
I.getCatchSwitchParentPad();
1977 if (isa<ConstantTokenNone>(ParentPad))
1980 SuccessorColor = cast<Instruction>(ParentPad)->
getParent();
1981 assert(SuccessorColor &&
"No parent funclet for catchret!");
1983 assert(SuccessorColorMBB &&
"No MBB for SuccessorColor!");
1992void SelectionDAGBuilder::visitCleanupPad(
const CleanupPadInst &CPI) {
2036 if (isa<CleanupPadInst>(Pad)) {
2038 UnwindDests.emplace_back(FuncInfo.
MBBMap[EHPadBB], Prob);
2039 UnwindDests.back().first->setIsEHScopeEntry();
2041 }
else if (
const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2044 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2045 UnwindDests.emplace_back(FuncInfo.
MBBMap[CatchPadBB], Prob);
2046 UnwindDests.back().first->setIsEHScopeEntry();
2077 assert(UnwindDests.size() <= 1 &&
2078 "There should be at most one unwind destination for wasm");
2085 if (isa<LandingPadInst>(Pad)) {
2087 UnwindDests.emplace_back(FuncInfo.
MBBMap[EHPadBB], Prob);
2089 }
else if (isa<CleanupPadInst>(Pad)) {
2092 UnwindDests.emplace_back(FuncInfo.
MBBMap[EHPadBB], Prob);
2093 UnwindDests.
back().first->setIsEHScopeEntry();
2094 UnwindDests.back().first->setIsEHFuncletEntry();
2096 }
else if (
const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2098 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2099 UnwindDests.emplace_back(FuncInfo.
MBBMap[CatchPadBB], Prob);
2101 if (IsMSVCCXX || IsCoreCLR)
2102 UnwindDests.back().first->setIsEHFuncletEntry();
2104 UnwindDests.back().first->setIsEHScopeEntry();
2106 NewEHPadBB = CatchSwitch->getUnwindDest();
2112 if (BPI && NewEHPadBB)
2114 EHPadBB = NewEHPadBB;
2121 auto UnwindDest =
I.getUnwindDest();
2128 for (
auto &UnwindDest : UnwindDests) {
2129 UnwindDest.first->setIsEHPad();
2130 addSuccessorWithProb(
FuncInfo.
MBB, UnwindDest.first, UnwindDest.second);
2140void SelectionDAGBuilder::visitCatchSwitch(
const CatchSwitchInst &CSI) {
2144void SelectionDAGBuilder::visitRet(
const ReturnInst &
I) {
2158 if (
I.getParent()->getTerminatingDeoptimizeCall()) {
2165 const Function *
F =
I.getParent()->getParent();
2184 unsigned NumValues = ValueVTs.
size();
2187 Align BaseAlign =
DL.getPrefTypeAlign(
I.getOperand(0)->getType());
2188 for (
unsigned i = 0; i != NumValues; ++i) {
2195 if (MemVTs[i] != ValueVTs[i])
2205 MVT::Other, Chains);
2206 }
else if (
I.getNumOperands() != 0) {
2209 unsigned NumValues = ValueVTs.
size();
2213 const Function *
F =
I.getParent()->getParent();
2216 I.getOperand(0)->getType(),
F->getCallingConv(),
2220 if (
F->getAttributes().hasRetAttr(Attribute::SExt))
2222 else if (
F->getAttributes().hasRetAttr(Attribute::ZExt))
2226 bool RetInReg =
F->getAttributes().hasRetAttr(Attribute::InReg);
2228 for (
unsigned j = 0;
j != NumValues; ++
j) {
2229 EVT VT = ValueVTs[
j];
2241 &Parts[0], NumParts, PartVT, &
I,
CC, ExtendKind);
2248 if (
I.getOperand(0)->getType()->isPointerTy()) {
2250 Flags.setPointerAddrSpace(
2251 cast<PointerType>(
I.getOperand(0)->getType())->getAddressSpace());
2254 if (NeedsRegBlock) {
2255 Flags.setInConsecutiveRegs();
2256 if (j == NumValues - 1)
2257 Flags.setInConsecutiveRegsLast();
2266 for (
unsigned i = 0; i < NumParts; ++i) {
2268 Parts[i].getValueType().getSimpleVT(),
2279 const Function *
F =
I.getParent()->getParent();
2281 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2284 Flags.setSwiftError();
2303 "LowerReturn didn't return a valid chain!");
2314 if (V->getType()->isEmptyTy())
2319 assert((!V->use_empty() || isa<CallBrInst>(V)) &&
2320 "Unused value assigned virtual registers!");
2330 if (!isa<Instruction>(V) && !isa<Argument>(V))
return;
2343 if (
const Instruction *VI = dyn_cast<Instruction>(V)) {
2345 if (VI->getParent() == FromBB)
2354 if (isa<Argument>(V)) {
2371 const BasicBlock *SrcBB = Src->getBasicBlock();
2372 const BasicBlock *DstBB = Dst->getBasicBlock();
2376 auto SuccSize = std::max<uint32_t>(
succ_size(SrcBB), 1);
2386 Src->addSuccessorWithoutProb(Dst);
2389 Prob = getEdgeProbability(Src, Dst);
2390 Src->addSuccessor(Dst, Prob);
2396 return I->getParent() == BB;
2416 if (
const CmpInst *BOp = dyn_cast<CmpInst>(
Cond)) {
2420 if (CurBB == SwitchBB ||
2426 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2431 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2437 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1),
nullptr,
2439 SL->SwitchCases.push_back(CB);
2448 SL->SwitchCases.push_back(CB);
2456 unsigned Depth = 0) {
2461 auto *
I = dyn_cast<Instruction>(V);
2465 if (Necessary !=
nullptr) {
2468 if (Necessary->contains(
I))
2476 for (
unsigned OpIdx = 0,
E =
I->getNumOperands(); OpIdx <
E; ++OpIdx)
2487 if (
I.getNumSuccessors() != 2)
2490 if (!
I.isConditional())
2502 if (BPI !=
nullptr) {
2508 std::optional<bool> Likely;
2511 else if (BPI->
isEdgeHot(
I.getParent(), IfFalse))
2515 if (Opc == (*Likely ? Instruction::And : Instruction::Or))
2527 if (CostThresh <= 0)
2541 if (
const auto *RhsI = dyn_cast<Instruction>(Rhs))
2552 Value *BrCond =
I.getCondition();
2553 auto ShouldCountInsn = [&RhsDeps, &BrCond](
const Instruction *Ins) {
2554 for (
const auto *U : Ins->users()) {
2556 if (
auto *UIns = dyn_cast<Instruction>(U))
2557 if (UIns != BrCond && !RhsDeps.
contains(UIns))
2570 for (
unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
2572 for (
const auto &InsPair : RhsDeps) {
2573 if (!ShouldCountInsn(InsPair.first)) {
2574 ToDrop = InsPair.first;
2578 if (ToDrop ==
nullptr)
2580 RhsDeps.erase(ToDrop);
2583 for (
const auto &InsPair : RhsDeps) {
2591 if (CostOfIncluding > CostThresh)
2617 const Value *BOpOp0, *BOpOp1;
2631 if (BOpc == Instruction::And)
2632 BOpc = Instruction::Or;
2633 else if (BOpc == Instruction::Or)
2634 BOpc = Instruction::And;
2640 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->
hasOneUse();
2645 TProb, FProb, InvertCond);
2655 if (Opc == Instruction::Or) {
2676 auto NewTrueProb = TProb / 2;
2677 auto NewFalseProb = TProb / 2 + FProb;
2680 NewFalseProb, InvertCond);
2687 Probs[1], InvertCond);
2689 assert(Opc == Instruction::And &&
"Unknown merge op!");
2709 auto NewTrueProb = TProb + FProb / 2;
2710 auto NewFalseProb = FProb / 2;
2713 NewFalseProb, InvertCond);
2720 Probs[1], InvertCond);
2729 if (Cases.size() != 2)
return true;
2733 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2734 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2735 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2736 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2742 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2743 Cases[0].
CC == Cases[1].
CC &&
2744 isa<Constant>(Cases[0].CmpRHS) &&
2745 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2746 if (Cases[0].
CC ==
ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2748 if (Cases[0].
CC ==
ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2755void SelectionDAGBuilder::visitBr(
const BranchInst &
I) {
2761 if (
I.isUnconditional()) {
2767 if (Succ0MBB != NextBlock(BrMBB) ||
2780 const Value *CondVal =
I.getCondition();
2800 const Instruction *BOp = dyn_cast<Instruction>(CondVal);
2802 BOp->
hasOneUse() && !
I.hasMetadata(LLVMContext::MD_unpredictable)) {
2804 const Value *BOp0, *BOp1;
2807 Opcode = Instruction::And;
2809 Opcode = Instruction::Or;
2817 Opcode, BOp0, BOp1))) {
2819 getEdgeProbability(BrMBB, Succ0MBB),
2820 getEdgeProbability(BrMBB, Succ1MBB),
2825 assert(
SL->SwitchCases[0].ThisBB == BrMBB &&
"Unexpected lowering!");
2829 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i) {
2836 SL->SwitchCases.erase(
SL->SwitchCases.begin());
2842 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i)
2845 SL->SwitchCases.clear();
2851 nullptr, Succ0MBB, Succ1MBB, BrMBB,
getCurSDLoc());
2870 if (CB.
TrueBB != NextBlock(SwitchBB)) {
2912 if (cast<ConstantInt>(CB.
CmpLHS)->isMinValue(
true)) {
2933 if (CB.
TrueBB == NextBlock(SwitchBB)) {
2957 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
2958 assert(JT.Reg != -1U &&
"Should lower JT Header first!");
2972 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
2973 const SDLoc &dl = *JT.SL;
2989 unsigned JumpTableReg =
2992 JumpTableReg, SwitchOp);
2993 JT.Reg = JumpTableReg;
3005 MVT::Other, CopyTo, CMP,
3009 if (JT.MBB != NextBlock(SwitchBB))
3016 if (JT.MBB != NextBlock(SwitchBB))
3043 if (PtrTy != PtrMemTy)
3091 Entry.Node = GuardVal;
3093 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
3094 Entry.IsInReg =
true;
3095 Args.push_back(Entry);
3101 getValue(GuardCheckFn), std::move(Args));
3103 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
3117 Guard =
DAG.
getLoad(PtrMemTy, dl, Chain, GuardPtr,
3154 TLI.
makeLibCall(
DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
3185 bool UsePtrType =
false;
3189 for (
unsigned i = 0, e =
B.Cases.size(); i != e; ++i)
3209 if (!
B.FallthroughUnreachable)
3210 addSuccessorWithProb(SwitchBB,
B.Default,
B.DefaultProb);
3211 addSuccessorWithProb(SwitchBB,
MBB,
B.Prob);
3215 if (!
B.FallthroughUnreachable) {
3228 if (
MBB != NextBlock(SwitchBB))
3247 if (PopCount == 1) {
3254 }
else if (PopCount == BB.
Range) {
3273 addSuccessorWithProb(SwitchBB,
B.TargetBB,
B.ExtraProb);
3275 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3286 if (NextMBB != NextBlock(SwitchBB))
3293void SelectionDAGBuilder::visitInvoke(
const InvokeInst &
I) {
3304 assert(!
I.hasOperandBundlesOtherThan(
3305 {LLVMContext::OB_deopt, LLVMContext::OB_gc_transition,
3306 LLVMContext::OB_gc_live, LLVMContext::OB_funclet,
3307 LLVMContext::OB_cfguardtarget,
3308 LLVMContext::OB_clang_arc_attachedcall}) &&
3309 "Cannot lower invokes with arbitrary operand bundles yet!");
3311 const Value *Callee(
I.getCalledOperand());
3312 const Function *Fn = dyn_cast<Function>(Callee);
3313 if (isa<InlineAsm>(Callee))
3314 visitInlineAsm(
I, EHPadBB);
3319 case Intrinsic::donothing:
3321 case Intrinsic::seh_try_begin:
3322 case Intrinsic::seh_scope_begin:
3323 case Intrinsic::seh_try_end:
3324 case Intrinsic::seh_scope_end:
3330 case Intrinsic::experimental_patchpoint_void:
3331 case Intrinsic::experimental_patchpoint_i64:
3332 visitPatchpoint(
I, EHPadBB);
3334 case Intrinsic::experimental_gc_statepoint:
3337 case Intrinsic::wasm_rethrow: {
3366 if (!isa<GCStatepointInst>(
I)) {
3378 addSuccessorWithProb(InvokeMBB, Return);
3379 for (
auto &UnwindDest : UnwindDests) {
3380 UnwindDest.first->setIsEHPad();
3381 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3390void SelectionDAGBuilder::visitCallBr(
const CallBrInst &
I) {
3395 assert(!
I.hasOperandBundlesOtherThan(
3396 {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
3397 "Cannot lower callbrs with arbitrary operand bundles yet!");
3399 assert(
I.isInlineAsm() &&
"Only know how to handle inlineasm callbr");
3405 Dests.
insert(
I.getDefaultDest());
3410 for (
unsigned i = 0, e =
I.getNumIndirectDests(); i < e; ++i) {
3413 Target->setIsInlineAsmBrIndirectTarget();
3414 Target->setMachineBlockAddressTaken();
3415 Target->setLabelMustBeEmitted();
3417 if (Dests.
insert(Dest).second)
3428void SelectionDAGBuilder::visitResume(
const ResumeInst &RI) {
3429 llvm_unreachable(
"SelectionDAGBuilder shouldn't visit resume instructions!");
3432void SelectionDAGBuilder::visitLandingPad(
const LandingPadInst &LP) {
3434 "Call to landingpad not in landing pad!");
3454 assert(ValueVTs.
size() == 2 &&
"Only two-valued landingpads are supported");
3484 if (JTB.first.HeaderBB ==
First)
3485 JTB.first.HeaderBB =
Last;
3498 for (
unsigned i = 0, e =
I.getNumSuccessors(); i != e; ++i) {
3500 bool Inserted =
Done.insert(BB).second;
3505 addSuccessorWithProb(IndirectBrMBB, Succ);
3520 if (
const CallInst *Call = dyn_cast_or_null<CallInst>(
I.getPrevNode())) {
3521 if (
Call->doesNotReturn())
3529void SelectionDAGBuilder::visitUnary(
const User &
I,
unsigned Opcode) {
3531 if (
auto *FPOp = dyn_cast<FPMathOperator>(&
I))
3532 Flags.copyFMF(*FPOp);
3540void SelectionDAGBuilder::visitBinary(
const User &
I,
unsigned Opcode) {
3542 if (
auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&
I)) {
3543 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3544 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3546 if (
auto *ExactOp = dyn_cast<PossiblyExactOperator>(&
I))
3547 Flags.setExact(ExactOp->isExact());
3548 if (
auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&
I))
3549 Flags.setDisjoint(DisjointOp->isDisjoint());
3550 if (
auto *FPOp = dyn_cast<FPMathOperator>(&
I))
3551 Flags.copyFMF(*FPOp);
3560void SelectionDAGBuilder::visitShift(
const User &
I,
unsigned Opcode) {
3569 if (!
I.getType()->isVectorTy() && Op2.
getValueType() != ShiftTy) {
3571 "Unexpected shift type");
3582 dyn_cast<const OverflowingBinaryOperator>(&
I)) {
3583 nuw = OFBinOp->hasNoUnsignedWrap();
3584 nsw = OFBinOp->hasNoSignedWrap();
3587 dyn_cast<const PossiblyExactOperator>(&
I))
3588 exact = ExactOp->isExact();
3591 Flags.setExact(exact);
3592 Flags.setNoSignedWrap(nsw);
3593 Flags.setNoUnsignedWrap(nuw);
3599void SelectionDAGBuilder::visitSDiv(
const User &
I) {
3604 Flags.setExact(isa<PossiblyExactOperator>(&
I) &&
3605 cast<PossiblyExactOperator>(&
I)->isExact());
3610void SelectionDAGBuilder::visitICmp(
const User &
I) {
3612 if (
const ICmpInst *IC = dyn_cast<ICmpInst>(&
I))
3613 predicate = IC->getPredicate();
3614 else if (
const ConstantExpr *IC = dyn_cast<ConstantExpr>(&
I))
3637void SelectionDAGBuilder::visitFCmp(
const User &
I) {
3639 if (
const FCmpInst *FC = dyn_cast<FCmpInst>(&
I))
3640 predicate =
FC->getPredicate();
3641 else if (
const ConstantExpr *FC = dyn_cast<ConstantExpr>(&
I))
3647 auto *FPMO = cast<FPMathOperator>(&
I);
3652 Flags.copyFMF(*FPMO);
3664 return isa<SelectInst>(V);
3668void SelectionDAGBuilder::visitSelect(
const User &
I) {
3672 unsigned NumValues = ValueVTs.
size();
3673 if (NumValues == 0)
return;
3683 bool IsUnaryAbs =
false;
3684 bool Negate =
false;
3687 if (
auto *FPOp = dyn_cast<FPMathOperator>(&
I))
3688 Flags.copyFMF(*FPOp);
3690 Flags.setUnpredictable(
3691 cast<SelectInst>(
I).getMetadata(LLVMContext::MD_unpredictable));
3695 EVT VT = ValueVTs[0];
3707 bool UseScalarMinMax = VT.
isVector() &&
3716 switch (SPR.Flavor) {
3722 switch (SPR.NaNBehavior) {
3735 switch (SPR.NaNBehavior) {
3779 for (
unsigned i = 0; i != NumValues; ++i) {
3788 for (
unsigned i = 0; i != NumValues; ++i) {
3802void SelectionDAGBuilder::visitTrunc(
const User &
I) {
3810void SelectionDAGBuilder::visitZExt(
const User &
I) {
3818 if (
auto *PNI = dyn_cast<PossiblyNonNegInst>(&
I))
3819 Flags.setNonNeg(PNI->hasNonNeg());
3824 if (
Flags.hasNonNeg() &&
3833void SelectionDAGBuilder::visitSExt(
const User &
I) {
3842void SelectionDAGBuilder::visitFPTrunc(
const User &
I) {
3853void SelectionDAGBuilder::visitFPExt(
const User &
I) {
3861void SelectionDAGBuilder::visitFPToUI(
const User &
I) {
3869void SelectionDAGBuilder::visitFPToSI(
const User &
I) {
3877void SelectionDAGBuilder::visitUIToFP(
const User &
I) {
3885void SelectionDAGBuilder::visitSIToFP(
const User &
I) {
3893void SelectionDAGBuilder::visitPtrToInt(
const User &
I) {
3907void SelectionDAGBuilder::visitIntToPtr(
const User &
I) {
3919void SelectionDAGBuilder::visitBitCast(
const User &
I) {
3927 if (DestVT !=
N.getValueType())
3934 else if(
ConstantInt *
C = dyn_cast<ConstantInt>(
I.getOperand(0)))
3941void SelectionDAGBuilder::visitAddrSpaceCast(
const User &
I) {
3943 const Value *SV =
I.getOperand(0);
3948 unsigned DestAS =
I.getType()->getPointerAddressSpace();
3956void SelectionDAGBuilder::visitInsertElement(
const User &
I) {
3964 InVec, InVal, InIdx));
3967void SelectionDAGBuilder::visitExtractElement(
const User &
I) {
3977void SelectionDAGBuilder::visitShuffleVector(
const User &
I) {
3981 if (
auto *SVI = dyn_cast<ShuffleVectorInst>(&
I))
3982 Mask = SVI->getShuffleMask();
3984 Mask = cast<ConstantExpr>(
I).getShuffleMask();
3990 if (
all_of(Mask, [](
int Elem) {
return Elem == 0; }) &&
4006 unsigned MaskNumElts =
Mask.size();
4008 if (SrcNumElts == MaskNumElts) {
4014 if (SrcNumElts < MaskNumElts) {
4018 if (MaskNumElts % SrcNumElts == 0) {
4022 unsigned NumConcat = MaskNumElts / SrcNumElts;
4023 bool IsConcat =
true;
4025 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4031 if ((
Idx % SrcNumElts != (i % SrcNumElts)) ||
4032 (ConcatSrcs[i / SrcNumElts] >= 0 &&
4033 ConcatSrcs[i / SrcNumElts] != (
int)(
Idx / SrcNumElts))) {
4038 ConcatSrcs[i / SrcNumElts] =
Idx / SrcNumElts;
4045 for (
auto Src : ConcatSrcs) {
4058 unsigned PaddedMaskNumElts =
alignTo(MaskNumElts, SrcNumElts);
4059 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
4076 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4078 if (
Idx >= (
int)SrcNumElts)
4079 Idx -= SrcNumElts - PaddedMaskNumElts;
4087 if (MaskNumElts != PaddedMaskNumElts)
4095 if (SrcNumElts > MaskNumElts) {
4098 int StartIdx[2] = { -1, -1 };
4099 bool CanExtract =
true;
4100 for (
int Idx : Mask) {
4105 if (
Idx >= (
int)SrcNumElts) {
4114 if (NewStartIdx + MaskNumElts > SrcNumElts ||
4115 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
4119 StartIdx[Input] = NewStartIdx;
4122 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
4128 for (
unsigned Input = 0; Input < 2; ++Input) {
4129 SDValue &Src = Input == 0 ? Src1 : Src2;
4130 if (StartIdx[Input] < 0)
4140 for (
int &
Idx : MappedOps) {
4141 if (
Idx >= (
int)SrcNumElts)
4142 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4157 for (
int Idx : Mask) {
4163 SDValue &Src =
Idx < (int)SrcNumElts ? Src1 : Src2;
4164 if (
Idx >= (
int)SrcNumElts)
Idx -= SrcNumElts;
4178 const Value *Op0 =
I.getOperand(0);
4179 const Value *Op1 =
I.getOperand(1);
4180 Type *AggTy =
I.getType();
4182 bool IntoUndef = isa<UndefValue>(Op0);
4183 bool FromUndef = isa<UndefValue>(Op1);
4193 unsigned NumAggValues = AggValueVTs.
size();
4194 unsigned NumValValues = ValValueVTs.
size();
4198 if (!NumAggValues) {
4206 for (; i != LinearIndex; ++i)
4207 Values[i] = IntoUndef ?
DAG.
getUNDEF(AggValueVTs[i]) :
4212 for (; i != LinearIndex + NumValValues; ++i)
4213 Values[i] = FromUndef ?
DAG.
getUNDEF(AggValueVTs[i]) :
4217 for (; i != NumAggValues; ++i)
4218 Values[i] = IntoUndef ?
DAG.
getUNDEF(AggValueVTs[i]) :
4227 const Value *Op0 =
I.getOperand(0);
4229 Type *ValTy =
I.getType();
4230 bool OutOfUndef = isa<UndefValue>(Op0);
4238 unsigned NumValValues = ValValueVTs.
size();
4241 if (!NumValValues) {
4250 for (
unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4251 Values[i - LinearIndex] =
4260void SelectionDAGBuilder::visitGetElementPtr(
const User &
I) {
4261 Value *Op0 =
I.getOperand(0);
4271 bool IsVectorGEP =
I.getType()->isVectorTy();
4273 IsVectorGEP ? cast<VectorType>(
I.getType())->getElementCount()
4276 if (IsVectorGEP && !
N.getValueType().isVector()) {
4284 const Value *
Idx = GTI.getOperand();
4285 if (
StructType *StTy = GTI.getStructTypeOrNull()) {
4286 unsigned Field = cast<Constant>(
Idx)->getUniqueInteger().getZExtValue();
4295 if (int64_t(
Offset) >= 0 && cast<GEPOperator>(
I).isInBounds())
4296 Flags.setNoUnsignedWrap(
true);
4312 bool ElementScalable = ElementSize.
isScalable();
4316 const auto *
C = dyn_cast<Constant>(
Idx);
4317 if (
C && isa<VectorType>(
C->getType()))
4318 C =
C->getSplatValue();
4320 const auto *CI = dyn_cast_or_null<ConstantInt>(
C);
4321 if (CI && CI->isZero())
4323 if (CI && !ElementScalable) {
4337 Flags.setNoUnsignedWrap(
true);
4350 VectorElementCount);
4358 if (ElementScalable) {
4359 EVT VScaleTy =
N.getValueType().getScalarType();
4369 if (ElementMul != 1) {
4370 if (ElementMul.isPowerOf2()) {
4371 unsigned Amt = ElementMul.logBase2();
4373 N.getValueType(), IdxN,
4379 N.getValueType(), IdxN, Scale);
4385 N.getValueType(),
N, IdxN);
4396 if (PtrMemTy != PtrTy && !cast<GEPOperator>(
I).isInBounds())
4402void SelectionDAGBuilder::visitAlloca(
const AllocaInst &
I) {
4409 Type *Ty =
I.getAllocatedType();
4413 MaybeAlign Alignment = std::max(
DL.getPrefTypeAlign(Ty),
I.getAlign());
4437 if (*Alignment <= StackAlign)
4438 Alignment = std::nullopt;
4445 Flags.setNoUnsignedWrap(
true);
4455 DAG.
getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4471 if (!
I.hasMetadata(LLVMContext::MD_noundef))
4473 return I.getMetadata(LLVMContext::MD_range);
4476void SelectionDAGBuilder::visitLoad(
const LoadInst &
I) {
4478 return visitAtomicLoad(
I);
4481 const Value *SV =
I.getOperand(0);
4485 if (
const Argument *Arg = dyn_cast<Argument>(SV)) {
4486 if (Arg->hasSwiftErrorAttr())
4487 return visitLoadFromSwiftError(
I);
4490 if (
const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
4491 if (Alloca->isSwiftError())
4492 return visitLoadFromSwiftError(
I);
4498 Type *Ty =
I.getType();
4502 unsigned NumValues = ValueVTs.
size();
4506 Align Alignment =
I.getAlign();
4509 bool isVolatile =
I.isVolatile();
4514 bool ConstantMemory =
false;
4527 ConstantMemory =
true;
4542 unsigned ChainI = 0;
4543 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4566 MMOFlags, AAInfo, Ranges);
4567 Chains[ChainI] =
L.getValue(1);
4569 if (MemVTs[i] != ValueVTs[i])
4575 if (!ConstantMemory) {
4588void SelectionDAGBuilder::visitStoreToSwiftError(
const StoreInst &
I) {
4590 "call visitStoreToSwiftError when backend supports swifterror");
4594 const Value *SrcV =
I.getOperand(0);
4596 SrcV->
getType(), ValueVTs, &Offsets, 0);
4597 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4598 "expect a single EVT for swifterror");
4607 SDValue(Src.getNode(), Src.getResNo()));
4611void SelectionDAGBuilder::visitLoadFromSwiftError(
const LoadInst &
I) {
4613 "call visitLoadFromSwiftError when backend supports swifterror");
4616 !
I.hasMetadata(LLVMContext::MD_nontemporal) &&
4617 !
I.hasMetadata(LLVMContext::MD_invariant_load) &&
4618 "Support volatile, non temporal, invariant for load_from_swift_error");
4620 const Value *SV =
I.getOperand(0);
4621 Type *Ty =
I.getType();
4626 I.getAAMetadata()))) &&
4627 "load_from_swift_error should not be constant memory");
4632 ValueVTs, &Offsets, 0);
4633 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4634 "expect a single EVT for swifterror");
4644void SelectionDAGBuilder::visitStore(
const StoreInst &
I) {
4646 return visitAtomicStore(
I);
4648 const Value *SrcV =
I.getOperand(0);
4649 const Value *PtrV =
I.getOperand(1);
4655 if (
const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4656 if (Arg->hasSwiftErrorAttr())
4657 return visitStoreToSwiftError(
I);
4660 if (
const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4661 if (Alloca->isSwiftError())
4662 return visitStoreToSwiftError(
I);
4669 SrcV->
getType(), ValueVTs, &MemVTs, &Offsets);
4670 unsigned NumValues = ValueVTs.
size();
4683 Align Alignment =
I.getAlign();
4688 unsigned ChainI = 0;
4689 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4706 if (MemVTs[i] != ValueVTs[i])
4709 DAG.
getStore(Root, dl, Val,
Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4710 Chains[ChainI] = St;
4719void SelectionDAGBuilder::visitMaskedStore(
const CallInst &
I,
4720 bool IsCompressing) {
4726 Src0 =
I.getArgOperand(0);
4727 Ptr =
I.getArgOperand(1);
4728 Alignment = cast<ConstantInt>(
I.getArgOperand(2))->getAlignValue();
4729 Mask =
I.getArgOperand(3);
4734 Src0 =
I.getArgOperand(0);
4735 Ptr =
I.getArgOperand(1);
4736 Mask =
I.getArgOperand(2);
4737 Alignment =
I.getParamAlign(1).valueOrOne();
4740 Value *PtrOperand, *MaskOperand, *Src0Operand;
4743 getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4745 getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4787 assert(
Ptr->getType()->isVectorTy() &&
"Unexpected pointer type");
4790 if (
auto *
C = dyn_cast<Constant>(
Ptr)) {
4791 C =
C->getSplatValue();
4797 ElementCount NumElts = cast<VectorType>(
Ptr->getType())->getElementCount();
4806 if (!
GEP ||
GEP->getParent() != CurBB)
4809 if (
GEP->getNumOperands() != 2)
4812 const Value *BasePtr =
GEP->getPointerOperand();
4813 const Value *IndexVal =
GEP->getOperand(
GEP->getNumOperands() - 1);
4819 TypeSize ScaleVal =
DL.getTypeAllocSize(
GEP->getResultElementType());
4824 if (ScaleVal != 1 &&
4837void SelectionDAGBuilder::visitMaskedScatter(
const CallInst &
I) {
4845 Align Alignment = cast<ConstantInt>(
I.getArgOperand(2))
4846 ->getMaybeAlignValue()
4857 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
4879 Ops, MMO, IndexType,
false);
4884void SelectionDAGBuilder::visitMaskedLoad(
const CallInst &
I,
bool IsExpanding) {
4890 Ptr =
I.getArgOperand(0);
4891 Alignment = cast<ConstantInt>(
I.getArgOperand(1))->getAlignValue();
4892 Mask =
I.getArgOperand(2);
4893 Src0 =
I.getArgOperand(3);
4898 Ptr =
I.getArgOperand(0);
4899 Alignment =
I.getParamAlign(0).valueOrOne();
4900 Mask =
I.getArgOperand(1);
4901 Src0 =
I.getArgOperand(2);
4904 Value *PtrOperand, *MaskOperand, *Src0Operand;
4907 getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4909 getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4938void SelectionDAGBuilder::visitMaskedGather(
const CallInst &
I) {
4948 Align Alignment = cast<ConstantInt>(
I.getArgOperand(1))
4949 ->getMaybeAlignValue()
4961 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
5011 dl, MemVT, VTs, InChain,
5022void SelectionDAGBuilder::visitAtomicRMW(
const AtomicRMWInst &
I) {
5025 switch (
I.getOperation()) {
5074void SelectionDAGBuilder::visitFence(
const FenceInst &
I) {
5088void SelectionDAGBuilder::visitAtomicLoad(
const LoadInst &
I) {
5107 I.getAlign(),
AAMDNodes(),
nullptr, SSID, Order);
5123void SelectionDAGBuilder::visitAtomicStore(
const StoreInst &
I) {
5144 I.getAlign(),
AAMDNodes(),
nullptr, SSID, Ordering);
5160void SelectionDAGBuilder::visitTargetIntrinsic(
const CallInst &
I,
5161 unsigned Intrinsic) {
5166 bool HasChain = !
F->doesNotAccessMemory();
5167 bool OnlyLoad = HasChain &&
F->onlyReadsMemory();
5194 for (
unsigned i = 0, e =
I.arg_size(); i != e; ++i) {
5195 const Value *Arg =
I.getArgOperand(i);
5196 if (!
I.paramHasAttr(i, Attribute::ImmArg)) {
5203 if (
const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
5204 assert(CI->getBitWidth() <= 64 &&
5205 "large intrinsic immediates not handled");
5223 if (
auto *FPMO = dyn_cast<FPMathOperator>(&
I))
5224 Flags.copyFMF(*FPMO);
5231 auto *Token = Bundle->Inputs[0].get();
5233 assert(Ops.
back().getValueType() != MVT::Glue &&
5234 "Did not expected another glue node here.");
5242 if (IsTgtIntrinsic) {
5250 else if (
Info.fallbackAddressSpace)
5254 Info.size,
I.getAAMetadata());
5255 }
else if (!HasChain) {
5257 }
else if (!
I.getType()->isVoidTy()) {
5271 if (!
I.getType()->isVoidTy()) {
5272 if (!isa<VectorType>(
I.getType()))
5344 SDValue TwoToFractionalPartOfX;
5421 if (
Op.getValueType() == MVT::f32 &&
5445 if (
Op.getValueType() == MVT::f32 &&
5544 if (
Op.getValueType() == MVT::f32 &&
5628 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5641 if (
Op.getValueType() == MVT::f32 &&
5718 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5729 if (
Op.getValueType() == MVT::f32 &&
5742 bool IsExp10 =
false;
5743 if (
LHS.getValueType() == MVT::f32 &&
RHS.getValueType() == MVT::f32 &&
5747 IsExp10 = LHSC->isExactlyValue(Ten);
5774 unsigned Val = RHSC->getSExtValue();
5803 CurSquare, CurSquare);
5808 if (RHSC->getSExtValue() < 0)
5822 EVT VT =
LHS.getValueType();
5845 if ((ScaleInt > 0 || (Saturating &&
Signed)) &&
5849 Opcode, VT, ScaleInt);
5884 switch (
N.getOpcode()) {
5887 Regs.emplace_back(cast<RegisterSDNode>(
Op)->
getReg(),
5888 Op.getValueType().getSizeInBits());
5913bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
5916 const Argument *Arg = dyn_cast<Argument>(V);
5930 auto &Inst =
TII->get(TargetOpcode::DBG_INSTR_REF);
5937 auto *NewDIExpr = FragExpr;
5944 return BuildMI(MF,
DL, Inst,
false, MOs, Variable, NewDIExpr);
5947 auto &Inst =
TII->get(TargetOpcode::DBG_VALUE);
5948 return BuildMI(MF,
DL, Inst, Indirect, Reg, Variable, FragExpr);
5952 if (Kind == FuncArgumentDbgValueKind::Value) {
5957 if (!IsInEntryBlock)
5973 bool VariableIsFunctionInputArg = Variable->
isParameter() &&
5974 !
DL->getInlinedAt();
5976 if (!IsInPrologue && !VariableIsFunctionInputArg)
6007 if (VariableIsFunctionInputArg) {
6017 bool IsIndirect =
false;
6018 std::optional<MachineOperand>
Op;
6021 if (FI != std::numeric_limits<int>::max())
6025 if (!
Op &&
N.getNode()) {
6028 if (ArgRegsAndSizes.
size() == 1)
6029 Reg = ArgRegsAndSizes.
front().first;
6031 if (Reg &&
Reg.isVirtual()) {
6039 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6043 if (!
Op &&
N.getNode()) {
6048 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
6057 for (
const auto &RegAndSize : SplitRegs) {
6061 int RegFragmentSizeInBits = RegAndSize.second;
6063 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
6066 if (
Offset >= ExprFragmentSizeInBits)
6070 if (
Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
6071 RegFragmentSizeInBits = ExprFragmentSizeInBits -
Offset;
6076 Expr,
Offset, RegFragmentSizeInBits);
6077 Offset += RegAndSize.second;
6080 if (!FragmentExpr) {
6087 MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
6088 Kind != FuncArgumentDbgValueKind::Value);
6099 V->getType(), std::nullopt);
6100 if (RFV.occupiesMultipleRegs()) {
6101 splitMultiRegDbgValue(RFV.getRegsAndSizes());
6106 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6107 }
else if (ArgRegsAndSizes.
size() > 1) {
6110 splitMultiRegDbgValue(ArgRegsAndSizes);
6119 "Expected inlined-at fields to agree");
6123 NewMI = MakeVRegDbgValue(
Op->getReg(), Expr, IsIndirect);
6125 NewMI =
BuildMI(MF,
DL,
TII->get(TargetOpcode::DBG_VALUE),
true, *
Op,
6138 unsigned DbgSDNodeOrder) {
6139 if (
auto *FISDN = dyn_cast<FrameIndexSDNode>(
N.getNode())) {
6151 false, dl, DbgSDNodeOrder);
6154 false, dl, DbgSDNodeOrder);
6158 switch (Intrinsic) {
6159 case Intrinsic::smul_fix:
6161 case Intrinsic::umul_fix:
6163 case Intrinsic::smul_fix_sat:
6165 case Intrinsic::umul_fix_sat:
6167 case Intrinsic::sdiv_fix:
6169 case Intrinsic::udiv_fix:
6171 case Intrinsic::sdiv_fix_sat:
6173 case Intrinsic::udiv_fix_sat:
6180void SelectionDAGBuilder::lowerCallToExternalSymbol(
const CallInst &
I,
6181 const char *FunctionName) {
6182 assert(FunctionName &&
"FunctionName must not be nullptr");
6192 assert(cast<CallBase>(PreallocatedSetup)
6195 "expected call_preallocated_setup Value");
6196 for (
const auto *U : PreallocatedSetup->
users()) {
6197 auto *UseCall = cast<CallBase>(U);
6198 const Function *Fn = UseCall->getCalledFunction();
6199 if (!Fn || Fn->
getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6209bool SelectionDAGBuilder::visitEntryValueDbgValue(
6216 const Argument *Arg = cast<Argument>(Values[0]);
6222 dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6223 "couldn't find an associated register for the Argument\n");
6226 Register ArgVReg = ArgIt->getSecond();
6229 if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6231 Variable, Expr, PhysReg,
false , DbgLoc, SDNodeOrder);
6235 LLVM_DEBUG(
dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6236 "couldn't find a physical register\n");
6241void SelectionDAGBuilder::visitConvergenceControl(
const CallInst &
I,
6242 unsigned Intrinsic) {
6244 switch (Intrinsic) {
6245 case Intrinsic::experimental_convergence_anchor:
6248 case Intrinsic::experimental_convergence_entry:
6251 case Intrinsic::experimental_convergence_loop: {
6253 auto *Token = Bundle->Inputs[0].get();
6262void SelectionDAGBuilder::visitIntrinsicCall(
const CallInst &
I,
6263 unsigned Intrinsic) {
6270 if (
auto *FPOp = dyn_cast<FPMathOperator>(&
I))
6271 Flags.copyFMF(*FPOp);
6273 switch (Intrinsic) {
6276 visitTargetIntrinsic(
I, Intrinsic);
6278 case Intrinsic::vscale: {
6283 case Intrinsic::vastart: visitVAStart(
I);
return;
6284 case Intrinsic::vaend: visitVAEnd(
I);
return;
6285 case Intrinsic::vacopy: visitVACopy(
I);
return;
6286 case Intrinsic::returnaddress:
6291 case Intrinsic::addressofreturnaddress:
6296 case Intrinsic::sponentry:
6301 case Intrinsic::frameaddress:
6306 case Intrinsic::read_volatile_register:
6307 case Intrinsic::read_register: {
6311 DAG.
getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6319 case Intrinsic::write_register: {
6321 Value *RegValue =
I.getArgOperand(1);
6324 DAG.
getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6329 case Intrinsic::memcpy: {
6330 const auto &MCI = cast<MemCpyInst>(
I);
6335 Align DstAlign = MCI.getDestAlign().valueOrOne();
6336 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6337 Align Alignment = std::min(DstAlign, SrcAlign);
6338 bool isVol = MCI.isVolatile();
6344 Root, sdl, Op1, Op2, Op3, Alignment, isVol,
6347 updateDAGForMaybeTailCall(MC);
6350 case Intrinsic::memcpy_inline: {
6351 const auto &MCI = cast<MemCpyInlineInst>(
I);
6355 assert(isa<ConstantSDNode>(
Size) &&
"memcpy_inline needs constant size");
6357 Align DstAlign = MCI.getDestAlign().valueOrOne();
6358 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6359 Align Alignment = std::min(DstAlign, SrcAlign);
6360 bool isVol = MCI.isVolatile();
6368 updateDAGForMaybeTailCall(MC);
6371 case Intrinsic::memset: {
6372 const auto &MSI = cast<MemSetInst>(
I);
6377 Align Alignment = MSI.getDestAlign().valueOrOne();
6378 bool isVol = MSI.isVolatile();
6382 Root, sdl, Op1, Op2, Op3, Alignment, isVol,
false,
6384 updateDAGForMaybeTailCall(MS);
6387 case Intrinsic::memset_inline: {
6388 const auto &MSII = cast<MemSetInlineInst>(
I);
6392 assert(isa<ConstantSDNode>(
Size) &&
"memset_inline needs constant size");
6394 Align DstAlign = MSII.getDestAlign().valueOrOne();
6395 bool isVol = MSII.isVolatile();
6402 updateDAGForMaybeTailCall(MC);
6405 case Intrinsic::memmove: {
6406 const auto &MMI = cast<MemMoveInst>(
I);
6411 Align DstAlign = MMI.getDestAlign().valueOrOne();
6412 Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6413 Align Alignment = std::min(DstAlign, SrcAlign);
6414 bool isVol = MMI.isVolatile();
6422 I.getAAMetadata(),
AA);
6423 updateDAGForMaybeTailCall(MM);
6426 case Intrinsic::memcpy_element_unordered_atomic: {
6432 Type *LengthTy =
MI.getLength()->getType();
6433 unsigned ElemSz =
MI.getElementSizeInBytes();
6439 updateDAGForMaybeTailCall(MC);
6442 case Intrinsic::memmove_element_unordered_atomic: {
6443 auto &
MI = cast<AtomicMemMoveInst>(
I);
6448 Type *LengthTy =
MI.getLength()->getType();
6449 unsigned ElemSz =
MI.getElementSizeInBytes();
6455 updateDAGForMaybeTailCall(MC);
6458 case Intrinsic::memset_element_unordered_atomic: {
6459 auto &
MI = cast<AtomicMemSetInst>(
I);
6464 Type *LengthTy =
MI.getLength()->getType();
6465 unsigned ElemSz =
MI.getElementSizeInBytes();
6470 updateDAGForMaybeTailCall(MC);
6473 case Intrinsic::call_preallocated_setup: {
6482 case Intrinsic::call_preallocated_arg: {
6497 case Intrinsic::dbg_declare: {
6498 const auto &DI = cast<DbgDeclareInst>(
I);
6501 if (AssignmentTrackingEnabled ||
6504 LLVM_DEBUG(
dbgs() <<
"SelectionDAG visiting dbg_declare: " << DI <<
"\n");
6510 assert(!DI.hasArgList() &&
"Only dbg.value should currently use DIArgList");
6515 case Intrinsic::dbg_label: {
6518 assert(Label &&
"Missing label");
6525 case Intrinsic::dbg_assign: {
6527 if (AssignmentTrackingEnabled)
6533 case Intrinsic::dbg_value: {
6535 if (AssignmentTrackingEnabled)
6555 SDNodeOrder, IsVariadic))
6561 case Intrinsic::eh_typeid_for: {
6570 case Intrinsic::eh_return_i32:
6571 case Intrinsic::eh_return_i64:
6579 case Intrinsic::eh_unwind_init:
6582 case Intrinsic::eh_dwarf_cfa:
6587 case Intrinsic::eh_sjlj_callsite: {
6589 ConstantInt *CI = cast<ConstantInt>(
I.getArgOperand(0));
6595 case Intrinsic::eh_sjlj_functioncontext: {
6599 cast<AllocaInst>(
I.getArgOperand(0)->stripPointerCasts());
6604 case Intrinsic::eh_sjlj_setjmp: {
6614 case Intrinsic::eh_sjlj_longjmp:
6618 case Intrinsic::eh_sjlj_setup_dispatch:
6622 case Intrinsic::masked_gather:
6623 visitMaskedGather(
I);
6625 case Intrinsic::masked_load:
6628 case Intrinsic::masked_scatter:
6629 visitMaskedScatter(
I);
6631 case Intrinsic::masked_store:
6632 visitMaskedStore(
I);
6634 case Intrinsic::masked_expandload:
6635 visitMaskedLoad(
I,
true );
6637 case Intrinsic::masked_compressstore:
6638 visitMaskedStore(
I,
true );
6640 case Intrinsic::powi:
6644 case Intrinsic::log:
6647 case Intrinsic::log2:
6651 case Intrinsic::log10:
6655 case Intrinsic::exp:
6658 case Intrinsic::exp2:
6662 case Intrinsic::pow:
6666 case Intrinsic::sqrt:
6667 case Intrinsic::fabs:
6668 case Intrinsic::sin:
6669 case Intrinsic::cos:
6670 case Intrinsic::exp10:
6671 case Intrinsic::floor:
6672 case Intrinsic::ceil:
6673 case Intrinsic::trunc:
6674 case Intrinsic::rint:
6675 case Intrinsic::nearbyint:
6676 case Intrinsic::round:
6677 case Intrinsic::roundeven:
6678 case Intrinsic::canonicalize: {
6680 switch (Intrinsic) {
6682 case Intrinsic::sqrt: Opcode =
ISD::FSQRT;
break;
6683 case Intrinsic::fabs: Opcode =
ISD::FABS;
break;
6684 case Intrinsic::sin: Opcode =
ISD::FSIN;
break;
6685 case Intrinsic::cos: Opcode =
ISD::FCOS;
break;
6686 case Intrinsic::exp10: Opcode =
ISD::FEXP10;
break;
6687 case Intrinsic::floor: Opcode =
ISD::FFLOOR;
break;
6688 case Intrinsic::ceil: Opcode =
ISD::FCEIL;
break;
6689 case Intrinsic::trunc: Opcode =
ISD::FTRUNC;
break;
6690 case Intrinsic::rint: Opcode =
ISD::FRINT;
break;
6692 case Intrinsic::round: Opcode =
ISD::FROUND;
break;
6702 case Intrinsic::lround:
6703 case Intrinsic::llround:
6704 case Intrinsic::lrint:
6705 case Intrinsic::llrint: {
6707 switch (Intrinsic) {
6709 case Intrinsic::lround: Opcode =
ISD::LROUND;
break;
6711 case Intrinsic::lrint: Opcode =
ISD::LRINT;
break;
6712 case Intrinsic::llrint: Opcode =
ISD::LLRINT;
break;
6720 case Intrinsic::minnum:
6726 case Intrinsic::maxnum:
6732 case Intrinsic::minimum:
6738 case Intrinsic::maximum:
6744 case Intrinsic::copysign:
6750 case Intrinsic::ldexp:
6756 case Intrinsic::frexp: {
6764 case Intrinsic::arithmetic_fence: {
6770 case Intrinsic::fma:
6776#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
6777 case Intrinsic::INTRINSIC:
6778#include "llvm/IR/ConstrainedOps.def"
6779 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(
I));
6781#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6782#include "llvm/IR/VPIntrinsics.def"
6783 visitVectorPredicationIntrinsic(cast<VPIntrinsic>(
I));
6785 case Intrinsic::fptrunc_round: {
6788 Metadata *MD = cast<MetadataAsValue>(
I.getArgOperand(1))->getMetadata();
6789 std::optional<RoundingMode> RoundMode =
6796 Flags.copyFMF(*cast<FPMathOperator>(&
I));
6808 case Intrinsic::fmuladd: {
6829 case Intrinsic::convert_to_fp16:
6836 case Intrinsic::convert_from_fp16:
6842 case Intrinsic::fptosi_sat: {
6849 case Intrinsic::fptoui_sat: {
6856 case Intrinsic::set_rounding:
6862 case Intrinsic::is_fpclass: {
6867 cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue());
6872 Flags.setNoFPExcept(
6873 !
F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
6888 case Intrinsic::get_fpenv: {
6903 int SPFI = cast<FrameIndexSDNode>(Temp.
getNode())->getIndex();
6910 Res =
DAG.
getLoad(EnvVT, sdl, Chain, Temp, MPI);
6916 case Intrinsic::set_fpenv: {
6930 int SPFI = cast<FrameIndexSDNode>(Temp.
getNode())->getIndex();
6933 Chain =
DAG.
getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
6943 case Intrinsic::reset_fpenv:
6946 case Intrinsic::get_fpmode:
6955 case Intrinsic::set_fpmode:
6960 case Intrinsic::reset_fpmode: {
6965 case Intrinsic::pcmarker: {
6970 case Intrinsic::readcyclecounter: {
6978 case Intrinsic::readsteadycounter: {
6986 case Intrinsic::bitreverse:
6991 case Intrinsic::bswap:
6996 case Intrinsic::cttz: {
6998 ConstantInt *CI = cast<ConstantInt>(
I.getArgOperand(1));
7004 case Intrinsic::ctlz: {
7006 ConstantInt *CI = cast<ConstantInt>(
I.getArgOperand(1));
7012 case Intrinsic::ctpop: {
7018 case Intrinsic::fshl:
7019 case Intrinsic::fshr: {
7020 bool IsFSHL =
Intrinsic == Intrinsic::fshl;
7024 EVT VT =
X.getValueType();
7035 case Intrinsic::sadd_sat: {
7041 case Intrinsic::uadd_sat: {
7047 case Intrinsic::ssub_sat: {
7053 case Intrinsic::usub_sat: {
7059 case Intrinsic::sshl_sat: {
7065 case Intrinsic::ushl_sat: {
7071 case Intrinsic::smul_fix:
7072 case Intrinsic::umul_fix:
7073 case Intrinsic::smul_fix_sat:
7074 case Intrinsic::umul_fix_sat: {
7082 case Intrinsic::sdiv_fix:
7083 case Intrinsic::udiv_fix:
7084 case Intrinsic::sdiv_fix_sat:
7085 case Intrinsic::udiv_fix_sat: {
7090 Op1, Op2, Op3,
DAG, TLI));
7093 case Intrinsic::smax: {
7099 case Intrinsic::smin: {
7105 case Intrinsic::umax: {
7111 case Intrinsic::umin: {
7117 case Intrinsic::abs: {
7123 case Intrinsic::stacksave: {
7131 case Intrinsic::stackrestore:
7135 case Intrinsic::get_dynamic_area_offset: {
7150 case Intrinsic::stackguard: {
7171 case Intrinsic::stackprotector: {
7192 Chain, sdl, Src, FIN,
7199 case Intrinsic::objectsize:
7202 case Intrinsic::is_constant:
7205 case Intrinsic::annotation:
7206 case Intrinsic::ptr_annotation:
7207 case Intrinsic::launder_invariant_group:
7208 case Intrinsic::strip_invariant_group:
7213 case Intrinsic::assume:
7214 case Intrinsic::experimental_noalias_scope_decl:
7215 case Intrinsic::var_annotation:
7216 case Intrinsic::sideeffect:
7221 case Intrinsic::codeview_annotation: {
7226 Metadata *MD = cast<MetadataAsValue>(
I.getArgOperand(0))->getMetadata();
7233 case Intrinsic::init_trampoline: {
7234 const Function *
F = cast<Function>(
I.getArgOperand(1)->stripPointerCasts());
7249 case Intrinsic::adjust_trampoline:
7254 case Intrinsic::gcroot: {
7256 "only valid in functions with gc specified, enforced by Verifier");
7258 const Value *Alloca =
I.getArgOperand(0)->stripPointerCasts();
7259 const Constant *TypeMap = cast<Constant>(
I.getArgOperand(1));
7265 case Intrinsic::gcread:
7266 case Intrinsic::gcwrite:
7268 case Intrinsic::get_rounding:
7274 case Intrinsic::expect:
7279 case Intrinsic::ubsantrap:
7280 case Intrinsic::debugtrap:
7281 case Intrinsic::trap: {
7283 I.getAttributes().getFnAttr(
"trap-func-name").getValueAsString();
7284 if (TrapFuncName.
empty()) {
7285 switch (Intrinsic) {
7286 case Intrinsic::trap:
7289 case Intrinsic::debugtrap:
7292 case Intrinsic::ubsantrap:
7296 cast<ConstantInt>(
I.getArgOperand(0))->getZExtValue(), sdl,
7304 if (Intrinsic == Intrinsic::ubsantrap) {
7306 Args[0].Val =
I.getArgOperand(0);
7308 Args[0].Ty =
Args[0].Val->getType();
7312 CLI.setDebugLoc(sdl).setChain(
getRoot()).setLibCallee(
7323 case Intrinsic::uadd_with_overflow:
7324 case Intrinsic::sadd_with_overflow:
7325 case Intrinsic::usub_with_overflow:
7326 case Intrinsic::ssub_with_overflow:
7327 case Intrinsic::umul_with_overflow:
7328 case Intrinsic::smul_with_overflow: {
7330 switch (Intrinsic) {
7332 case Intrinsic::uadd_with_overflow:
Op =
ISD::UADDO;
break;
7333 case Intrinsic::sadd_with_overflow:
Op =
ISD::SADDO;
break;
7334 case Intrinsic::usub_with_overflow:
Op =
ISD::USUBO;
break;
7335 case Intrinsic::ssub_with_overflow:
Op =
ISD::SSUBO;
break;
7336 case Intrinsic::umul_with_overflow:
Op =
ISD::UMULO;
break;
7337 case Intrinsic::smul_with_overflow:
Op =
ISD::SMULO;
break;
7343 EVT OverflowVT = MVT::i1;
7352 case Intrinsic::prefetch: {
7354 unsigned rw = cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue();
7367 std::nullopt, Flags);
7376 case Intrinsic::lifetime_start:
7377 case Intrinsic::lifetime_end: {
7378 bool IsStart = (
Intrinsic == Intrinsic::lifetime_start);
7383 const int64_t ObjectSize =
7384 cast<ConstantInt>(
I.getArgOperand(0))->getSExtValue();
7389 for (
const Value *Alloca : Allocas) {
7390 const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(Alloca);
7393 if (!LifetimeObject)
7413 case Intrinsic::pseudoprobe: {
7414 auto Guid = cast<ConstantInt>(
I.getArgOperand(0))->getZExtValue();
7415 auto Index = cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue();
7416 auto Attr = cast<ConstantInt>(
I.getArgOperand(2))->getZExtValue();
7421 case Intrinsic::invariant_start:
7426 case Intrinsic::invariant_end:
7429 case Intrinsic::clear_cache:
7432 lowerCallToExternalSymbol(
I, FunctionName);
7434 case Intrinsic::donothing:
7435 case Intrinsic::seh_try_begin:
7436 case Intrinsic::seh_scope_begin:
7437 case Intrinsic::seh_try_end:
7438 case Intrinsic::seh_scope_end:
7441 case Intrinsic::experimental_stackmap:
7444 case Intrinsic::experimental_patchpoint_void:
7445 case Intrinsic::experimental_patchpoint_i64:
7448 case Intrinsic::experimental_gc_statepoint:
7451 case Intrinsic::experimental_gc_result:
7452 visitGCResult(cast<GCResultInst>(
I));
7454 case Intrinsic::experimental_gc_relocate:
7455 visitGCRelocate(cast<GCRelocateInst>(
I));
7457 case Intrinsic::instrprof_cover:
7459 case Intrinsic::instrprof_increment:
7461 case Intrinsic::instrprof_timestamp:
7463 case Intrinsic::instrprof_value_profile:
7465 case Intrinsic::instrprof_mcdc_parameters:
7467 case Intrinsic::instrprof_mcdc_tvbitmap_update:
7469 case Intrinsic::instrprof_mcdc_condbitmap_update:
7471 case Intrinsic::localescape: {
7477 for (
unsigned Idx = 0,
E =
I.arg_size();
Idx <
E; ++
Idx) {
7478 Value *Arg =
I.getArgOperand(
Idx)->stripPointerCasts();
7479 if (isa<ConstantPointerNull>(Arg))
7483 "can only escape static allocas");
7489 TII->get(TargetOpcode::LOCAL_ESCAPE))
7497 case Intrinsic::localrecover: {
7502 auto *Fn = cast<Function>(
I.getArgOperand(0)->stripPointerCasts());
7503 auto *
Idx = cast<ConstantInt>(
I.getArgOperand(2));
7505 unsigned(
Idx->getLimitedValue(std::numeric_limits<int>::max()));
7527 case Intrinsic::eh_exceptionpointer:
7528 case Intrinsic::eh_exceptioncode: {
7530 const auto *CPI = cast<CatchPadInst>(
I.getArgOperand(0));
7535 if (Intrinsic == Intrinsic::eh_exceptioncode)
7540 case Intrinsic::xray_customevent: {
7569 case Intrinsic::xray_typedevent: {
7596 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys, Ops);
7602 case Intrinsic::experimental_deoptimize:
7605 case Intrinsic::experimental_stepvector:
7608 case Intrinsic::vector_reduce_fadd:
7609 case Intrinsic::vector_reduce_fmul:
7610 case Intrinsic::vector_reduce_add:
7611 case Intrinsic::vector_reduce_mul:
7612 case Intrinsic::vector_reduce_and:
7613 case Intrinsic::vector_reduce_or:
7614 case Intrinsic::vector_reduce_xor:
7615 case Intrinsic::vector_reduce_smax:
7616 case Intrinsic::vector_reduce_smin:
7617 case Intrinsic::vector_reduce_umax:
7618 case Intrinsic::vector_reduce_umin:
7619 case Intrinsic::vector_reduce_fmax:
7620 case Intrinsic::vector_reduce_fmin:
7621 case Intrinsic::vector_reduce_fmaximum:
7622 case Intrinsic::vector_reduce_fminimum:
7623 visitVectorReduce(
I, Intrinsic);
7626 case Intrinsic::icall_branch_funnel: {
7635 "llvm.icall.branch.funnel operand must be a GlobalValue");
7638 struct BranchFunnelTarget {
7644 for (
unsigned Op = 1,
N =
I.arg_size();
Op !=
N;
Op += 2) {
7647 if (ElemBase !=
Base)
7649 "to the same GlobalValue");
7652 auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
7655 "llvm.icall.branch.funnel operand must be a GlobalValue");
7661 [](
const BranchFunnelTarget &T1,
const BranchFunnelTarget &T2) {
7662 return T1.Offset < T2.Offset;
7665 for (
auto &
T : Targets) {
7680 case Intrinsic::wasm_landingpad_index:
7686 case Intrinsic::aarch64_settag:
7687 case Intrinsic::aarch64_settag_zero: {
7689 bool ZeroMemory =
Intrinsic == Intrinsic::aarch64_settag_zero;
7698 case Intrinsic::amdgcn_cs_chain: {
7699 assert(
I.arg_size() == 5 &&
"Additional args not supported yet");
7700 assert(cast<ConstantInt>(
I.getOperand(4))->isZero() &&
7701 "Non-zero flags not supported yet");
7717 for (
unsigned Idx : {2, 3, 1}) {
7720 Arg.
Ty =
I.getOperand(
Idx)->getType();
7722 Args.push_back(Arg);
7725 assert(Args[0].IsInReg &&
"SGPR args should be marked inreg");
7726 assert(!Args[1].IsInReg &&
"VGPR args should not be marked inreg");
7727 Args[2].IsInReg =
true;
7732 .setCallee(
CC,
RetTy, Callee, std::move(Args))
7735 .setConvergent(
I.isConvergent());
7737 std::pair<SDValue, SDValue>
Result =
7741 "Should've lowered as tail call");
7746 case Intrinsic::ptrmask: {
7750 EVT PtrVT =
Ptr.getValueType();
7752 "Pointers with different index type are not supported by SDAG");
7756 case Intrinsic::threadlocal_address: {
7760 case Intrinsic::get_active_lane_mask: {
7763 EVT ElementVT =
Index.getValueType();
7766 visitTargetIntrinsic(
I, Intrinsic);
7784 case Intrinsic::experimental_get_vector_length: {
7785 assert(cast<ConstantInt>(
I.getOperand(1))->getSExtValue() > 0 &&
7786 "Expected positive VF");
7787 unsigned VF = cast<ConstantInt>(
I.getOperand(1))->getZExtValue();
7788 bool IsScalable = cast<ConstantInt>(
I.getOperand(2))->isOne();
7794 visitTargetIntrinsic(
I, Intrinsic);
7803 if (CountVT.
bitsLT(VT)) {
7818 case Intrinsic::experimental_cttz_elts: {
7821 EVT OpVT =
Op.getValueType();
7824 visitTargetIntrinsic(
I, Intrinsic);
7844 if (!cast<ConstantSDNode>(
getValue(
I.getOperand(1)))->isZero())
7845 CR = CR.subtract(
APInt(64, 1));
7847 unsigned EltWidth =
I.getType()->getScalarSizeInBits();
7848 EltWidth = std::min(EltWidth, (
unsigned)CR.getActiveBits());
7874 case Intrinsic::vector_insert: {
7882 if (
Index.getValueType() != VectorIdxTy)
7890 case Intrinsic::vector_extract: {
7898 if (
Index.getValueType() != VectorIdxTy)
7905 case Intrinsic::experimental_vector_reverse:
7906 visitVectorReverse(
I);
7908 case Intrinsic::experimental_vector_splice:
7909 visitVectorSplice(
I);
7911 case Intrinsic::callbr_landingpad:
7912 visitCallBrLandingPad(
I);
7914 case Intrinsic::experimental_vector_interleave2:
7915 visitVectorInterleave(
I);
7917 case Intrinsic::experimental_vector_deinterleave2:
7918 visitVectorDeinterleave(
I);
7920 case Intrinsic::experimental_convergence_anchor:
7921 case Intrinsic::experimental_convergence_entry:
7922 case Intrinsic::experimental_convergence_loop:
7923 visitConvergenceControl(
I, Intrinsic);
7927void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
7963 PendingConstrainedFP.push_back(OutChain);
7969 PendingConstrainedFPStrict.push_back(OutChain);
7981 Flags.setNoFPExcept(
true);
7983 if (
auto *FPOp = dyn_cast<FPMathOperator>(&FPI))
7984 Flags.copyFMF(*FPOp);
7989#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
7990 case Intrinsic::INTRINSIC: \
7991 Opcode = ISD::STRICT_##DAGN; \
7993#include "llvm/IR/ConstrainedOps.def"
7994 case Intrinsic::experimental_constrained_fmuladd: {
8001 pushOutChain(
Mul, EB);
8022 auto *
FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
8032 pushOutChain(Result, EB);
8039 std::optional<unsigned> ResOPC;
8041 case Intrinsic::vp_ctlz: {
8042 bool IsZeroUndef = cast<ConstantInt>(VPIntrin.
getArgOperand(1))->isOne();
8043 ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
8046 case Intrinsic::vp_cttz: {
8047 bool IsZeroUndef = cast<ConstantInt>(VPIntrin.
getArgOperand(1))->isOne();
8048 ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
8051#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \
8052 case Intrinsic::VPID: \
8053 ResOPC = ISD::VPSD; \
8055#include "llvm/IR/VPIntrinsics.def"
8060 "Inconsistency: no SDNode available for this VPIntrinsic!");
8062 if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
8063 *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
8065 return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
8066 : ISD::VP_REDUCE_FMUL;
8072void SelectionDAGBuilder::visitVPLoad(
8098void SelectionDAGBuilder::visitVPGather(
8134 {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
8140void SelectionDAGBuilder::visitVPStore(
8144 EVT VT = OpValues[0].getValueType();
8162void SelectionDAGBuilder::visitVPScatter(
8167 EVT VT = OpValues[0].getValueType();
8197 {getMemoryRoot(), OpValues[0], Base, Index, Scale,
8198 OpValues[2], OpValues[3]},
8204void SelectionDAGBuilder::visitVPStridedLoad(
8223 OpValues[2], OpValues[3], MMO,
8231void SelectionDAGBuilder::visitVPStridedStore(
8235 EVT VT = OpValues[0].getValueType();
8247 DAG.
getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8255void SelectionDAGBuilder::visitVPCmp(
const VPCmpIntrinsic &VPIntrin) {
8280 "Unexpected target EVL type");
8289void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8296 if (
const auto *CmpI = dyn_cast<VPCmpIntrinsic>(&VPIntrin))
8297 return visitVPCmp(*CmpI);
8308 "Unexpected target EVL type");
8312 for (
unsigned I = 0;
I < VPIntrin.
arg_size(); ++
I) {
8314 if (
I == EVLParamPos)
8322 if (
auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8329 visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8331 case ISD::VP_GATHER:
8332 visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8334 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8335 visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8338 visitVPStore(VPIntrin, OpValues);
8340 case ISD::VP_SCATTER:
8341 visitVPScatter(VPIntrin, OpValues);
8343 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8344 visitVPStridedStore(VPIntrin, OpValues);
8346 case ISD::VP_FMULADD: {
8347 assert(OpValues.
size() == 5 &&
"Unexpected number of operands");
8349 if (
auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8356 ISD::VP_FMUL,
DL, VTs,
8357 {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8360 {
Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8365 case ISD::VP_IS_FPCLASS: {
8368 auto Constant = OpValues[1]->getAsZExtVal();
8371 {OpValues[0],
Check, OpValues[2], OpValues[3]});
8375 case ISD::VP_INTTOPTR: {
8386 case ISD::VP_PTRTOINT: {
8401 case ISD::VP_CTLZ_ZERO_UNDEF:
8403 case ISD::VP_CTTZ_ZERO_UNDEF: {
8405 DAG.
getNode(Opcode,
DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8425 if (CallSiteIndex) {
8439 assert(BeginLabel &&
"BeginLabel should've been set");
8454 assert(II &&
"II should've been set");
8465std::pair<SDValue, SDValue>
8479 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
8482 "Non-null chain expected with non-tail call!");
8483 assert((Result.second.getNode() || !Result.first.getNode()) &&
8484 "Null value expected with tail call!");
8486 if (!Result.second.getNode()) {
8493 PendingExports.clear();
8508 bool isMustTailCall,
8517 const Value *SwiftErrorVal =
nullptr;
8524 if (Caller->getFnAttribute(
"disable-tail-calls").getValueAsString() ==
8525 "true" && !isMustTailCall)
8532 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
8541 if (V->getType()->isEmptyTy())
8545 Entry.Node = ArgNode; Entry.Ty = V->getType();
8547 Entry.setAttributes(&CB,
I - CB.
arg_begin());
8559 Args.push_back(Entry);
8563 if (Entry.IsSRet && isa<Instruction>(V))
8571 Value *V = Bundle->Inputs[0];
8573 Entry.Node = ArgNode;
8574 Entry.Ty = V->getType();
8575 Entry.IsCFGuardTarget =
true;
8576 Args.push_back(Entry);
8594 "Target doesn't support calls with kcfi operand bundles.");
8595 CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
8602 auto *Token = Bundle->Inputs[0].get();
8603 ConvControlToken =
getValue(Token);
8618 std::pair<SDValue, SDValue> Result =
lowerInvokable(CLI, EHPadBB);
8620 if (Result.first.getNode()) {
8642 if (
const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
8661 bool ConstantMemory =
false;
8666 ConstantMemory =
true;
8677 if (!ConstantMemory)
8684void SelectionDAGBuilder::processIntegerCallValue(
const Instruction &
I,
8698bool SelectionDAGBuilder::visitMemCmpBCmpCall(
const CallInst &
I) {
8699 const Value *
LHS =
I.getArgOperand(0), *
RHS =
I.getArgOperand(1);
8713 if (Res.first.getNode()) {
8714 processIntegerCallValue(
I, Res.first,
true);
8728 auto hasFastLoadsAndCompare = [&](
unsigned NumBits) {
8751 switch (NumBitsToCompare) {
8763 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
8781 processIntegerCallValue(
I, Cmp,
false);
8790bool SelectionDAGBuilder::visitMemChrCall(
const CallInst &
I) {
8791 const Value *Src =
I.getArgOperand(0);
8796 std::pair<SDValue, SDValue> Res =
8800 if (Res.first.getNode()) {
8814bool SelectionDAGBuilder::visitMemPCpyCall(
const CallInst &
I) {
8822 Align Alignment = std::min(DstAlign, SrcAlign);
8836 "** memcpy should not be lowered as TailCall in mempcpy context **");
8854bool SelectionDAGBuilder::visitStrCpyCall(
const CallInst &
I,
bool isStpcpy) {
8855 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
8858 std::pair<SDValue, SDValue> Res =
8863 if (Res.first.getNode()) {
8877bool SelectionDAGBuilder::visitStrCmpCall(
const CallInst &
I) {
8878 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
8881 std::pair<SDValue, SDValue> Res =
8886 if (Res.first.getNode()) {
8887 processIntegerCallValue(
I, Res.first,
true);
8900bool SelectionDAGBuilder::visitStrLenCall(
const CallInst &
I) {
8901 const Value *Arg0 =
I.getArgOperand(0);
8904 std::pair<SDValue, SDValue> Res =
8907 if (Res.first.getNode()) {
8908 processIntegerCallValue(
I, Res.first,
false);
8921bool SelectionDAGBuilder::visitStrNLenCall(
const CallInst &
I) {
8922 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
8925 std::pair<SDValue, SDValue> Res =
8929 if (Res.first.getNode()) {
8930 processIntegerCallValue(
I, Res.first,
false);
8943bool SelectionDAGBuilder::visitUnaryFloatCall(
const CallInst &
I,
8946 if (!
I.onlyReadsMemory())
8950 Flags.copyFMF(cast<FPMathOperator>(
I));
8963bool SelectionDAGBuilder::visitBinaryFloatCall(
const CallInst &
I,
8966 if (!
I.onlyReadsMemory())
8970 Flags.copyFMF(cast<FPMathOperator>(
I));
8979void SelectionDAGBuilder::visitCall(
const CallInst &
I) {
8981 if (
I.isInlineAsm()) {
8989 if (
F->isDeclaration()) {
8991 unsigned IID =
F->getIntrinsicID();
8997 visitIntrinsicCall(
I, IID);
9006 if (!
I.isNoBuiltin() && !
I.isStrictFP() && !
F->hasLocalLinkage() &&
9012 if (visitMemCmpBCmpCall(
I))
9015 case LibFunc_copysign:
9016 case LibFunc_copysignf:
9017 case LibFunc_copysignl:
9020 if (
I.onlyReadsMemory()) {
9024 LHS.getValueType(), LHS, RHS));
9061 case LibFunc_sqrt_finite:
9062 case LibFunc_sqrtf_finite:
9063 case LibFunc_sqrtl_finite:
9068 case LibFunc_floorf:
9069 case LibFunc_floorl:
9073 case LibFunc_nearbyint:
9074 case LibFunc_nearbyintf:
9075 case LibFunc_nearbyintl:
9092 case LibFunc_roundf:
9093 case LibFunc_roundl:
9098 case LibFunc_truncf:
9099 case LibFunc_truncl:
9116 case LibFunc_exp10f:
9117 case LibFunc_exp10l:
9122 case LibFunc_ldexpf:
9123 case LibFunc_ldexpl:
9127 case LibFunc_memcmp:
9128 if (visitMemCmpBCmpCall(
I))
9131 case LibFunc_mempcpy:
9132 if (visitMemPCpyCall(
I))
9135 case LibFunc_memchr:
9136 if (visitMemChrCall(
I))
9139 case LibFunc_strcpy:
9140 if (visitStrCpyCall(
I,
false))
9143 case LibFunc_stpcpy:
9144 if (visitStrCpyCall(
I,
true))
9147 case LibFunc_strcmp:
9148 if (visitStrCmpCall(
I))
9151 case LibFunc_strlen:
9152 if (visitStrLenCall(
I))
9155 case LibFunc_strnlen:
9156 if (visitStrNLenCall(
I))
9166 assert(!
I.hasOperandBundlesOtherThan(
9167 {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
9168 LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated,
9169 LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_kcfi,
9170 LLVMContext::OB_convergencectrl}) &&
9171 "Cannot lower calls with arbitrary operand bundles!");
9209 for (
const auto &Code : Codes)
9224 SDISelAsmOperandInfo &MatchingOpInfo,
9226 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
9232 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9234 OpInfo.ConstraintVT);
9235 std::pair<unsigned, const TargetRegisterClass *> InputRC =
9237 MatchingOpInfo.ConstraintVT);
9238 if ((OpInfo.ConstraintVT.isInteger() !=
9239 MatchingOpInfo.ConstraintVT.isInteger()) ||
9240 (MatchRC.second != InputRC.second)) {
9243 " with a matching output constraint of"
9244 " incompatible type!");
9246 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9253 SDISelAsmOperandInfo &OpInfo,
9266 const Value *OpVal = OpInfo.CallOperandVal;
9267 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
9268 isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
9280 TySize,
DL.getPrefTypeAlign(Ty),
false);
9282 Chain = DAG.
getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9285 OpInfo.CallOperand = StackSlot;
9298static std::optional<unsigned>
9300 SDISelAsmOperandInfo &OpInfo,
9301 SDISelAsmOperandInfo &RefOpInfo) {
9312 return std::nullopt;
9316 unsigned AssignedReg;
9319 &
TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
9322 return std::nullopt;
9327 const MVT RegVT = *
TRI.legalclasstypes_begin(*RC);
9329 if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
9338 !
TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
9343 if (RegVT.
getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
9348 OpInfo.CallOperand =
9350 OpInfo.ConstraintVT = RegVT;
9354 }
else if (RegVT.
isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
9357 OpInfo.CallOperand =
9359 OpInfo.ConstraintVT = VT;
9366 if (OpInfo.isMatchingInputConstraint())
9367 return std::nullopt;
9369 EVT ValueVT = OpInfo.ConstraintVT;
9370 if (OpInfo.ConstraintVT == MVT::Other)
9374 unsigned NumRegs = 1;
9375 if (OpInfo.ConstraintVT != MVT::Other)
9390 I = std::find(
I, RC->
end(), AssignedReg);
9391 if (
I == RC->
end()) {
9394 return {AssignedReg};
9398 for (; NumRegs; --NumRegs, ++
I) {
9399 assert(
I != RC->
end() &&
"Ran out of registers to allocate!");
9404 OpInfo.AssignedRegs =
RegsForValue(Regs, RegVT, ValueVT);
9405 return std::nullopt;
9410 const std::vector<SDValue> &AsmNodeOperands) {
9413 for (; OperandNo; --OperandNo) {
9415 unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
9418 (
F.isRegDefKind() ||
F.isRegDefEarlyClobberKind() ||
F.isMemKind()) &&
9419 "Skipped past definitions?");
9420 CurOp +=
F.getNumOperandRegisters() + 1;
9431 explicit ExtraFlags(
const CallBase &Call) {
9433 if (
IA->hasSideEffects())
9435 if (
IA->isAlignStack())
9437 if (
Call.isConvergent())
9458 unsigned get()
const {
return Flags; }
9465 if (
auto *GA = dyn_cast<GlobalAddressSDNode>(
Op)) {
9466 auto Fn = dyn_cast_or_null<Function>(GA->getGlobal());
9481void SelectionDAGBuilder::visitInlineAsm(
const CallBase &Call,
9494 bool HasSideEffect =
IA->hasSideEffects();
9495 ExtraFlags ExtraInfo(Call);
9497 for (
auto &
T : TargetConstraints) {
9498 ConstraintOperands.
push_back(SDISelAsmOperandInfo(
T));
9499 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.
back();
9501 if (OpInfo.CallOperandVal)
9502 OpInfo.CallOperand =
getValue(OpInfo.CallOperandVal);
9505 HasSideEffect = OpInfo.hasMemory(TLI);
9514 OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
9517 return emitInlineAsmError(Call,
"constraint '" +
Twine(
T.ConstraintCode) +
9518 "' expects an integer constant "
9521 ExtraInfo.update(
T);
9528 bool EmitEHLabels = isa<InvokeInst>(Call);
9530 assert(EHPadBB &&
"InvokeInst must have an EHPadBB");
9532 bool IsCallBr = isa<CallBrInst>(Call);
9534 if (IsCallBr || EmitEHLabels) {
9543 Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
9548 IA->collectAsmStrs(AsmStrs);
9551 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9559 if (OpInfo.hasMatchingInput()) {
9560 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
9591 if (OpInfo.isIndirect &&
isFunction(OpInfo.CallOperand) &&
9594 OpInfo.isIndirect =
false;
9601 !OpInfo.isIndirect) {
9602 assert((OpInfo.isMultipleAlternative ||
9604 "Can only indirectify direct input operands!");
9610 OpInfo.CallOperandVal =
nullptr;
9613 OpInfo.isIndirect =
true;
9619 std::vector<SDValue> AsmNodeOperands;
9620 AsmNodeOperands.push_back(
SDValue());
9627 const MDNode *SrcLoc =
Call.getMetadata(
"srcloc");
9637 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9639 SDISelAsmOperandInfo &RefOpInfo =
9640 OpInfo.isMatchingInputConstraint()
9641 ? ConstraintOperands[OpInfo.getMatchedOperand()]
9643 const auto RegError =
9648 const char *
RegName =
TRI.getName(*RegError);
9649 emitInlineAsmError(Call,
"register '" +
Twine(
RegName) +
9650 "' allocated for constraint '" +
9651 Twine(OpInfo.ConstraintCode) +
9652 "' does not match required type");
9656 auto DetectWriteToReservedRegister = [&]() {
9659 for (
unsigned Reg : OpInfo.AssignedRegs.Regs) {
9661 TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
9663 emitInlineAsmError(Call,
"write to reserved register '" +
9672 !OpInfo.isMatchingInputConstraint())) &&
9673 "Only address as input operand is allowed.");
9675 switch (OpInfo.Type) {
9681 "Failed to convert memory constraint code to constraint id.");
9685 OpFlags.setMemConstraint(ConstraintID);
9688 AsmNodeOperands.push_back(OpInfo.CallOperand);
9693 if (OpInfo.AssignedRegs.Regs.empty()) {
9695 Call,
"couldn't allocate output register for constraint '" +
9696 Twine(OpInfo.ConstraintCode) +
"'");
9700 if (DetectWriteToReservedRegister())
9705 OpInfo.AssignedRegs.AddInlineAsmOperands(
9714 SDValue InOperandVal = OpInfo.CallOperand;
9716 if (OpInfo.isMatchingInputConstraint()) {
9722 if (
Flag.isRegDefKind() ||
Flag.isRegDefEarlyClobberKind()) {
9723 if (OpInfo.isIndirect) {
9725 emitInlineAsmError(Call,
"inline asm not supported yet: "
9726 "don't know how to handle tied "
9727 "indirect register inputs");
9735 auto *
R = cast<RegisterSDNode>(AsmNodeOperands[CurOp+1]);
9737 MVT RegVT =
R->getSimpleValueType(0);
9741 :
TRI.getMinimalPhysRegClass(TiedReg);
9742 for (
unsigned i = 0, e =
Flag.getNumOperandRegisters(); i != e; ++i)
9749 MatchedRegs.getCopyToRegs(InOperandVal,
DAG, dl, Chain, &Glue, &Call);
9751 OpInfo.getMatchedOperand(), dl,
DAG,
9756 assert(
Flag.isMemKind() &&
"Unknown matching constraint!");
9758 "Unexpected number of operands");
9761 Flag.clearMemConstraint();
9762 Flag.setMatchingOp(OpInfo.getMatchedOperand());
9765 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
9776 std::vector<SDValue> Ops;
9781 if (isa<ConstantSDNode>(InOperandVal)) {
9782 emitInlineAsmError(Call,
"value out of range for constraint '" +
9783 Twine(OpInfo.ConstraintCode) +
"'");
9787 emitInlineAsmError(Call,
9788 "invalid operand for inline asm constraint '" +
9789 Twine(OpInfo.ConstraintCode) +
"'");
9802 assert((OpInfo.isIndirect ||
9804 "Operand must be indirect to be a mem!");
9807 "Memory operands expect pointer values");
9812 "Failed to convert memory constraint code to constraint id.");
9816 ResOpType.setMemConstraint(ConstraintID);
9820 AsmNodeOperands.push_back(InOperandVal);
9828 "Failed to convert memory constraint code to constraint id.");
9834 auto *GA = cast<GlobalAddressSDNode>(InOperandVal);
9842 ResOpType.setMemConstraint(ConstraintID);
9844 AsmNodeOperands.push_back(
9847 AsmNodeOperands.push_back(AsmOp);
9853 "Unknown constraint type!");
9856 if (OpInfo.isIndirect) {
9858 Call,
"Don't know how to handle indirect register inputs yet "
9859 "for constraint '" +
9860 Twine(OpInfo.ConstraintCode) +
"'");
9865 if (OpInfo.AssignedRegs.Regs.empty()) {
9866 emitInlineAsmError(Call,
9867 "couldn't allocate input reg for constraint '" +
9868 Twine(OpInfo.ConstraintCode) +
"'");
9872 if (DetectWriteToReservedRegister())
9877 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal,
DAG, dl, Chain, &Glue,
9881 0, dl,
DAG, AsmNodeOperands);
9887 if (!OpInfo.AssignedRegs.Regs.empty())
9897 if (Glue.
getNode()) AsmNodeOperands.push_back(Glue);
9901 DAG.
getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
9912 if (
StructType *StructResult = dyn_cast<StructType>(CallResultType))
9913 ResultTypes = StructResult->elements();
9914 else if (!CallResultType->
isVoidTy())
9915 ResultTypes =
ArrayRef(CallResultType);
9917 auto CurResultType = ResultTypes.
begin();
9918 auto handleRegAssign = [&](
SDValue V) {
9919 assert(CurResultType != ResultTypes.
end() &&
"Unexpected value");
9920 assert((*CurResultType)->isSized() &&
"Unexpected unsized type");
9933 if (ResultVT !=
V.getValueType() &&
9936 else if (ResultVT !=
V.getValueType() && ResultVT.
isInteger() &&
9937 V.getValueType().isInteger()) {
9943 assert(ResultVT ==
V.getValueType() &&
"Asm result value mismatch!");
9949 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9953 if (OpInfo.AssignedRegs.Regs.empty())
9956 switch (OpInfo.ConstraintType) {
9960 Chain, &Glue, &Call);
9972 assert(
false &&
"Unexpected unknown constraint");
9976 if (OpInfo.isIndirect) {
9977 const Value *
Ptr = OpInfo.CallOperandVal;
9978 assert(
Ptr &&
"Expected value CallOperandVal for indirect asm operand");
9984 assert(!
Call.getType()->isVoidTy() &&
"Bad inline asm!");
9989 handleRegAssign(Val);
9995 if (!ResultValues.
empty()) {
9996 assert(CurResultType == ResultTypes.
end() &&
9997 "Mismatch in number of ResultTypes");
9999 "Mismatch in number of output operands in asm result");
10007 if (!OutChains.
empty())
10010 if (EmitEHLabels) {
10011 Chain = lowerEndEH(Chain, cast<InvokeInst>(&Call), EHPadBB, BeginLabel);
10015 if (ResultValues.
empty() || HasSideEffect || !OutChains.
empty() || IsCallBr ||
10020void SelectionDAGBuilder::emitInlineAsmError(
const CallBase &Call,
10021 const Twine &Message) {
10030 if (ValueVTs.
empty())
10034 for (
unsigned i = 0, e = ValueVTs.
size(); i != e; ++i)
10040void SelectionDAGBuilder::visitVAStart(
const CallInst &
I) {
10047void SelectionDAGBuilder::visitVAArg(
const VAArgInst &
I) {
10053 DL.getABITypeAlign(
I.getType()).value());
10056 if (
I.getType()->isPointerTy())
10062void SelectionDAGBuilder::visitVAEnd(
const CallInst &
I) {
10069void SelectionDAGBuilder::visitVACopy(
const CallInst &
I) {
10090 if (!
Lo.isMinValue())
10094 unsigned Bits = std::max(
Hi.getActiveBits(),
10103 unsigned NumVals =
Op.getNode()->getNumValues();
10110 for (
unsigned I = 1;
I != NumVals; ++
I)
10124 unsigned ArgIdx,
unsigned NumArgs,
SDValue Callee,
Type *ReturnTy,
10127 Args.reserve(NumArgs);
10131 for (
unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
10132 ArgI != ArgE; ++ArgI) {
10133 const Value *V = Call->getOperand(ArgI);
10135 assert(!V->getType()->isEmptyTy() &&
"Empty type passed to intrinsic.");
10139 Entry.Ty = V->getType();
10140 Entry.setAttributes(Call, ArgI);
10141 Args.push_back(Entry);
10146 .
setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
10175 for (
unsigned I = StartIdx;
I < Call.arg_size();
I++) {
10190void SelectionDAGBuilder::visitStackmap(
const CallInst &CI) {
10224 assert(
ID.getValueType() == MVT::i64);
10255void SelectionDAGBuilder::visitPatchpoint(
const CallBase &CB,
10271 if (
auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
10274 else if (
auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
10276 SDLoc(SymbolicCallee),
10277 SymbolicCallee->getValueType(0));
10287 "Not enough arguments provided to the patchpoint intrinsic");
10290 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
10306 "Expected a callseq node.");
10308 bool HasGlue =
Call->getGluedNode();
10338 unsigned NumCallRegArgs =
Call->getNumOperands() - (HasGlue ? 4 : 3);
10339 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
10348 for (
unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i !=
e; ++i)
10359 if (IsAnyRegCC && HasDef) {
10364 assert(ValueVTs.
size() == 1 &&
"Expected only one return value type.");
10388 if (IsAnyRegCC && HasDef) {
10400void SelectionDAGBuilder::visitVectorReduce(
const CallInst &
I,
10401 unsigned Intrinsic) {
10405 if (
I.arg_size() > 1)
10411 if (
auto *FPMO = dyn_cast<FPMathOperator>(&
I))
10414 switch (Intrinsic) {
10415 case Intrinsic::vector_reduce_fadd:
10423 case Intrinsic::vector_reduce_fmul:
10431 case Intrinsic::vector_reduce_add:
10434 case Intrinsic::vector_reduce_mul:
10437 case Intrinsic::vector_reduce_and:
10440 case Intrinsic::vector_reduce_or:
10443 case Intrinsic::vector_reduce_xor:
10446 case Intrinsic::vector_reduce_smax:
10449 case Intrinsic::vector_reduce_smin:
10452 case Intrinsic::vector_reduce_umax:
10455 case Intrinsic::vector_reduce_umin:
10458 case Intrinsic::vector_reduce_fmax:
10461 case Intrinsic::vector_reduce_fmin:
10464 case Intrinsic::vector_reduce_fmaximum:
10467 case Intrinsic::vector_reduce_fminimum:
10481 Attrs.push_back(Attribute::SExt);
10483 Attrs.push_back(Attribute::ZExt);
10485 Attrs.push_back(Attribute::InReg);
10495std::pair<SDValue, SDValue>
10509 RetTys.
swap(OldRetTys);
10510 Offsets.swap(OldOffsets);
10512 for (
size_t i = 0, e = OldRetTys.
size(); i != e; ++i) {
10513 EVT RetVT = OldRetTys[i];
10517 unsigned RegisterVTByteSZ = RegisterVT.
getSizeInBits() / 8;
10518 RetTys.
append(NumRegs, RegisterVT);
10519 for (
unsigned j = 0; j != NumRegs; ++j)
10520 Offsets.push_back(
Offset + j * RegisterVTByteSZ);
10532 int DemoteStackIdx = -100;
10543 DL.getAllocaAddrSpace());
10547 Entry.Node = DemoteStackSlot;
10548 Entry.Ty = StackSlotPtrType;
10549 Entry.IsSExt =
false;
10550 Entry.IsZExt =
false;
10551 Entry.IsInReg =
false;
10552 Entry.IsSRet =
true;
10553 Entry.IsNest =
false;
10554 Entry.IsByVal =
false;
10555 Entry.IsByRef =
false;
10556 Entry.IsReturned =
false;
10557 Entry.IsSwiftSelf =
false;
10558 Entry.IsSwiftAsync =
false;
10559 Entry.IsSwiftError =
false;
10560 Entry.IsCFGuardTarget =
false;
10561 Entry.Alignment = Alignment;
10573 for (
unsigned I = 0,
E = RetTys.
size();
I !=
E; ++
I) {
10575 if (NeedsRegBlock) {
10576 Flags.setInConsecutiveRegs();
10577 if (
I == RetTys.
size() - 1)
10578 Flags.setInConsecutiveRegsLast();
10580 EVT VT = RetTys[
I];
10585 for (
unsigned i = 0; i != NumRegs; ++i) {
10587 MyFlags.
Flags = Flags;
10588 MyFlags.
VT = RegisterVT;
10589 MyFlags.
ArgVT = VT;
10594 cast<PointerType>(CLI.
RetTy)->getAddressSpace());
10602 CLI.
Ins.push_back(MyFlags);
10616 CLI.
Ins.push_back(MyFlags);
10624 for (
unsigned i = 0, e = Args.size(); i != e; ++i) {
10628 Type *FinalType = Args[i].Ty;
10629 if (Args[i].IsByVal)
10630 FinalType = Args[i].IndirectType;
10633 for (
unsigned Value = 0, NumValues = ValueVTs.
size();
Value != NumValues;
10638 Args[i].Node.getResNo() +
Value);
10645 Flags.setOrigAlign(OriginalAlignment);
10647 if (Args[i].Ty->isPointerTy()) {
10648 Flags.setPointer();
10649 Flags.setPointerAddrSpace(
10650 cast<PointerType>(Args[i].Ty)->getAddressSpace());
10652 if (Args[i].IsZExt)
10654 if (Args[i].IsSExt)
10656 if (Args[i].IsInReg) {
10660 isa<StructType>(FinalType)) {
10663 Flags.setHvaStart();
10669 if (Args[i].IsSRet)
10671 if (Args[i].IsSwiftSelf)
10672 Flags.setSwiftSelf();
10673 if (Args[i].IsSwiftAsync)
10674 Flags.setSwiftAsync();
10675 if (Args[i].IsSwiftError)
10676 Flags.setSwiftError();
10677 if (Args[i].IsCFGuardTarget)
10678 Flags.setCFGuardTarget();
10679 if (Args[i].IsByVal)
10681 if (Args[i].IsByRef)
10683 if (Args[i].IsPreallocated) {
10684 Flags.setPreallocated();
10692 if (Args[i].IsInAlloca) {
10693 Flags.setInAlloca();
10702 if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
10703 unsigned FrameSize =
DL.getTypeAllocSize(Args[i].IndirectType);
10704 Flags.setByValSize(FrameSize);
10707 if (
auto MA = Args[i].Alignment)
10711 }
else if (
auto MA = Args[i].Alignment) {
10714 MemAlign = OriginalAlignment;
10716 Flags.setMemAlign(MemAlign);
10717 if (Args[i].IsNest)
10720 Flags.setInConsecutiveRegs();
10729 if (Args[i].IsSExt)
10731 else if (Args[i].IsZExt)
10736 if (Args[i].IsReturned && !
Op.getValueType().isVector() &&
10741 Args[i].Ty->getPointerAddressSpace())) &&
10742 RetTys.
size() == NumValues &&
"unexpected use of 'returned'");
10755 CLI.
RetZExt == Args[i].IsZExt))
10756 Flags.setReturned();
10762 for (
unsigned j = 0; j != NumParts; ++j) {
10769 j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
10770 if (NumParts > 1 && j == 0)
10774 if (j == NumParts - 1)
10778 CLI.
Outs.push_back(MyFlags);
10779 CLI.
OutVals.push_back(Parts[j]);
10782 if (NeedsRegBlock &&
Value == NumValues - 1)
10783 CLI.
Outs[CLI.
Outs.size() - 1].Flags.setInConsecutiveRegsLast();
10795 "LowerCall didn't return a valid chain!");
10797 "LowerCall emitted a return value for a tail call!");
10799 "LowerCall didn't emit the correct number of values!");
10811 for (
unsigned i = 0, e = CLI.
Ins.size(); i != e; ++i) {
10812 assert(InVals[i].getNode() &&
"LowerCall emitted a null value!");
10813 assert(
EVT(CLI.
Ins[i].VT) == InVals[i].getValueType() &&
10814 "LowerCall emitted a value with the wrong type!");
10827 assert(PVTs.
size() == 1 &&
"Pointers should fit in one register");
10828 EVT PtrVT = PVTs[0];
10830 unsigned NumValues = RetTys.
size();
10831 ReturnValues.
resize(NumValues);
10837 Flags.setNoUnsignedWrap(
true);
10841 for (
unsigned i = 0; i < NumValues; ++i) {
10848 DemoteStackIdx, Offsets[i]),
10850 ReturnValues[i] = L;
10851 Chains[i] = L.getValue(1);
10858 std::optional<ISD::NodeType> AssertOp;
10863 unsigned CurReg = 0;
10864 for (
EVT VT : RetTys) {
10871 CLI.
DAG, CLI.
DL, &InVals[CurReg], NumRegs, RegisterVT, VT,
nullptr,
10879 if (ReturnValues.
empty())
10885 return std::make_pair(Res, CLI.
Chain);
10902 if (
N->getNumValues() == 1) {
10910 "Lowering returned the wrong number of results!");
10913 for (
unsigned I = 0,
E =
N->getNumValues();
I !=
E; ++
I)
10926 cast<RegisterSDNode>(
Op.getOperand(1))->getReg() != Reg) &&
10927 "Copy from a reg to the same reg!");
10941 ExtendType = PreferredExtendIt->second;
10944 PendingExports.push_back(Chain);
10956 return A->use_empty();
10958 const BasicBlock &Entry =
A->getParent()->front();
10959 for (
const User *U :
A->users())
10960 if (cast<Instruction>(U)->
getParent() != &Entry || isa<SwitchInst>(U))
10968 std::pair<const AllocaInst *, const StoreInst *>>;
10980 enum StaticAllocaInfo {
Unknown, Clobbered, Elidable };
10982 unsigned NumArgs = FuncInfo->
Fn->
arg_size();
10983 StaticAllocas.
reserve(NumArgs * 2);
10985 auto GetInfoIfStaticAlloca = [&](
const Value *V) -> StaticAllocaInfo * {
10988 V = V->stripPointerCasts();
10989 const auto *AI = dyn_cast<AllocaInst>(V);
10990 if (!AI || !AI->isStaticAlloca() || !FuncInfo->
StaticAllocaMap.count(AI))
10993 return &Iter.first->second;
11003 const auto *SI = dyn_cast<StoreInst>(&
I);
11010 if (
I.isDebugOrPseudoInst())
11014 for (
const Use &U :
I.operands()) {
11015 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(U))
11016 *
Info = StaticAllocaInfo::Clobbered;
11022 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
11023 *
Info = StaticAllocaInfo::Clobbered;
11026 const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
11027 StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(Dst);
11030 const AllocaInst *AI = cast<AllocaInst>(Dst);
11033 if (*
Info != StaticAllocaInfo::Unknown)
11041 const Value *Val = SI->getValueOperand()->stripPointerCasts();
11042 const auto *Arg = dyn_cast<Argument>(Val);
11043 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
11044 Arg->getType()->isEmptyTy() ||
11045 DL.getTypeStoreSize(Arg->getType()) !=
11047 !
DL.typeSizeEqualsStoreSize(Arg->getType()) ||
11048 ArgCopyElisionCandidates.
count(Arg)) {
11049 *
Info = StaticAllocaInfo::Clobbered;
11053 LLVM_DEBUG(
dbgs() <<
"Found argument copy elision candidate: " << *AI
11057 *
Info = StaticAllocaInfo::Elidable;
11058 ArgCopyElisionCandidates.
insert({Arg, {AI, SI}});
11063 if (ArgCopyElisionCandidates.
size() == NumArgs)
11077 auto *LNode = dyn_cast<LoadSDNode>(ArgVals[0]);
11080 auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
11087 auto ArgCopyIter = ArgCopyElisionCandidates.
find(&Arg);
11088 assert(ArgCopyIter != ArgCopyElisionCandidates.
end());
11089 const AllocaInst *AI = ArgCopyIter->second.first;
11090 int FixedIndex = FINode->getIndex();
11092 int OldIndex = AllocaIndex;
11096 dbgs() <<
" argument copy elision failed due to bad fixed stack "
11102 LLVM_DEBUG(
dbgs() <<
" argument copy elision failed: alignment of alloca "
11103 "greater than stack argument alignment ("
11104 <<
DebugStr(RequiredAlignment) <<
" vs "
11112 dbgs() <<
"Eliding argument copy from " << Arg <<
" to " << *AI <<
'\n'
11113 <<
" Replacing frame index " << OldIndex <<
" with " << FixedIndex
11118 AllocaIndex = FixedIndex;
11119 ArgCopyElisionFrameIndexMap.
insert({OldIndex, FixedIndex});
11120 for (
SDValue ArgVal : ArgVals)
11124 const StoreInst *SI = ArgCopyIter->second.second;
11125 ElidedArgCopyInstrs.
insert(SI);
11137void SelectionDAGISel::LowerArguments(
const Function &
F) {
11144 if (
F.hasFnAttribute(Attribute::Naked))
11162 Ins.push_back(RetArg);
11170 ArgCopyElisionCandidates);
11174 unsigned ArgNo = Arg.getArgNo();
11177 bool isArgValueUsed = !Arg.use_empty();
11178 unsigned PartBase = 0;
11179 Type *FinalType = Arg.getType();
11180 if (Arg.hasAttribute(Attribute::ByVal))
11181 FinalType = Arg.getParamByValType();
11183 FinalType,
F.getCallingConv(),
F.isVarArg(),
DL);
11184 for (
unsigned Value = 0, NumValues = ValueVTs.
size();
11191 if (Arg.getType()->isPointerTy()) {
11192 Flags.setPointer();
11193 Flags.setPointerAddrSpace(
11194 cast<PointerType>(Arg.getType())->getAddressSpace());
11196 if (Arg.hasAttribute(Attribute::ZExt))
11198 if (Arg.hasAttribute(Attribute::SExt))
11200 if (Arg.hasAttribute(Attribute::InReg)) {
11204 isa<StructType>(Arg.getType())) {
11207 Flags.setHvaStart();
11213 if (Arg.hasAttribute(Attribute::StructRet))
11215 if (Arg.hasAttribute(Attribute::SwiftSelf))
11216 Flags.setSwiftSelf();
11217 if (Arg.hasAttribute(Attribute::SwiftAsync))
11218 Flags.setSwiftAsync();
11219 if (Arg.hasAttribute(Attribute::SwiftError))
11220 Flags.setSwiftError();
11221 if (Arg.hasAttribute(Attribute::ByVal))
11223 if (Arg.hasAttribute(Attribute::ByRef))
11225 if (Arg.hasAttribute(Attribute::InAlloca)) {
11226 Flags.setInAlloca();
11234 if (Arg.hasAttribute(Attribute::Preallocated)) {
11235 Flags.setPreallocated();
11247 const Align OriginalAlignment(
11249 Flags.setOrigAlign(OriginalAlignment);
11252 Type *ArgMemTy =
nullptr;
11253 if (
Flags.isByVal() ||
Flags.isInAlloca() ||
Flags.isPreallocated() ||
11256 ArgMemTy = Arg.getPointeeInMemoryValueType();
11258 uint64_t MemSize =
DL.getTypeAllocSize(ArgMemTy);
11263 if (
auto ParamAlign = Arg.getParamStackAlign())
11264 MemAlign = *ParamAlign;
11265 else if ((ParamAlign = Arg.getParamAlign()))
11266 MemAlign = *ParamAlign;
11269 if (
Flags.isByRef())
11270 Flags.setByRefSize(MemSize);
11272 Flags.setByValSize(MemSize);
11273 }
else if (
auto ParamAlign = Arg.getParamStackAlign()) {
11274 MemAlign = *ParamAlign;
11276 MemAlign = OriginalAlignment;
11278 Flags.setMemAlign(MemAlign);
11280 if (Arg.hasAttribute(Attribute::Nest))
11283 Flags.setInConsecutiveRegs();
11284 if (ArgCopyElisionCandidates.
count(&Arg))
11285 Flags.setCopyElisionCandidate();
11286 if (Arg.hasAttribute(Attribute::Returned))
11287 Flags.setReturned();
11293 for (
unsigned i = 0; i != NumRegs; ++i) {
11298 Flags, RegisterVT, VT, isArgValueUsed, ArgNo,
11300 if (NumRegs > 1 && i == 0)
11301 MyFlags.Flags.setSplit();
11304 MyFlags.Flags.setOrigAlign(
Align(1));
11305 if (i == NumRegs - 1)
11306 MyFlags.Flags.setSplitEnd();
11308 Ins.push_back(MyFlags);
11310 if (NeedsRegBlock &&
Value == NumValues - 1)
11311 Ins[
Ins.size() - 1].Flags.setInConsecutiveRegsLast();
11319 DAG.
getRoot(),
F.getCallingConv(),
F.isVarArg(), Ins, dl, DAG, InVals);
11323 "LowerFormalArguments didn't return a valid chain!");
11325 "LowerFormalArguments didn't emit the correct number of values!");
11327 for (
unsigned i = 0, e =
Ins.size(); i != e; ++i) {
11328 assert(InVals[i].getNode() &&
11329 "LowerFormalArguments emitted a null value!");
11330 assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
11331 "LowerFormalArguments emitted a value with the wrong type!");
11348 MVT VT = ValueVTs[0].getSimpleVT();
11350 std::optional<ISD::NodeType> AssertOp;
11353 F.getCallingConv(), AssertOp);
11359 FuncInfo->DemoteRegister = SRetReg;
11361 SDB->DAG.getCopyToReg(NewRoot,
SDB->getCurSDLoc(), SRetReg, ArgValue);
11374 unsigned NumValues = ValueVTs.
size();
11375 if (NumValues == 0)
11378 bool ArgHasUses = !Arg.use_empty();
11382 if (Ins[i].
Flags.isCopyElisionCandidate()) {
11383 unsigned NumParts = 0;
11384 for (
EVT VT : ValueVTs)
11386 F.getCallingConv(), VT);
11390 ArrayRef(&InVals[i], NumParts), ArgHasUses);
11395 bool isSwiftErrorArg =
11397 Arg.hasAttribute(Attribute::SwiftError);
11398 if (!ArgHasUses && !isSwiftErrorArg) {
11399 SDB->setUnusedArgValue(&Arg, InVals[i]);
11403 dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
11404 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11407 for (
unsigned Val = 0; Val != NumValues; ++Val) {
11408 EVT VT = ValueVTs[Val];
11410 F.getCallingConv(), VT);
11417 if (ArgHasUses || isSwiftErrorArg) {
11418 std::optional<ISD::NodeType> AssertOp;
11419 if (Arg.hasAttribute(Attribute::SExt))
11421 else if (Arg.hasAttribute(Attribute::ZExt))
11425 PartVT, VT,
nullptr, NewRoot,
11426 F.getCallingConv(), AssertOp));
11433 if (ArgValues.
empty())
11438 dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
11439 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11442 SDB->getCurSDLoc());
11444 SDB->setValue(&Arg, Res);
11457 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
11458 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11467 unsigned Reg = cast<RegisterSDNode>(Res.
getOperand(1))->getReg();
11479 unsigned Reg = cast<RegisterSDNode>(Res.
getOperand(1))->getReg();
11486 FuncInfo->InitializeRegForValue(&Arg);
11487 SDB->CopyToExportRegsIfNeeded(&Arg);
11491 if (!Chains.
empty()) {
11498 assert(i == InVals.
size() &&
"Argument register count mismatch!");
11502 if (!ArgCopyElisionFrameIndexMap.
empty()) {
11505 auto I = ArgCopyElisionFrameIndexMap.
find(
VI.getStackSlot());
11506 if (
I != ArgCopyElisionFrameIndexMap.
end())
11507 VI.updateStackSlot(
I->second);
11522SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(
const BasicBlock *LLVMBB) {
11530 if (!isa<PHINode>(SuccBB->begin()))
continue;
11535 if (!SuccsHandled.
insert(SuccMBB).second)
11543 for (
const PHINode &PN : SuccBB->phis()) {
11545 if (PN.use_empty())
11549 if (PN.getType()->isEmptyTy())
11553 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
11555 if (
const auto *
C = dyn_cast<Constant>(PHIOp)) {
11562 if (
auto *CI = dyn_cast<ConstantInt>(
C))
11574 assert(isa<AllocaInst>(PHIOp) &&
11576 "Didn't codegen value into a register!??");
11586 for (
EVT VT : ValueVTs) {
11588 for (
unsigned i = 0; i != NumRegisters; ++i)
11590 std::make_pair(&*
MBBI++, Reg + i));
11591 Reg += NumRegisters;
11611void SelectionDAGBuilder::updateDAGForMaybeTailCall(
SDValue MaybeTC) {
11613 if (MaybeTC.
getNode() !=
nullptr)
11628 unsigned Size =
W.LastCluster -
W.FirstCluster + 1;
11632 if (
Size == 2 &&
W.MBB == SwitchMBB) {
11645 const APInt &SmallValue =
Small.Low->getValue();
11646 const APInt &BigValue =
Big.Low->getValue();
11649 APInt CommonBit = BigValue ^ SmallValue;
11664 addSuccessorWithProb(SwitchMBB,
Small.MBB,
Small.Prob +
Big.Prob);
11666 addSuccessorWithProb(
11667 SwitchMBB, DefaultMBB,
11671 addSuccessorWithProb(SwitchMBB, DefaultMBB);
11694 return a.Prob != b.Prob ?
11696 a.Low->getValue().slt(b.Low->getValue());
11703 if (
I->Prob >
W.LastCluster->Prob)
11705 if (
I->Kind ==
CC_Range &&
I->MBB == NextMBB) {
11716 UnhandledProbs +=
I->Prob;
11720 bool FallthroughUnreachable =
false;
11722 if (
I ==
W.LastCluster) {
11724 Fallthrough = DefaultMBB;
11725 FallthroughUnreachable = isa<UnreachableInst>(
11729 CurMF->
insert(BBI, Fallthrough);
11733 UnhandledProbs -=
I->Prob;
11743 CurMF->
insert(BBI, JumpMBB);
11745 auto JumpProb =
I->Prob;
11746 auto FallthroughProb = UnhandledProbs;
11754 if (*SI == DefaultMBB) {
11755 JumpProb += DefaultProb / 2;
11756 FallthroughProb -= DefaultProb / 2;
11774 if (FallthroughUnreachable) {
11776 bool HasBranchTargetEnforcement =
false;
11778 HasBranchTargetEnforcement =
11782 HasBranchTargetEnforcement =
11784 "branch-target-enforcement");
11786 if (!HasBranchTargetEnforcement)
11791 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
11792 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
11798 JT->Default = Fallthrough;
11801 if (CurMBB == SwitchMBB) {
11824 BTB->
Prob += DefaultProb / 2;
11828 if (FallthroughUnreachable)
11832 if (CurMBB == SwitchMBB) {
11841 if (
I->Low ==
I->High) {
11856 if (FallthroughUnreachable)
11860 CaseBlock CB(
CC, LHS, RHS, MHS,
I->MBB, Fallthrough, CurMBB,
11863 if (CurMBB == SwitchMBB)
11866 SL->SwitchCases.push_back(CB);
11871 CurMBB = Fallthrough;
11875void SelectionDAGBuilder::splitWorkItem(
SwitchWorkList &WorkList,
11879 assert(
W.FirstCluster->Low->getValue().slt(
W.LastCluster->Low->getValue()) &&
11880 "Clusters not sorted?");
11881 assert(
W.LastCluster -
W.FirstCluster + 1 >= 2 &&
"Too small to split!");
11883 auto [LastLeft, FirstRight, LeftProb, RightProb] =
11884 SL->computeSplitWorkItemInfo(W);
11889 assert(PivotCluster >
W.FirstCluster);
11890 assert(PivotCluster <=
W.LastCluster);
11905 if (FirstLeft == LastLeft && FirstLeft->Kind ==
CC_Range &&
11906 FirstLeft->Low ==
W.GE &&
11907 (FirstLeft->High->getValue() + 1LL) == Pivot->
getValue()) {
11908 LeftMBB = FirstLeft->MBB;
11913 {LeftMBB, FirstLeft, LastLeft,
W.GE, Pivot,
W.DefaultProb / 2});
11922 if (FirstRight == LastRight && FirstRight->Kind ==
CC_Range &&
11923 W.LT && (FirstRight->High->getValue() + 1ULL) ==
W.LT->getValue()) {
11924 RightMBB = FirstRight->MBB;
11929 {RightMBB, FirstRight, LastRight, Pivot,
W.LT,
W.DefaultProb / 2});
11938 if (
W.MBB == SwitchMBB)
11941 SL->SwitchCases.push_back(CB);
11974 unsigned PeeledCaseIndex = 0;
11975 bool SwitchPeeled =
false;
11978 if (
CC.Prob < TopCaseProb)
11980 TopCaseProb =
CC.Prob;
11981 PeeledCaseIndex =
Index;
11982 SwitchPeeled =
true;
11987 LLVM_DEBUG(
dbgs() <<
"Peeled one top case in switch stmt, prob: "
11988 << TopCaseProb <<
"\n");
11998 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
12000 nullptr,
nullptr, TopCaseProb.
getCompl()};
12001 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
12003 Clusters.erase(PeeledCaseIt);
12006 dbgs() <<
"Scale the probablity for one cluster, before scaling: "
12007 <<
CC.Prob <<
"\n");
12011 PeeledCaseProb = TopCaseProb;
12012 return PeeledSwitchMBB;
12015void SelectionDAGBuilder::visitSwitch(
const SwitchInst &SI) {
12019 Clusters.reserve(
SI.getNumCases());
12020 for (
auto I :
SI.cases()) {
12039 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
12043 if (Clusters.empty()) {
12044 assert(PeeledSwitchMBB == SwitchMBB);
12046 if (DefaultMBB != NextBlock(SwitchMBB)) {
12055 SL->findBitTestClusters(Clusters, &SI);
12058 dbgs() <<
"Case clusters: ";
12065 C.Low->getValue().print(
dbgs(),
true);
12066 if (
C.Low !=
C.High) {
12068 C.High->getValue().print(
dbgs(),
true);
12075 assert(!Clusters.empty());
12079 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
12086 {PeeledSwitchMBB,
First,
Last,
nullptr,
nullptr, DefaultProb});
12088 while (!WorkList.
empty()) {
12090 unsigned NumClusters =
W.LastCluster -
W.FirstCluster + 1;
12095 splitWorkItem(WorkList, W,
SI.getCondition(), SwitchMBB);
12099 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, DefaultMBB);
12103void SelectionDAGBuilder::visitStepVector(
const CallInst &
I) {
12110void SelectionDAGBuilder::visitVectorReverse(
const CallInst &
I) {
12116 assert(VT ==
V.getValueType() &&
"Malformed vector.reverse!");
12127 for (
unsigned i = 0; i != NumElts; ++i)
12128 Mask.push_back(NumElts - 1 - i);
12133void SelectionDAGBuilder::visitVectorDeinterleave(
const CallInst &
I) {
12164void SelectionDAGBuilder::visitVectorInterleave(
const CallInst &
I) {
12189void SelectionDAGBuilder::visitFreeze(
const FreezeInst &
I) {
12193 unsigned NumValues = ValueVTs.
size();
12194 if (NumValues == 0)
return;
12199 for (
unsigned i = 0; i != NumValues; ++i)
12207void SelectionDAGBuilder::visitVectorSplice(
const CallInst &
I) {
12214 int64_t
Imm = cast<ConstantInt>(
I.getOperand(2))->getSExtValue();
12230 for (
unsigned i = 0; i < NumElts; ++i)
12259 assert(
MI->getOpcode() == TargetOpcode::COPY &&
12260 "start of copy chain MUST be COPY");
12261 Reg =
MI->getOperand(1).getReg();
12262 MI =
MRI.def_begin(Reg)->getParent();
12264 if (
MI->getOpcode() == TargetOpcode::COPY) {
12265 assert(Reg.isVirtual() &&
"expected COPY of virtual register");
12266 Reg =
MI->getOperand(1).getReg();
12267 assert(Reg.isPhysical() &&
"expected COPY of physical register");
12268 MI =
MRI.def_begin(Reg)->getParent();
12271 assert(
MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
12272 "end of copy chain MUST be INLINEASM_BR");
12280void SelectionDAGBuilder::visitCallBrLandingPad(
const CallInst &
I) {
12284 cast<CallBrInst>(
I.getParent()->getUniquePredecessor()->getTerminator());
12296 for (
auto &
T : TargetConstraints) {
12297 SDISelAsmOperandInfo OpInfo(
T);
12305 switch (OpInfo.ConstraintType) {
12313 for (
size_t i = 0, e = OpInfo.AssignedRegs.Regs.size(); i != e; ++i) {
12318 OpInfo.AssignedRegs.Regs[i] = OriginalDef;
12321 SDValue V = OpInfo.AssignedRegs.getCopyFromRegs(
12324 ResultVTs.
push_back(OpInfo.ConstraintVT);
12333 ResultVTs.
push_back(OpInfo.ConstraintVT);
unsigned const MachineRegisterInfo * MRI
static unsigned getIntrinsicID(const SDNode *N)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file implements the BitVector class.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
const HexagonInstrInfo * TII
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
static void getRegistersForValue(MachineFunction &MF, MachineIRBuilder &MIRBuilder, GISelAsmOperandInfo &OpInfo, GISelAsmOperandInfo &RefOpInfo)
Assign virtual/physical registers for the specified register operand.
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
unsigned const TargetRegisterInfo * TRI
static const Function * getCalledFunction(const Value *V, bool &IsNoBuiltin)
This file provides utility analysis objects describing memory locations.
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
Module.h This file contains the declarations for the Module class.
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool hasOnlySelectUsers(const Value *Cond)
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL, SDValue &Chain)
Create a LOAD_STACK_GUARD node, and let it carry the target specific global variable if there exists ...
static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx, const SDLoc &DL, SmallVectorImpl< SDValue > &Ops, SelectionDAGBuilder &Builder)
Add a stack map intrinsic call's live variable operands to a stackmap or patchpoint target node's ope...
static const unsigned MaxParallelChains
static void getUnderlyingArgRegs(SmallVectorImpl< std::pair< unsigned, TypeSize > > &Regs, const SDValue &N)
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
visitPow - Lower a pow intrinsic.
static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index, ISD::MemIndexType &IndexType, SDValue &Scale, SelectionDAGBuilder *SDB, const BasicBlock *CurBB, uint64_t ElemSize)
static const CallBase * FindPreallocatedCall(const Value *PreallocatedSetup)
Given a @llvm.call.preallocated.setup, return the corresponding preallocated call.
static cl::opt< unsigned > SwitchPeelThreshold("switch-peel-threshold", cl::Hidden, cl::init(66), cl::desc("Set the case probability threshold for peeling the case from a " "switch statement. A value greater than 100 will void this " "optimization"))
static cl::opt< bool > InsertAssertAlign("insert-assert-align", cl::init(true), cl::desc("Insert the experimental `assertalign` node."), cl::ReallyHidden)
static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin)
static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG, DILocalVariable *Variable, DebugLoc DL, unsigned Order, SmallVectorImpl< Value * > &Values, DIExpression *Expression)
static unsigned findMatchingInlineAsmOperand(unsigned OperandNo, const std::vector< SDValue > &AsmNodeOperands)
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo, SDISelAsmOperandInfo &MatchingOpInfo, SelectionDAG &DAG)
Make sure that the output operand OpInfo and its corresponding input operand MatchingOpInfo have comp...
static void findUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
When an invoke or a cleanupret unwinds to the next EH pad, there are many places it could ultimately ...
static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic)
static BranchProbability scaleCaseProbality(BranchProbability CaseProb, BranchProbability PeeledCaseProb)
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp2 - Lower an exp2 intrinsic.
static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue Scale, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt, const SDLoc &dl)
getF32Constant - Get 32-bit floating point constant.
static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val, const SDLoc &DL, EVT PartVT)
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog10 - Lower a log10 intrinsic.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv)
getCopyToPartsVector - Create a series of nodes that contain the specified value split into legal par...
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv=std::nullopt, ISD::NodeType ExtendKind=ISD::ANY_EXTEND)
getCopyToParts - Create a series of nodes that contain the specified value split into legal parts.
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, SelectionDAGBuilder &Builder)
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog2 - Lower a log2 intrinsic.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location, SDISelAsmOperandInfo &OpInfo, SelectionDAG &DAG)
Get a direct memory input to behave well as an indirect operand.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel)
isOnlyUsedInEntryBlock - If the specified argument is only used in the entry block,...
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V, const Twine &ErrMsg)
static bool collectInstructionDeps(SmallMapVector< const Instruction *, bool, 8 > *Deps, const Value *V, SmallMapVector< const Instruction *, bool, 8 > *Necessary=nullptr, unsigned Depth=0)
static void findArgumentCopyElisionCandidates(const DataLayout &DL, FunctionLoweringInfo *FuncInfo, ArgCopyElisionMapTy &ArgCopyElisionCandidates)
Scan the entry block of the function in FuncInfo for arguments that look like copies into a local all...
static bool isFunction(SDValue Op)
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI, const SDLoc &dl)
GetExponent - Get the exponent:
static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg)
static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, SelectionDAG &DAG)
ExpandPowI - Expand a llvm.powi intrinsic.
static void findWasmUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog - Lower a log intrinsic.
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC=std::nullopt, std::optional< ISD::NodeType > AssertOp=std::nullopt)
getCopyFromParts - Create a value that contains the specified legal parts combined into the value the...
static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl, SelectionDAG &DAG)
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl)
GetSignificand - Get the significand and build it into a floating-point number with exponent of 1:
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp - Lower an exp intrinsic.
static const MDNode * getRangeMetadata(const Instruction &I)
static cl::opt< unsigned, true > LimitFPPrecision("limit-float-precision", cl::desc("Generate low-precision inline sequences " "for some float libcalls"), cl::location(LimitFloatPrecision), cl::Hidden, cl::init(0))
static void tryToElideArgumentCopy(FunctionLoweringInfo &FuncInfo, SmallVectorImpl< SDValue > &Chains, DenseMap< int, int > &ArgCopyElisionFrameIndexMap, SmallPtrSetImpl< const Instruction * > &ElidedArgCopyInstrs, ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg, ArrayRef< SDValue > ArgVals, bool &ArgHasUses)
Try to elide argument copies from memory into a local alloca.
static unsigned LimitFloatPrecision
LimitFloatPrecision - Generate low-precision inline sequences for some float libcalls (6,...
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC)
getCopyFromPartsVector - Create a value that contains the specified legal parts combined into the val...
static bool InBlock(const Value *V, const BasicBlock *BB)
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
static SymbolRef::Type getType(const Symbol *Sym)
support::ulittle16_t & Lo
support::ulittle16_t & Hi
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)
Checks whether the given location points to constant memory, or if OrLocal is true whether it points ...
Class for arbitrary precision integers.
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
an instruction to allocate memory on the stack
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
This class represents an incoming formal argument to a Function.
bool hasAttribute(Attribute::AttrKind Kind) const
Check if an argument has a given attribute.
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
This class represents the atomic memcpy intrinsic i.e.
an instruction that atomically reads a memory location, combines it with another value,...
@ Min
*p = old <signed v ? old : v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
AttributeSet getRetAttrs() const
The attributes for the ret value are returned.
bool hasFnAttr(Attribute::AttrKind Kind) const
Return true if the attribute exists for the function.
bool getValueAsBool() const
Return the attribute's value as a boolean.
LLVM Basic Block Representation.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Function * getParent() const
Return the enclosing method, or null if none.
const Instruction * getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
const Instruction & back() const
This class represents a no-op cast from one type to another.
bool test(unsigned Idx) const
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
size_type size() const
size - Returns the number of bits in this bitvector.
The address of a basic block.
Conditional or Unconditional Branch instruction.
Analysis providing branch probability information.
BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const
Test if an edge is hot relative to other out-edges of the Src.
static uint32_t getDenominator()
static BranchProbability getOne()
uint32_t getNumerator() const
uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
CallingConv::ID getCallingConv() const
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
bool isIndirectCall() const
Return true if the callsite is an indirect call.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
unsigned arg_size() const
AttributeList getAttributes() const
Return the parameter attributes for this call.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
A constant value that is initialized with an expression using other constant values.
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
This class represents a range of values.
APInt getUnsignedMin() const
Return the smallest unsigned value contained in the ConstantRange.
bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
bool isEmptySet() const
Return true if this set contains no members.
bool isUpperWrapped() const
Return true if the exclusive upper bound wraps around the unsigned domain.
APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
uint64_t getZExtValue() const
Constant Vector Declarations.
This is an important base class in LLVM.
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static bool fragmentsOverlap(const FragmentInfo &A, const FragmentInfo &B)
Check if fragments overlap between a pair of FragmentInfos.
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
static const DIExpression * convertToUndefExpression(const DIExpression *Expr)
Removes all elements from Expr that do not apply to an undef debug value, which includes every operat...
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
Base class for variables.
std::optional< uint64_t > getSizeInBits() const
Determines the size of the variable's type.
Records a position in IR for a source label (DILabel).
Record of a variable value-assignment, aka a non instruction representation of the dbg....
DIExpression * getExpression() const
iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
LocationType getType() const
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
unsigned getAllocaAddrSpace() const
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
This represents the llvm.dbg.label instruction.
DILabel * getLabel() const
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
This represents the llvm.dbg.value instruction.
iterator_range< location_op_iterator > getValues() const
DILocalVariable * getVariable() const
DIExpression * getExpression() const
bool isKillLocation() const
DILocation * getInlinedAt() const
iterator find(const_arg_type_t< KeyT > Val)
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
static constexpr ElementCount getFixed(ScalarTy MinVal)
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
constexpr bool isScalar() const
Exactly one element.
Class representing an expression and its matching format.
This instruction compares its operands according to the predicate given to the constructor.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
bool allowReassoc() const
Flag queries.
An instruction for ordering other memory operations.
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
BranchProbabilityInfo * BPI
Register CreateRegs(const Value *V)
Register DemoteRegister
DemoteRegister - if CanLowerReturn is false, DemoteRegister is a vreg allocated to hold a pointer to ...
BitVector DescribedArgs
Bitvector with a bit set if corresponding argument is described in ArgDbgValues.
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block.
int getArgumentFrameIndex(const Argument *A)
getArgumentFrameIndex - Get frame index for the byval argument.
DenseMap< const BasicBlock *, MachineBasicBlock * > MBBMap
MBBMap - A mapping from LLVM basic blocks to their machine code entry.
SmallPtrSet< const DPValue *, 8 > PreprocessedDPVDeclares
bool isExportedInst(const Value *V) const
isExportedInst - Return true if the specified value is an instruction exported from its block.
const LiveOutInfo * GetLiveOutRegInfo(Register Reg)
GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the register is a PHI destinat...
Register InitializeRegForValue(const Value *V)
unsigned ExceptionPointerVirtReg
If the current MBB is a landing pad, the exception pointer and exception selector registers are copie...
SmallPtrSet< const DbgDeclareInst *, 8 > PreprocessedDbgDeclares
Collection of dbg.declare instructions handled after argument lowering and before ISel proper.
DenseMap< const Value *, Register > ValueMap
ValueMap - Since we emit code for the function a basic block at a time, we must remember which virtua...
MachineBasicBlock::iterator InsertPt
InsertPt - The current insert position inside the current block.
MachineBasicBlock * MBB
MBB - The current block.
std::vector< std::pair< MachineInstr *, unsigned > > PHINodesToUpdate
PHINodesToUpdate - A list of phi instructions whose operand list will be updated after processing the...
unsigned ExceptionSelectorVirtReg
SmallVector< MachineInstr *, 8 > ArgDbgValues
ArgDbgValues - A list of DBG_VALUE instructions created during isel for function arguments that are i...
MachineRegisterInfo * RegInfo
Register CreateReg(MVT VT, bool isDivergent=false)
CreateReg - Allocate a single virtual register for the given type.
bool CanLowerReturn
CanLowerReturn - true iff the function's return value can be lowered to registers.
DenseMap< const Value *, ISD::NodeType > PreferredExtendType
Record the preferred extend type (ISD::SIGN_EXTEND or ISD::ZERO_EXTEND) for a value.
Register getCatchPadExceptionPointerVReg(const Value *CPI, const TargetRegisterClass *RC)
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
Type * getReturnType() const
Data structure describing the variable locations in a function.
const BasicBlock & getEntryBlock() const
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generatio...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Constant * getPersonalityFn() const
Get the personality function associated with this function.
AttributeList getAttributes() const
Return the attribute list for this Function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Garbage collection metadata for a single function.
void addStackRoot(int Num, const Constant *Metadata)
addStackRoot - Registers a root that lives on the stack.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
This instruction inserts a struct field of array element value into an aggregate value.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const BasicBlock * getParent() const
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
@ MIN_INT_BITS
Minimum number of bits that can be specified.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
void emitError(uint64_t LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
The landingpad instruction holds all of the information necessary to generate correct exception handl...
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
static LocationSize precise(uint64_t Value)
MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
uint64_t getScalarSizeInBits() const
@ INVALID_SIMPLE_VALUE_TYPE
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool bitsGE(MVT VT) const
Return true if this has no less bits than VT.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
bool isEHPad() const
Returns true if the block is a landing pad.
void setIsEHCatchretTarget(bool V=true)
Indicates if this is a target block of a catchret.
void setIsCleanupFuncletEntry(bool V=true)
Indicates if this is the entry block of a cleanup funclet.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
succ_iterator succ_begin()
std::vector< MachineBasicBlock * >::iterator succ_iterator
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void setIsEHFuncletEntry(bool V=true)
Indicates if this is the entry block of an EH funclet.
void setIsEHScopeEntry(bool V=true)
Indicates if this is the entry block of an EH scope, i.e., the block that used to have a catchpa...
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable)
Marks the immutability of an object.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setHasPatchPoint(bool s=true)
void setHasStackMap(bool s=true)
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
void RemoveStackObject(int ObjectIdx)
Remove or mark dead a statically sized stack object.
void setFunctionContextIndex(int I)
Description of the location of a variable whose Address is valid and unchanging during function execu...
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
void setCallsUnwindInit(bool b)
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site)
Map the begin label for a call site.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
void setHasEHCatchret(bool V)
void setCallsEHReturn(bool b)
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
unsigned getTypeIDFor(const GlobalValue *TI)
Return the type id for the specified typeinfo. This is function wide.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
auto getInStackSlotVariableDbgInfo()
Returns the collection of variables for which we have debug info and that have been assigned a stack ...
void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD)
Record annotations associated with a particular label.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineModuleInfo & getMMI() const
const MachineBasicBlock & front() const
bool hasEHFunclets() const
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void erase(iterator MBBI)
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
const MCContext & getContext() const
const Module * getModule() const
void setCurrentCallSite(unsigned Site)
Set the call site currently being processed.
unsigned getCurrentCallSite()
Get the call site currently being processed, if any.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
static MachineOperand CreateFI(int Idx)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
ArrayRef< std::pair< MCRegister, Register > > liveins() const
An SDNode that represents everything that will be needed to construct a MachineInstr.
bool contains(const KeyT &Key) const
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
Representation for a specific memory location.
static MemoryLocation getAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location after Ptr, while remaining within the underlying objec...
A Module instance is used to store all the information related to an LLVM module.
Metadata * getModuleFlag(StringRef Key) const
Return the corresponding value if Key appears in module flags, otherwise return null.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A udiv or sdiv instruction, which can be marked as "exact", indicating that no bits are destroyed.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
Holds the information from a dbg_label node through SDISel.
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(unsigned VReg)
static SDDbgOperand fromConst(const Value *Const)
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
SelectionDAGBuilder - This is the common target-independent lowering implementation that is parameter...
SDValue getValue(const Value *V)
getValue - Return an SDValue for the given Value.
void addDanglingDebugInfo(SmallVectorImpl< Value * > &Values, DILocalVariable *Var, DIExpression *Expr, bool IsVariadic, DebugLoc DL, unsigned Order)
Register a dbg_value which relies on a Value which we have not yet seen.
void visitDbgInfo(const Instruction &I)
void clearDanglingDebugInfo()
Clear the dangling debug information map.
void clear()
Clear out the current SelectionDAG and the associated state and prepare this SelectionDAGBuilder obje...
void visitBitTestHeader(SwitchCG::BitTestBlock &B, MachineBasicBlock *SwitchBB)
visitBitTestHeader - This function emits necessary code to produce value suitable for "bit tests"
void LowerStatepoint(const GCStatepointInst &I, const BasicBlock *EHPadBB=nullptr)
std::unique_ptr< SDAGSwitchLowering > SL
SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I, SDValue Op)
bool HasTailCall
This is set to true if a call in the current block has been translated as a tail call.
bool ShouldEmitAsBranches(const std::vector< SwitchCG::CaseBlock > &Cases)
If the set of cases should be emitted as a series of branches, return true.
void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
EmitBranchForMergedCondition - Helper method for FindMergedConditions.
void LowerDeoptimizeCall(const CallInst *CI)
void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee, const BasicBlock *EHPadBB)
SwiftErrorValueTracking & SwiftError
Information about the swifterror values used throughout the function.
SDValue getNonRegisterValue(const Value *V)
getNonRegisterValue - Return an SDValue for the given Value, but don't look in FuncInfo....
void CopyValueToVirtualRegister(const Value *V, unsigned Reg, ISD::NodeType ExtendType=ISD::ANY_EXTEND)
DenseMap< MachineBasicBlock *, SmallVector< unsigned, 4 > > LPadToCallSiteMap
Map a landing pad to the call site indexes.
void handleDebugDeclare(Value *Address, DILocalVariable *Variable, DIExpression *Expression, DebugLoc DL)
void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB, BranchProbability BranchProbToNext, unsigned Reg, SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB)
visitBitTestCase - this function produces one "bit test"
bool shouldKeepJumpConditionsTogether(const FunctionLoweringInfo &FuncInfo, const BranchInst &I, Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs, TargetLoweringBase::CondMergingParams Params) const
void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall, bool IsMustTailCall, const BasicBlock *EHPadBB=nullptr)
StatepointLoweringState StatepointLowering
State used while lowering a statepoint sequence (gc_statepoint, gc_relocate, and gc_result).
void init(GCFunctionInfo *gfi, AAResults *AA, AssumptionCache *AC, const TargetLibraryInfo *li)
DenseMap< const Constant *, unsigned > ConstantsOut
void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI, const CallBase *Call, unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy, AttributeSet RetAttrs, bool IsPatchPoint)
Populate a CallLoweringInfo (into CLI) based on the properties of the call being lowered.
void salvageUnresolvedDbgValue(const Value *V, DanglingDebugInfo &DDI)
For the given dangling debuginfo record, perform last-ditch efforts to resolve the debuginfo to somet...
SmallVector< SDValue, 8 > PendingLoads
Loads are not emitted to the program immediately.
GCFunctionInfo * GFI
Garbage collection metadata for the function.
SDValue getRoot()
Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict) items.
void ExportFromCurrentBlock(const Value *V)
ExportFromCurrentBlock - If this condition isn't known to be exported from the current basic block,...
DebugLoc getCurDebugLoc() const
void resolveOrClearDbgInfo()
Evict any dangling debug information, attempting to salvage it first.
std::pair< SDValue, SDValue > lowerInvokable(TargetLowering::CallLoweringInfo &CLI, const BasicBlock *EHPadBB=nullptr)
SDValue getMemoryRoot()
Return the current virtual root of the Selection DAG, flushing any PendingLoad items.
void resolveDanglingDebugInfo(const Value *V, SDValue Val)
If we saw an earlier dbg_value referring to V, generate the debug data structures now that we've seen...
SDLoc getCurSDLoc() const
void visit(const Instruction &I)
void dropDanglingDebugInfo(const DILocalVariable *Variable, const DIExpression *Expr)
If we have dangling debug info that describes Variable, or an overlapping part of variable considerin...
SDValue getCopyFromRegs(const Value *V, Type *Ty)
If there was virtual register allocated for the value V emit CopyFromReg of the specified type Ty.
void CopyToExportRegsIfNeeded(const Value *V)
CopyToExportRegsIfNeeded - If the given value has virtual registers created for it,...
void handleKillDebugValue(DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order)
Create a record for a kill location debug intrinsic.
void visitJumpTable(SwitchCG::JumpTable &JT)
visitJumpTable - Emit JumpTable node in the current MBB
void visitJumpTableHeader(SwitchCG::JumpTable &JT, SwitchCG::JumpTableHeader &JTH, MachineBasicBlock *SwitchBB)
visitJumpTableHeader - This function emits necessary code to produce index in the JumpTable from swit...
static const unsigned LowestSDNodeOrder
Lowest valid SDNodeOrder.
void LowerDeoptimizingReturn()
FunctionLoweringInfo & FuncInfo
Information about the function as a whole.
void setValue(const Value *V, SDValue NewN)
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, Instruction::BinaryOps Opc, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
const TargetLibraryInfo * LibInfo
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB)
void visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineBasicBlock *ParentBB)
Codegen a new tail for a stack protector check ParentMBB which has had its tail spliced into a stack ...
bool handleDebugValue(ArrayRef< const Value * > Values, DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order, bool IsVariadic)
For a given list of Values, attempt to create and record a SDDbgValue in the SelectionDAG.
SDValue getControlRoot()
Similar to getRoot, but instead of flushing all the PendingLoad items, flush all the PendingExports (...
void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last)
When an MBB was split during scheduling, update the references that need to refer to the last resulti...
SDValue getValueImpl(const Value *V)
getValueImpl - Helper function for getValue and getNonRegisterValue.
void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB)
visitSwitchCase - Emits the necessary code to represent a single node in the binary search tree resul...
void visitSPDescriptorFailure(StackProtectorDescriptor &SPD)
Codegen the failure basic block for a stack protector check.
std::unique_ptr< FunctionLoweringInfo > FuncInfo
SmallPtrSet< const Instruction *, 4 > ElidedArgCopyInstrs
const TargetLowering * TLI
virtual void emitFunctionEntryCode()
SwiftErrorValueTracking * SwiftError
std::unique_ptr< SelectionDAGBuilder > SDB
Targets can subclass this to parameterize the SelectionDAG lowering and instruction selection process...
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, SDValue MaxLength, MachinePointerInfo SrcPtrInfo) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a memcmp/bcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dest, SDValue Src, MachinePointerInfo DestPtrInfo, MachinePointerInfo SrcPtrInfo, bool isStpcpy) const
Emit target-specific code that performs a strcpy or stpcpy, in cases where that is faster than a libc...
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Src, SDValue Char, SDValue Length, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memchr, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a strcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, MachinePointerInfo SrcPtrInfo) const
virtual SDValue EmitTargetCodeForSetTag(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Addr, SDValue Size, MachinePointerInfo DstPtrInfo, bool ZeroData) const
Help to insert SDNodeFlags automatically in transforming.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root, MCSymbol *Label)
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
const TargetSubtargetInfo & getSubtarget() const
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
BlockFrequencyInfo * getBFI() const
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Value, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo)
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid, uint64_t Index, uint32_t Attr)
Creates a PseudoProbeSDNode with function GUID Guid and the index of the block Index it is probing,...
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, MachineMemOperand *MMO)
Gets a node for an atomic cmpxchg op.
SDDbgValue * getVRegDbgValue(DIVariable *Var, DIExpression *Expr, unsigned VReg, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a VReg SDDbgValue node.
void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To, unsigned Num)
Like ReplaceAllUsesOfValueWith, but for multiple values at once.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getElementCount(const SDLoc &DL, EVT VT, ElementCount EC, bool ConstantFold=true)
SDValue getGetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A)
Return an AssertAlignSDNode.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
bool shouldOptForSize() const
SDValue getVPZExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be an integer vector, to the vector-type VT,...
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
SDValue getStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
void DeleteNode(SDNode *N)
Remove the specified node from the system.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
ProfileSummaryInfo * getPSI() const
SDValue getTargetFrameIndex(int FI, EVT VT)
SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
const SelectionDAGTargetInfo & getSelectionDAGInfo() const
SDDbgLabel * getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O)
Creates a SDDbgLabel node.
SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue SV, unsigned Align)
VAArg produces a result and token chain, and takes a pointer and a source value as input.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSrcValue(const Value *v)
Construct a node to track a Value* through the backend.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
MaybeAlign InferPtrAlign(SDValue Ptr) const
Infer alignment of a load / store address.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getRegister(unsigned Reg, EVT VT)
void AddDbgLabel(SDDbgLabel *DB)
Add a dbg_label SDNode.
SDValue getBasicBlock(MachineBasicBlock *MBB)
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or truncating it.
SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label)
SDValue getSetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
SDValue getStepVector(const SDLoc &DL, EVT ResVT, APInt StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncating it.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, bool isTarget=false, unsigned TargetFlags=0)
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, uint64_t Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDDbgValue * getConstantDbgValue(DIVariable *Var, DIExpression *Expr, const Value *C, const DebugLoc &DL, unsigned O)
Creates a constant SDDbgValue node.
SDValue getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getValueType(EVT)
SDValue getTargetConstantFP(double Val, const SDLoc &DL, EVT VT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by truncation).
SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDDbgValue * getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr, unsigned FI, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a FrameIndex SDDbgValue node.
SDValue getJumpTable(int JTI, EVT VT, bool isTarget=false, unsigned TargetFlags=0)
SDValue getVPPtrExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be of integer type, to the vector-type integer type VT,...
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to extend the Op as a pointer value assuming it was the smaller SrcTy ...
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
const FunctionVarLocs * getFunctionVarLocs() const
Returns the result of the AssignmentTrackingAnalysis pass if it's available, otherwise return nullptr...
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or truncating it.
SDValue getCondCode(ISD::CondCode Cond)
SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain, int FrameIndex, int64_t Size, int64_t Offset=-1)
Creates a LifetimeSDNode that starts (IsStart==true) or ends (IsStart==false) the lifetime of the por...
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
void addPCSections(const SDNode *Node, MDNode *MD)
Set PCSections to be associated with Node.
SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL, bool LegalTypes=true)
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
SDValue getSetCCVP(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Mask, SDValue EVL)
Helper function to make it easier to build VP_SETCCs if you just have an ISD::CondCode instead of an ...
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDDbgValue * getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N, unsigned R, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a SDDbgValue node.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, bool IsTruncating=false)
A templated base class for SmallPtrSet which provides the typesafe interface that is common across all SmallPtrSet instances.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less than N). In this case, the set can be maintained with no mallocs.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void swap(SmallVectorImpl &RHS)
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
MachineBasicBlock * getSuccessMBB()
MachineBasicBlock * getFailureMBB()
void clear()
Clear the memory usage of this object.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
TypeSize getElementOffset(unsigned Idx) const
Class to represent struct types.
void setCurrentVReg(const MachineBasicBlock *MBB, const Value *, Register)
Set the swifterror virtual register in the VRegDefMap for this basic block.
Register getOrCreateVRegUseAt(const Instruction *, const MachineBasicBlock *, const Value *)
Get or create the swifterror value virtual register for a use of a swifterror by an instruction.
Register getOrCreateVRegDefAt(const Instruction *, const MachineBasicBlock *, const Value *)
Get or create the swifterror value virtual register for a def of a swifterror by an instruction.
const Value * getFunctionArg() const
Get the (unique) function argument that was marked swifterror, or nullptr if this function has no swifterror args.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
TargetIntrinsicInfo - Interface to description of target-specific intrinsic functions.
Provides information about what library functions are available for the current target.
bool hasOptimizedCodeGen(LibFunc F) const
Tests if the function is both available and a candidate for optimized code generation.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
void setAttributes(const CallBase *Call, unsigned ArgIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes.
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual bool isLegalScaleForGatherScatter(uint64_t Scale, uint64_t ElemSize) const
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual CondMergingParams getJumpConditionMergingParams(Instruction::BinaryOps, const Value *, const Value *) const
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
bool isJumpExpensive() const
Return true if Flow Control is an expensive operation that should be avoided.
virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const
Returns true if the index type for a masked gather/scatter requires extending.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
virtual Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?...
virtual Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const
Certain targets have context sensitive alignment requirements, where one type has the alignment requi...
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, bool LegalTypes=true) const
Returns the type for the shift amount of a shift opcode.
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
virtual MVT getFenceOperandTy(const DataLayout &DL) const
Return the type for operands of fence.
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF, bool IsScalable) const
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual uint64_t getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Return the desired alignment for ByVal or InAlloca aggregate function arguments in the caller paramet...
virtual MVT hasFastEqualityCompare(unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the give...
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
virtual bool shouldExpandCttzElements(EVT VT) const
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
virtual bool signExtendConstant(const ConstantInt *C) const
Return true if this constant should be sign extended when promoting to a larger type.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
bool supportsUnalignedAtomics() const
Whether the target supports unaligned atomic operations.
std::vector< ArgListEntry > ArgListTy
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a landing pad.
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool supportKCFIBundles() const
Return true if the target supports kcfi operand bundles.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const
Target-specific splitting of values into parts that fit a register storing a legal type.
virtual SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const
Target-specific combining of register parts into its original value.
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual const char * getClearCacheBuiltinName() const
Return the builtin name for the __builtin___clear_cache intrinsic Default is to invoke the clear cach...
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual SDValue LowerFormalArguments(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::InputArg > &, const SDLoc &, SelectionDAG &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to u...
virtual SDValue LowerReturn(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::OutputArg > &, const SmallVectorImpl< SDValue > &, const SDLoc &, SelectionDAG &) const
This hook must be implemented to lower outgoing return values, described by the Outs array,...
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
virtual bool isInlineAsmTargetBranch(const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use eithe...
virtual const TargetIntrinsicInfo * getIntrinsicInfo() const
If intrinsic information is available, return it. If not, return null.
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
const Triple & getTargetTriple() const
virtual TargetTransformInfo getTargetTransformInfo(const Function &F) const
Return a TargetTransformInfo for a given function.
CodeModel::Model getCodeModel() const
Returns the code model.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
unsigned EnableFastISel
EnableFastISel - This flag enables fast-path instruction selection which trades away generated code q...
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.
unsigned getID() const
Return the register class ID number.
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetInstrInfo * getInstrInfo() const
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
ArchType getArch() const
Get the parsed architecture type of this triple.
bool isPS() const
Tests whether the target is the PS4 or PS5 platform.
bool isWasm() const
Tests whether the target is wasm (32- and 64-bit).
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
TypeID
Definitions of all of the base types for the Type system.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
static Type * getVoidTy(LLVMContext &C)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
bool isVoidTy() const
Return true if this is 'void'.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
CmpInst::Predicate getPredicate() const
This is the common base class for vector predication intrinsics.
static std::optional< unsigned > getVectorLengthParamPos(Intrinsic::ID IntrinsicID)
MaybeAlign getPointerAlignment() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
LLVMContext & getContext() const
All values hold a context through their type.
StringRef getName() const
Return a constant reference to the value's name.
Base class of all SIMD vector types.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ X86_VectorCall
MSVC calling convention that passes vectors and vector aggregates in SSE registers.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ SET_FPENV
Sets the current floating-point environment.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
@ RESET_FPENV
Set floating-point environment to default state.
@ ADD
Simple integer binary arithmetic operators.
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ SET_FPMODE
Sets the current dynamic floating-point control modes.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ EH_SJLJ_SETUP_DISPATCH
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here.
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ RESET_FPMODE
Sets default dynamic floating-point control modes.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ INIT_TRAMPOLINE
INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
@ ANNOTATION_LABEL
ANNOTATION_LABEL - Represents a mid basic block label used by annotations.
@ SET_ROUNDING
Set rounding mode.
@ SIGN_EXTEND
Conversion operators.
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readsteadycounter intrinsic.
@ ADDROFRETURNADDR
ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
@ BR
Control flow instructions. These all have token chains.
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ SSUBO
Same for subtraction.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the same...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ ARITH_FENCE
ARITH_FENCE - This corresponds to a arithmetic fence intrinsic.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ CLEANUPRET
CLEANUPRET - Represents a return from a cleanup block funclet.
@ GET_FPMODE
Reads the current dynamic floating-point control modes.
@ GET_FPENV
Gets the current floating-point environment.
@ SHL
Shift and rotation operations.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a sc...
@ LOCAL_RECOVER
LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ UBSANTRAP
UBSANTRAP - Trap with an immediate describing the kind of sanitizer failure.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ PCMARKER
PCMARKER - This corresponds to the pcmarker intrinsic.
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ SPONENTRY
SPONENTRY - Represents the llvm.sponentry intrinsic.
@ INLINEASM
INLINEASM - Represents an inline asm block.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ BRCOND
BRCOND - Conditional branch.
@ CATCHRET
CATCHRET - Represents a return from a catch block funclet.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the sa...
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ ADJUST_TRAMPOLINE
ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Flag
These should be considered private to the implementation of the MCInstrDesc class.
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
OneUse_match< T > m_OneUse(const T &SubPattern)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
VScaleVal_match m_VScale()
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
std::vector< CaseCluster > CaseClusterVector
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
CaseClusterVector::iterator CaseClusterIt
std::pair< JumpTableHeader, JumpTable > JumpTableBlock
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebMayTrap
This corresponds to "fpexcept.maytrap".
@ ebIgnore
This corresponds to "fpexcept.ignore".
NodeAddr< FuncNode * > Func
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
int popcount(T Value) noexcept
Count the number of set bits in a value.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
gep_type_iterator gep_type_end(const User *GEP)
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
detail::zippy< detail::zip_first, T, U, Args... > zip_first(T &&t, U &&u, Args &&...args)
zip iterator that, for the sake of efficiency, assumes the first iteratee to be the shortest.
void sort(IteratorTy Start, IteratorTy End)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
bool hasSingleElement(ContainerTy &&C)
Returns true if the given container only contains a single element.
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Global
Append to llvm.global_dtors.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ And
Bitwise or logical AND of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, can return either the NaN or the non-NaN.
DWARFExpression::Operation Op
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if w...
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
gep_type_iterator gep_type_begin(const User *GEP)
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the largest uint64_t less than or equal to Value and is Skew mod Align.
unsigned succ_size(const MachineBasicBlock *BB)
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
static const fltSemantics & IEEEsingle() LLVM_READNONE
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
uint64_t getScalarStoreSize() const
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isScalableVT() const
Return true if the type is a scalable type.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
void setPointerAddrSpace(unsigned AS)
void setOrigAlign(Align A)
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
This struct represents the registers (physical or virtual) that a particular set of values is assigne...
SmallVector< unsigned, 4 > Regs
This list holds the registers assigned to the values.
SmallVector< unsigned, 4 > RegCount
This list holds the number of registers for each value.
bool isABIMangled() const
SmallVector< EVT, 4 > ValueVTs
The value types of the values, which may not be legal, and may need be promoted or synthesized from o...
SmallVector< std::pair< unsigned, TypeSize >, 4 > getRegsAndSizes() const
Return a list of registers and their sizes.
void AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching, unsigned MatchingIdx, const SDLoc &dl, SelectionDAG &DAG, std::vector< SDValue > &Ops) const
Add this value to the specified inlineasm node operand list.
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr) const
Emit a series of CopyFromReg nodes that copies from this value and returns the result as a ValueVTs v...
SmallVector< MVT, 4 > RegVTs
The value types of the registers.
void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr, ISD::NodeType PreferredExtendType=ISD::ANY_EXTEND) const
Emit a series of CopyToReg nodes that copies the specified value into the registers specified by this...
std::optional< CallingConv::ID > CallConv
Records if this value needs to be treated in an ABI dependant manner, different to normal type legali...
bool occupiesMultipleRegs() const
Check if the total RegCount is greater than one.
These are IR-level optimization flags that may be propagated to SDNodes.
void copyFMF(const FPMathOperator &FPMO)
Propagate the fast-math-flags from an IR FPMathOperator.
bool hasAllowReassociation() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
A MapVector that performs no allocations if smaller than a certain size.
MachineBasicBlock * Default
BranchProbability DefaultProb
MachineBasicBlock * Parent
bool FallthroughUnreachable
MachineBasicBlock * ThisBB
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...
BranchProbability TrueProb
BranchProbability FalseProb
MachineBasicBlock * TrueBB
MachineBasicBlock * FalseBB
SDLoc DL
The debug location of the instruction this CaseBlock was produced from.
A cluster of case labels.
static CaseCluster range(const ConstantInt *Low, const ConstantInt *High, MachineBasicBlock *MBB, BranchProbability Prob)
This contains information for each constraint that we are lowering.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setConvergent(bool Value=true)
CallLoweringInfo & setCFIType(const ConstantInt *Type)
SmallVector< ISD::InputArg, 32 > Ins
bool IsPostTypeLegalization
SmallVector< SDValue, 4 > InVals
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setIsPreallocated(bool Value=true)
CallLoweringInfo & setConvergenceControlToken(SDValue Token)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setDiscardResult(bool Value=true)
void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin, MCSymbol *InvokeEnd)