79#include "llvm/IR/IntrinsicsAArch64.h"
80#include "llvm/IR/IntrinsicsAMDGPU.h"
81#include "llvm/IR/IntrinsicsWebAssembly.h"
112using namespace PatternMatch;
113using namespace SwitchCG;
115#define DEBUG_TYPE "isel"
123 cl::desc(
"Insert the experimental `assertalign` node."),
128 cl::desc(
"Generate low-precision inline sequences "
129 "for some float libcalls"),
135 cl::desc(
"Set the case probability threshold for peeling the case from a "
136 "switch statement. A value greater than 100 will void this "
156 const SDValue *Parts,
unsigned NumParts,
159 std::optional<CallingConv::ID>
CC);
168 unsigned NumParts,
MVT PartVT,
EVT ValueVT,
const Value *V,
170 std::optional<CallingConv::ID>
CC = std::nullopt,
171 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
175 PartVT, ValueVT,
CC))
182 assert(NumParts > 0 &&
"No parts to assemble!");
193 unsigned RoundBits = PartBits * RoundParts;
194 EVT RoundVT = RoundBits == ValueBits ?
200 if (RoundParts > 2) {
204 PartVT, HalfVT, V, InChain);
215 if (RoundParts < NumParts) {
217 unsigned OddParts = NumParts - RoundParts;
220 OddVT, V, InChain,
CC);
237 assert(ValueVT ==
EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
248 !PartVT.
isVector() &&
"Unexpected split");
260 if (PartEVT == ValueVT)
264 ValueVT.
bitsLT(PartEVT)) {
277 if (ValueVT.
bitsLT(PartEVT)) {
282 Val = DAG.
getNode(*AssertOp,
DL, PartEVT, Val,
297 llvm::Attribute::StrictFP)) {
299 DAG.
getVTList(ValueVT, MVT::Other), InChain, Val,
311 if (PartEVT == MVT::x86mmx && ValueVT.
isInteger() &&
312 ValueVT.
bitsLT(PartEVT)) {
321 const Twine &ErrMsg) {
322 const Instruction *
I = dyn_cast_or_null<Instruction>(V);
326 const char *AsmError =
", possible invalid constraint for vector type";
327 if (
const CallInst *CI = dyn_cast<CallInst>(
I))
328 if (CI->isInlineAsm())
340 const SDValue *Parts,
unsigned NumParts,
343 std::optional<CallingConv::ID> CallConv) {
345 assert(NumParts > 0 &&
"No parts to assemble!");
346 const bool IsABIRegCopy = CallConv.has_value();
355 unsigned NumIntermediates;
360 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT,
361 NumIntermediates, RegisterVT);
365 NumIntermediates, RegisterVT);
368 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
370 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
373 "Part type sizes don't match!");
377 if (NumIntermediates == NumParts) {
380 for (
unsigned i = 0; i != NumParts; ++i)
382 V, InChain, CallConv);
383 }
else if (NumParts > 0) {
386 assert(NumParts % NumIntermediates == 0 &&
387 "Must expand into a divisible number of parts!");
388 unsigned Factor = NumParts / NumIntermediates;
389 for (
unsigned i = 0; i != NumIntermediates; ++i)
391 IntermediateVT, V, InChain, CallConv);
406 DL, BuiltVectorTy, Ops);
412 if (PartEVT == ValueVT)
428 "Cannot narrow, it would be a lossy transformation");
434 if (PartEVT == ValueVT)
459 }
else if (ValueVT.
bitsLT(PartEVT)) {
468 *DAG.
getContext(), V,
"non-trivial scalar-to-vector conversion");
499 std::optional<CallingConv::ID> CallConv);
506 unsigned NumParts,
MVT PartVT,
const Value *V,
507 std::optional<CallingConv::ID> CallConv = std::nullopt,
521 unsigned OrigNumParts = NumParts;
523 "Copying to an illegal type!");
529 EVT PartEVT = PartVT;
530 if (PartEVT == ValueVT) {
531 assert(NumParts == 1 &&
"No-op copy with multiple parts!");
540 assert(NumParts == 1 &&
"Do not know what to promote to!");
551 "Unknown mismatch!");
553 Val = DAG.
getNode(ExtendKind,
DL, ValueVT, Val);
554 if (PartVT == MVT::x86mmx)
559 assert(NumParts == 1 && PartEVT != ValueVT);
565 "Unknown mismatch!");
568 if (PartVT == MVT::x86mmx)
575 "Failed to tile the value with PartVT!");
578 if (PartEVT != ValueVT) {
580 "scalar-to-vector conversion failed");
589 if (NumParts & (NumParts - 1)) {
592 "Do not know what to expand to!");
594 unsigned RoundBits = RoundParts * PartBits;
595 unsigned OddParts = NumParts - RoundParts;
604 std::reverse(Parts + RoundParts, Parts + NumParts);
606 NumParts = RoundParts;
618 for (
unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
619 for (
unsigned i = 0; i < NumParts; i += StepSize) {
620 unsigned ThisBits = StepSize * PartBits / 2;
623 SDValue &Part1 = Parts[i+StepSize/2];
630 if (ThisBits == PartBits && ThisVT != PartVT) {
638 std::reverse(Parts, Parts + OrigNumParts);
655 if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
660 if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
662 "Cannot widen to illegal type");
665 }
else if (PartEVT != ValueEVT) {
680 Ops.
append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
691 std::optional<CallingConv::ID> CallConv) {
695 const bool IsABIRegCopy = CallConv.has_value();
698 EVT PartEVT = PartVT;
699 if (PartEVT == ValueVT) {
718 TargetLowering::TypeWidenVector) {
736 "lossy conversion of vector to scalar type");
751 unsigned NumIntermediates;
755 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
760 NumIntermediates, RegisterVT);
763 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
765 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
768 "Mixing scalable and fixed vectors when copying in parts");
770 std::optional<ElementCount> DestEltCnt;
780 if (ValueVT == BuiltVectorTy) {
804 for (
unsigned i = 0; i != NumIntermediates; ++i) {
819 if (NumParts == NumIntermediates) {
822 for (
unsigned i = 0; i != NumParts; ++i)
824 }
else if (NumParts > 0) {
827 assert(NumIntermediates != 0 &&
"division by zero");
828 assert(NumParts % NumIntermediates == 0 &&
829 "Must expand into a divisible number of parts!");
830 unsigned Factor = NumParts / NumIntermediates;
831 for (
unsigned i = 0; i != NumIntermediates; ++i)
838 EVT valuevt, std::optional<CallingConv::ID>
CC)
839 : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
840 RegCount(1, regs.
size()), CallConv(
CC) {}
844 std::optional<CallingConv::ID>
CC) {
858 for (
unsigned i = 0; i != NumRegs; ++i)
860 RegVTs.push_back(RegisterVT);
889 for (
unsigned i = 0; i != NumRegs; ++i) {
895 *Glue =
P.getValue(2);
898 Chain =
P.getValue(1);
927 EVT FromVT(MVT::Other);
931 }
else if (NumSignBits > 1) {
939 assert(FromVT != MVT::Other);
945 RegisterVT, ValueVT, V, Chain,
CallConv);
975 NumParts, RegisterVT, V,
CallConv, ExtendKind);
981 for (
unsigned i = 0; i != NumRegs; ++i) {
993 if (NumRegs == 1 || Glue)
1004 Chain = Chains[NumRegs-1];
1010 unsigned MatchingIdx,
const SDLoc &dl,
1012 std::vector<SDValue> &Ops)
const {
1017 Flag.setMatchingOp(MatchingIdx);
1026 Flag.setRegClass(RC->
getID());
1037 "No 1:1 mapping from clobbers to regs?");
1045 "If we clobbered the stack pointer, MFI should know about it.");
1054 for (
unsigned i = 0; i != NumRegs; ++i) {
1056 unsigned TheReg =
Regs[Reg++];
1067 unsigned RegCount = std::get<0>(CountAndVT);
1068 MVT RegisterVT = std::get<1>(CountAndVT);
1092 UnusedArgNodeMap.clear();
1094 PendingExports.clear();
1095 PendingConstrainedFP.clear();
1096 PendingConstrainedFPStrict.clear();
1104 DanglingDebugInfoMap.clear();
1111 if (Pending.
empty())
1117 unsigned i = 0, e = Pending.
size();
1118 for (; i != e; ++i) {
1119 assert(Pending[i].getNode()->getNumOperands() > 1);
1120 if (Pending[i].getNode()->getOperand(0) == Root)
1128 if (Pending.
size() == 1)
1147 PendingConstrainedFP.size() +
1148 PendingConstrainedFPStrict.size());
1150 PendingConstrainedFP.end());
1151 PendingLoads.append(PendingConstrainedFPStrict.begin(),
1152 PendingConstrainedFPStrict.end());
1153 PendingConstrainedFP.clear();
1154 PendingConstrainedFPStrict.clear();
1161 PendingExports.append(PendingConstrainedFPStrict.begin(),
1162 PendingConstrainedFPStrict.end());
1163 PendingConstrainedFPStrict.clear();
1164 return updateRoot(PendingExports);
1171 assert(Variable &&
"Missing variable");
1178 <<
"dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
1185 if (!
N.getNode() && isa<Argument>(
Address))
1193 auto *FINode = dyn_cast<FrameIndexSDNode>(
N.getNode());
1194 if (IsParameter && FINode) {
1197 true,
DL, SDNodeOrder);
1198 }
else if (isa<Argument>(
Address)) {
1202 FuncArgumentDbgValueKind::Declare,
N);
1206 true,
DL, SDNodeOrder);
1213 FuncArgumentDbgValueKind::Declare,
N)) {
1215 <<
" (could not emit func-arg dbg_value)\n");
1227 for (
auto It = FnVarLocs->locs_begin(&
I),
End = FnVarLocs->locs_end(&
I);
1229 auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1231 if (It->Values.isKillLocation(It->Expr)) {
1237 It->Values.hasArgList())) {
1239 for (
Value *V : It->Values.location_ops())
1242 FnVarLocs->getDILocalVariable(It->VariableID),
1243 It->Expr, Vals.
size() > 1, It->DL, SDNodeOrder);
1258 for (
DbgRecord &DR :
I.getDbgRecordRange()) {
1259 if (
DPLabel *DPL = dyn_cast<DPLabel>(&DR)) {
1260 assert(DPL->getLabel() &&
"Missing label");
1262 DAG.
getDbgLabel(DPL->getLabel(), DPL->getDebugLoc(), SDNodeOrder);
1269 DPValue &DPV = cast<DPValue>(DR);
1277 LLVM_DEBUG(
dbgs() <<
"SelectionDAG visiting dbg_declare: " << DPV
1286 if (Values.
empty()) {
1294 [](
Value *V) {
return !V || isa<UndefValue>(V); })) {
1302 SDNodeOrder, IsVariadic)) {
1313 if (
I.isTerminator()) {
1314 HandlePHINodesInSuccessorBlocks(
I.getParent());
1318 if (!isa<DbgInfoIntrinsic>(
I))
1324 bool NodeInserted =
false;
1325 std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1326 MDNode *PCSectionsMD =
I.getMetadata(LLVMContext::MD_pcsections);
1328 InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1329 DAG, [&](
SDNode *) { NodeInserted =
true; });
1335 !isa<GCStatepointInst>(
I))
1340 auto It = NodeMap.find(&
I);
1341 if (It != NodeMap.end()) {
1343 }
else if (NodeInserted) {
1346 errs() <<
"warning: loosing !pcsections metadata ["
1347 <<
I.getModule()->getName() <<
"]\n";
1356void SelectionDAGBuilder::visitPHI(
const PHINode &) {
1366#define HANDLE_INST(NUM, OPCODE, CLASS) \
1367 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1368#include "llvm/IR/Instruction.def"
1380 for (
const Value *V : Values) {
1405 DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr,
DL, Order);
1410 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1411 DIVariable *DanglingVariable = DDI.getVariable();
1413 if (DanglingVariable == Variable && Expr->
fragmentsOverlap(DanglingExpr)) {
1415 << printDDI(
nullptr, DDI) <<
"\n");
1421 for (
auto &DDIMI : DanglingDebugInfoMap) {
1422 DanglingDebugInfoVector &DDIV = DDIMI.second;
1426 for (
auto &DDI : DDIV)
1427 if (isMatchingDbgValue(DDI))
1430 erase_if(DDIV, isMatchingDbgValue);
1438 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1439 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1442 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1443 for (
auto &DDI : DDIV) {
1446 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1450 "Expected inlined-at fields to agree");
1459 if (!EmitFuncArgumentDbgValue(V, Variable, Expr,
DL,
1460 FuncArgumentDbgValueKind::Value, Val)) {
1462 << printDDI(V, DDI) <<
"\n");
1469 <<
"changing SDNodeOrder from " << DbgSDNodeOrder <<
" to "
1470 << ValSDNodeOrder <<
"\n");
1471 SDV = getDbgValue(Val, Variable, Expr,
DL,
1472 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1477 <<
" in EmitFuncArgumentDbgValue\n");
1479 LLVM_DEBUG(
dbgs() <<
"Dropping debug info for " << printDDI(V, DDI)
1491 DanglingDebugInfo &DDI) {
1496 const Value *OrigV = V;
1500 unsigned SDOrder = DDI.getSDNodeOrder();
1504 bool StackValue =
true;
1513 while (isa<Instruction>(V)) {
1514 const Instruction &VAsInst = *cast<const Instruction>(V);
1529 if (!AdditionalValues.
empty())
1539 dbgs() <<
"Salvaged debug location info for:\n " << *Var <<
"\n"
1540 << *OrigV <<
"\nBy stripping back to:\n " << *V <<
"\n");
1548 assert(OrigV &&
"V shouldn't be null");
1553 << printDDI(OrigV, DDI) <<
"\n");
1570 unsigned Order,
bool IsVariadic) {
1575 if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
1580 for (
const Value *V : Values) {
1582 if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
1583 isa<ConstantPointerNull>(V)) {
1589 if (
auto *CE = dyn_cast<ConstantExpr>(V))
1590 if (CE->getOpcode() == Instruction::IntToPtr) {
1597 if (
const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1608 if (!
N.getNode() && isa<Argument>(V))
1609 N = UnusedArgNodeMap[V];
1613 EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1614 FuncArgumentDbgValueKind::Value,
N))
1616 if (
auto *FISDN = dyn_cast<FrameIndexSDNode>(
N.getNode())) {
1641 bool IsParamOfFunc =
1651 unsigned Reg = VMI->second;
1655 V->getType(), std::nullopt);
1661 unsigned BitsToDescribe = 0;
1663 BitsToDescribe = *VarSize;
1665 BitsToDescribe = Fragment->SizeInBits;
1668 if (
Offset >= BitsToDescribe)
1671 unsigned RegisterSize = RegAndSize.second;
1672 unsigned FragmentSize = (
Offset + RegisterSize > BitsToDescribe)
1673 ? BitsToDescribe -
Offset
1676 Expr,
Offset, FragmentSize);
1680 Var, *FragmentExpr, RegAndSize.first,
false, DbgLoc, SDNodeOrder);
1699 SDNodeOrder, IsVariadic);
1706 for (
auto &Pair : DanglingDebugInfoMap)
1707 for (
auto &DDI : Pair.second)
1739 if (
N.getNode())
return N;
1781 if (
const Constant *
C = dyn_cast<Constant>(V)) {
1790 if (isa<ConstantPointerNull>(
C)) {
1791 unsigned AS = V->getType()->getPointerAddressSpace();
1799 if (
const ConstantFP *CFP = dyn_cast<ConstantFP>(
C))
1802 if (isa<UndefValue>(
C) && !V->getType()->isAggregateType())
1806 visit(CE->getOpcode(), *CE);
1808 assert(N1.
getNode() &&
"visit didn't populate the NodeMap!");
1812 if (isa<ConstantStruct>(
C) || isa<ConstantArray>(
C)) {
1814 for (
const Use &U :
C->operands()) {
1820 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1828 dyn_cast<ConstantDataSequential>(
C)) {
1830 for (
unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1834 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1838 if (isa<ArrayType>(CDS->getType()))
1843 if (
C->getType()->isStructTy() ||
C->getType()->isArrayTy()) {
1844 assert((isa<ConstantAggregateZero>(
C) || isa<UndefValue>(
C)) &&
1845 "Unknown struct or array constant!");
1849 unsigned NumElts = ValueVTs.
size();
1853 for (
unsigned i = 0; i != NumElts; ++i) {
1854 EVT EltVT = ValueVTs[i];
1855 if (isa<UndefValue>(
C))
1869 if (
const auto *Equiv = dyn_cast<DSOLocalEquivalent>(
C))
1870 return getValue(Equiv->getGlobalValue());
1872 if (
const auto *
NC = dyn_cast<NoCFIValue>(
C))
1875 if (VT == MVT::aarch64svcount) {
1876 assert(
C->isNullValue() &&
"Can only zero this target type!");
1881 VectorType *VecTy = cast<VectorType>(V->getType());
1887 unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1888 for (
unsigned i = 0; i != NumElements; ++i)
1894 if (isa<ConstantAggregateZero>(
C)) {
1912 if (
const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1921 if (
const Instruction *Inst = dyn_cast<Instruction>(V)) {
1925 Inst->getType(), std::nullopt);
1933 if (
const auto *BB = dyn_cast<BasicBlock>(V))
1939void SelectionDAGBuilder::visitCatchPad(
const CatchPadInst &
I) {
1948 if (IsMSVCCXX || IsCoreCLR)
1975 Value *ParentPad =
I.getCatchSwitchParentPad();
1977 if (isa<ConstantTokenNone>(ParentPad))
1980 SuccessorColor = cast<Instruction>(ParentPad)->
getParent();
1981 assert(SuccessorColor &&
"No parent funclet for catchret!");
1983 assert(SuccessorColorMBB &&
"No MBB for SuccessorColor!");
1992void SelectionDAGBuilder::visitCleanupPad(
const CleanupPadInst &CPI) {
2036 if (isa<CleanupPadInst>(Pad)) {
2038 UnwindDests.emplace_back(FuncInfo.
MBBMap[EHPadBB], Prob);
2039 UnwindDests.back().first->setIsEHScopeEntry();
2041 }
else if (
const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2044 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2045 UnwindDests.emplace_back(FuncInfo.
MBBMap[CatchPadBB], Prob);
2046 UnwindDests.back().first->setIsEHScopeEntry();
2077 assert(UnwindDests.size() <= 1 &&
2078 "There should be at most one unwind destination for wasm");
2085 if (isa<LandingPadInst>(Pad)) {
2087 UnwindDests.emplace_back(FuncInfo.
MBBMap[EHPadBB], Prob);
2089 }
else if (isa<CleanupPadInst>(Pad)) {
2092 UnwindDests.emplace_back(FuncInfo.
MBBMap[EHPadBB], Prob);
2093 UnwindDests.
back().first->setIsEHScopeEntry();
2094 UnwindDests.back().first->setIsEHFuncletEntry();
2096 }
else if (
const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2098 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2099 UnwindDests.emplace_back(FuncInfo.
MBBMap[CatchPadBB], Prob);
2101 if (IsMSVCCXX || IsCoreCLR)
2102 UnwindDests.back().first->setIsEHFuncletEntry();
2104 UnwindDests.back().first->setIsEHScopeEntry();
2106 NewEHPadBB = CatchSwitch->getUnwindDest();
2112 if (BPI && NewEHPadBB)
2114 EHPadBB = NewEHPadBB;
2121 auto UnwindDest =
I.getUnwindDest();
2128 for (
auto &UnwindDest : UnwindDests) {
2129 UnwindDest.first->setIsEHPad();
2130 addSuccessorWithProb(
FuncInfo.
MBB, UnwindDest.first, UnwindDest.second);
2140void SelectionDAGBuilder::visitCatchSwitch(
const CatchSwitchInst &CSI) {
2144void SelectionDAGBuilder::visitRet(
const ReturnInst &
I) {
2158 if (
I.getParent()->getTerminatingDeoptimizeCall()) {
2165 const Function *
F =
I.getParent()->getParent();
2184 unsigned NumValues = ValueVTs.
size();
2187 Align BaseAlign =
DL.getPrefTypeAlign(
I.getOperand(0)->getType());
2188 for (
unsigned i = 0; i != NumValues; ++i) {
2195 if (MemVTs[i] != ValueVTs[i])
2205 MVT::Other, Chains);
2206 }
else if (
I.getNumOperands() != 0) {
2209 unsigned NumValues = ValueVTs.
size();
2213 const Function *
F =
I.getParent()->getParent();
2216 I.getOperand(0)->getType(),
F->getCallingConv(),
2220 if (
F->getAttributes().hasRetAttr(Attribute::SExt))
2222 else if (
F->getAttributes().hasRetAttr(Attribute::ZExt))
2226 bool RetInReg =
F->getAttributes().hasRetAttr(Attribute::InReg);
2228 for (
unsigned j = 0;
j != NumValues; ++
j) {
2229 EVT VT = ValueVTs[
j];
2241 &Parts[0], NumParts, PartVT, &
I,
CC, ExtendKind);
2248 if (
I.getOperand(0)->getType()->isPointerTy()) {
2250 Flags.setPointerAddrSpace(
2251 cast<PointerType>(
I.getOperand(0)->getType())->getAddressSpace());
2254 if (NeedsRegBlock) {
2255 Flags.setInConsecutiveRegs();
2256 if (j == NumValues - 1)
2257 Flags.setInConsecutiveRegsLast();
2266 for (
unsigned i = 0; i < NumParts; ++i) {
2268 Parts[i].getValueType().getSimpleVT(),
2279 const Function *
F =
I.getParent()->getParent();
2281 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2284 Flags.setSwiftError();
2303 "LowerReturn didn't return a valid chain!");
2314 if (V->getType()->isEmptyTy())
2319 assert((!V->use_empty() || isa<CallBrInst>(V)) &&
2320 "Unused value assigned virtual registers!");
2330 if (!isa<Instruction>(V) && !isa<Argument>(V))
return;
2343 if (
const Instruction *VI = dyn_cast<Instruction>(V)) {
2345 if (VI->getParent() == FromBB)
2354 if (isa<Argument>(V)) {
2371 const BasicBlock *SrcBB = Src->getBasicBlock();
2372 const BasicBlock *DstBB = Dst->getBasicBlock();
2376 auto SuccSize = std::max<uint32_t>(
succ_size(SrcBB), 1);
2386 Src->addSuccessorWithoutProb(Dst);
2389 Prob = getEdgeProbability(Src, Dst);
2390 Src->addSuccessor(Dst, Prob);
2396 return I->getParent() == BB;
2416 if (
const CmpInst *BOp = dyn_cast<CmpInst>(
Cond)) {
2420 if (CurBB == SwitchBB ||
2426 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2431 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2437 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1),
nullptr,
2439 SL->SwitchCases.push_back(CB);
2448 SL->SwitchCases.push_back(CB);
2456 unsigned Depth = 0) {
2461 auto *
I = dyn_cast<Instruction>(V);
2465 if (Necessary !=
nullptr) {
2468 if (Necessary->contains(
I))
2476 for (
unsigned OpIdx = 0,
E =
I->getNumOperands(); OpIdx <
E; ++OpIdx)
2487 if (
I.getNumSuccessors() != 2)
2490 if (!
I.isConditional())
2502 if (BPI !=
nullptr) {
2508 std::optional<bool> Likely;
2511 else if (BPI->
isEdgeHot(
I.getParent(), IfFalse))
2515 if (Opc == (*Likely ? Instruction::And : Instruction::Or))
2527 if (CostThresh <= 0)
2541 if (
const auto *RhsI = dyn_cast<Instruction>(Rhs))
2552 Value *BrCond =
I.getCondition();
2553 auto ShouldCountInsn = [&RhsDeps, &BrCond](
const Instruction *Ins) {
2554 for (
const auto *U : Ins->users()) {
2556 if (
auto *UIns = dyn_cast<Instruction>(U))
2557 if (UIns != BrCond && !RhsDeps.
contains(UIns))
2570 for (
unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
2572 for (
const auto &InsPair : RhsDeps) {
2573 if (!ShouldCountInsn(InsPair.first)) {
2574 ToDrop = InsPair.first;
2578 if (ToDrop ==
nullptr)
2580 RhsDeps.erase(ToDrop);
2583 for (
const auto &InsPair : RhsDeps) {
2591 if (CostOfIncluding > CostThresh)
2617 const Value *BOpOp0, *BOpOp1;
2631 if (BOpc == Instruction::And)
2632 BOpc = Instruction::Or;
2633 else if (BOpc == Instruction::Or)
2634 BOpc = Instruction::And;
2640 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->
hasOneUse();
2645 TProb, FProb, InvertCond);
2655 if (Opc == Instruction::Or) {
2676 auto NewTrueProb = TProb / 2;
2677 auto NewFalseProb = TProb / 2 + FProb;
2680 NewFalseProb, InvertCond);
2687 Probs[1], InvertCond);
2689 assert(Opc == Instruction::And &&
"Unknown merge op!");
2709 auto NewTrueProb = TProb + FProb / 2;
2710 auto NewFalseProb = FProb / 2;
2713 NewFalseProb, InvertCond);
2720 Probs[1], InvertCond);
2729 if (Cases.size() != 2)
return true;
2733 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2734 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2735 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2736 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2742 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2743 Cases[0].
CC == Cases[1].
CC &&
2744 isa<Constant>(Cases[0].CmpRHS) &&
2745 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2746 if (Cases[0].
CC ==
ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2748 if (Cases[0].
CC ==
ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2755void SelectionDAGBuilder::visitBr(
const BranchInst &
I) {
2761 if (
I.isUnconditional()) {
2767 if (Succ0MBB != NextBlock(BrMBB) ||
2780 const Value *CondVal =
I.getCondition();
2800 const Instruction *BOp = dyn_cast<Instruction>(CondVal);
2802 BOp->
hasOneUse() && !
I.hasMetadata(LLVMContext::MD_unpredictable)) {
2804 const Value *BOp0, *BOp1;
2807 Opcode = Instruction::And;
2809 Opcode = Instruction::Or;
2817 Opcode, BOp0, BOp1))) {
2819 getEdgeProbability(BrMBB, Succ0MBB),
2820 getEdgeProbability(BrMBB, Succ1MBB),
2825 assert(
SL->SwitchCases[0].ThisBB == BrMBB &&
"Unexpected lowering!");
2829 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i) {
2836 SL->SwitchCases.erase(
SL->SwitchCases.begin());
2842 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i)
2845 SL->SwitchCases.clear();
2851 nullptr, Succ0MBB, Succ1MBB, BrMBB,
getCurSDLoc());
2870 if (CB.
TrueBB != NextBlock(SwitchBB)) {
2912 if (cast<ConstantInt>(CB.
CmpLHS)->isMinValue(
true)) {
2933 if (CB.
TrueBB == NextBlock(SwitchBB)) {
2957 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
2958 assert(JT.Reg != -1U &&
"Should lower JT Header first!");
2972 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
2973 const SDLoc &dl = *JT.SL;
2989 unsigned JumpTableReg =
2992 JumpTableReg, SwitchOp);
2993 JT.Reg = JumpTableReg;
3005 MVT::Other, CopyTo, CMP,
3009 if (JT.MBB != NextBlock(SwitchBB))
3016 if (JT.MBB != NextBlock(SwitchBB))
3044 if (PtrTy != PtrMemTy)
3092 Entry.Node = GuardVal;
3094 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
3095 Entry.IsInReg =
true;
3096 Args.push_back(Entry);
3102 getValue(GuardCheckFn), std::move(Args));
3104 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
3118 Guard =
DAG.
getLoad(PtrMemTy, dl, Chain, GuardPtr,
3155 TLI.
makeLibCall(
DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
3186 bool UsePtrType =
false;
3190 for (
unsigned i = 0, e =
B.Cases.size(); i != e; ++i)
3210 if (!
B.FallthroughUnreachable)
3211 addSuccessorWithProb(SwitchBB,
B.Default,
B.DefaultProb);
3212 addSuccessorWithProb(SwitchBB,
MBB,
B.Prob);
3216 if (!
B.FallthroughUnreachable) {
3229 if (
MBB != NextBlock(SwitchBB))
3248 if (PopCount == 1) {
3255 }
else if (PopCount == BB.
Range) {
3274 addSuccessorWithProb(SwitchBB,
B.TargetBB,
B.ExtraProb);
3276 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3287 if (NextMBB != NextBlock(SwitchBB))
3294void SelectionDAGBuilder::visitInvoke(
const InvokeInst &
I) {
3305 assert(!
I.hasOperandBundlesOtherThan(
3306 {LLVMContext::OB_deopt, LLVMContext::OB_gc_transition,
3307 LLVMContext::OB_gc_live, LLVMContext::OB_funclet,
3308 LLVMContext::OB_cfguardtarget,
3309 LLVMContext::OB_clang_arc_attachedcall}) &&
3310 "Cannot lower invokes with arbitrary operand bundles yet!");
3312 const Value *Callee(
I.getCalledOperand());
3313 const Function *Fn = dyn_cast<Function>(Callee);
3314 if (isa<InlineAsm>(Callee))
3315 visitInlineAsm(
I, EHPadBB);
3320 case Intrinsic::donothing:
3322 case Intrinsic::seh_try_begin:
3323 case Intrinsic::seh_scope_begin:
3324 case Intrinsic::seh_try_end:
3325 case Intrinsic::seh_scope_end:
3331 case Intrinsic::experimental_patchpoint_void:
3332 case Intrinsic::experimental_patchpoint_i64:
3333 visitPatchpoint(
I, EHPadBB);
3335 case Intrinsic::experimental_gc_statepoint:
3338 case Intrinsic::wasm_rethrow: {
3367 if (!isa<GCStatepointInst>(
I)) {
3379 addSuccessorWithProb(InvokeMBB, Return);
3380 for (
auto &UnwindDest : UnwindDests) {
3381 UnwindDest.first->setIsEHPad();
3382 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3391void SelectionDAGBuilder::visitCallBr(
const CallBrInst &
I) {
3396 assert(!
I.hasOperandBundlesOtherThan(
3397 {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
3398 "Cannot lower callbrs with arbitrary operand bundles yet!");
3400 assert(
I.isInlineAsm() &&
"Only know how to handle inlineasm callbr");
3406 Dests.
insert(
I.getDefaultDest());
3411 for (
unsigned i = 0, e =
I.getNumIndirectDests(); i < e; ++i) {
3414 Target->setIsInlineAsmBrIndirectTarget();
3415 Target->setMachineBlockAddressTaken();
3416 Target->setLabelMustBeEmitted();
3418 if (Dests.
insert(Dest).second)
3429void SelectionDAGBuilder::visitResume(
const ResumeInst &RI) {
3430 llvm_unreachable(
"SelectionDAGBuilder shouldn't visit resume instructions!");
3433void SelectionDAGBuilder::visitLandingPad(
const LandingPadInst &LP) {
3435 "Call to landingpad not in landing pad!");
3455 assert(ValueVTs.
size() == 2 &&
"Only two-valued landingpads are supported");
3485 if (JTB.first.HeaderBB ==
First)
3486 JTB.first.HeaderBB =
Last;
3499 for (
unsigned i = 0, e =
I.getNumSuccessors(); i != e; ++i) {
3501 bool Inserted =
Done.insert(BB).second;
3506 addSuccessorWithProb(IndirectBrMBB, Succ);
3521 if (
const CallInst *Call = dyn_cast_or_null<CallInst>(
I.getPrevNode())) {
3522 if (
Call->doesNotReturn())
3530void SelectionDAGBuilder::visitUnary(
const User &
I,
unsigned Opcode) {
3532 if (
auto *FPOp = dyn_cast<FPMathOperator>(&
I))
3533 Flags.copyFMF(*FPOp);
3541void SelectionDAGBuilder::visitBinary(
const User &
I,
unsigned Opcode) {
3543 if (
auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&
I)) {
3544 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3545 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3547 if (
auto *ExactOp = dyn_cast<PossiblyExactOperator>(&
I))
3548 Flags.setExact(ExactOp->isExact());
3549 if (
auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&
I))
3550 Flags.setDisjoint(DisjointOp->isDisjoint());
3551 if (
auto *FPOp = dyn_cast<FPMathOperator>(&
I))
3552 Flags.copyFMF(*FPOp);
3561void SelectionDAGBuilder::visitShift(
const User &
I,
unsigned Opcode) {
3570 if (!
I.getType()->isVectorTy() && Op2.
getValueType() != ShiftTy) {
3572 "Unexpected shift type");
3583 dyn_cast<const OverflowingBinaryOperator>(&
I)) {
3584 nuw = OFBinOp->hasNoUnsignedWrap();
3585 nsw = OFBinOp->hasNoSignedWrap();
3588 dyn_cast<const PossiblyExactOperator>(&
I))
3589 exact = ExactOp->isExact();
3592 Flags.setExact(exact);
3593 Flags.setNoSignedWrap(nsw);
3594 Flags.setNoUnsignedWrap(nuw);
3600void SelectionDAGBuilder::visitSDiv(
const User &
I) {
3605 Flags.setExact(isa<PossiblyExactOperator>(&
I) &&
3606 cast<PossiblyExactOperator>(&
I)->isExact());
3611void SelectionDAGBuilder::visitICmp(
const User &
I) {
3613 if (
const ICmpInst *IC = dyn_cast<ICmpInst>(&
I))
3614 predicate = IC->getPredicate();
3615 else if (
const ConstantExpr *IC = dyn_cast<ConstantExpr>(&
I))
3638void SelectionDAGBuilder::visitFCmp(
const User &
I) {
3640 if (
const FCmpInst *FC = dyn_cast<FCmpInst>(&
I))
3641 predicate =
FC->getPredicate();
3642 else if (
const ConstantExpr *FC = dyn_cast<ConstantExpr>(&
I))
3648 auto *FPMO = cast<FPMathOperator>(&
I);
3653 Flags.copyFMF(*FPMO);
3665 return isa<SelectInst>(V);
3669void SelectionDAGBuilder::visitSelect(
const User &
I) {
3673 unsigned NumValues = ValueVTs.
size();
3674 if (NumValues == 0)
return;
3684 bool IsUnaryAbs =
false;
3685 bool Negate =
false;
3688 if (
auto *FPOp = dyn_cast<FPMathOperator>(&
I))
3689 Flags.copyFMF(*FPOp);
3691 Flags.setUnpredictable(
3692 cast<SelectInst>(
I).getMetadata(LLVMContext::MD_unpredictable));
3696 EVT VT = ValueVTs[0];
3708 bool UseScalarMinMax = VT.
isVector() &&
3717 switch (SPR.Flavor) {
3723 switch (SPR.NaNBehavior) {
3736 switch (SPR.NaNBehavior) {
3780 for (
unsigned i = 0; i != NumValues; ++i) {
3789 for (
unsigned i = 0; i != NumValues; ++i) {
3803void SelectionDAGBuilder::visitTrunc(
const User &
I) {
3811void SelectionDAGBuilder::visitZExt(
const User &
I) {
3819 if (
auto *PNI = dyn_cast<PossiblyNonNegInst>(&
I))
3820 Flags.setNonNeg(PNI->hasNonNeg());
3825 if (
Flags.hasNonNeg() &&
3834void SelectionDAGBuilder::visitSExt(
const User &
I) {
3843void SelectionDAGBuilder::visitFPTrunc(
const User &
I) {
3854void SelectionDAGBuilder::visitFPExt(
const User &
I) {
3862void SelectionDAGBuilder::visitFPToUI(
const User &
I) {
3870void SelectionDAGBuilder::visitFPToSI(
const User &
I) {
3878void SelectionDAGBuilder::visitUIToFP(
const User &
I) {
3886void SelectionDAGBuilder::visitSIToFP(
const User &
I) {
3894void SelectionDAGBuilder::visitPtrToInt(
const User &
I) {
3908void SelectionDAGBuilder::visitIntToPtr(
const User &
I) {
3920void SelectionDAGBuilder::visitBitCast(
const User &
I) {
3928 if (DestVT !=
N.getValueType())
3935 else if(
ConstantInt *
C = dyn_cast<ConstantInt>(
I.getOperand(0)))
3942void SelectionDAGBuilder::visitAddrSpaceCast(
const User &
I) {
3944 const Value *SV =
I.getOperand(0);
3949 unsigned DestAS =
I.getType()->getPointerAddressSpace();
3957void SelectionDAGBuilder::visitInsertElement(
const User &
I) {
3965 InVec, InVal, InIdx));
3968void SelectionDAGBuilder::visitExtractElement(
const User &
I) {
3978void SelectionDAGBuilder::visitShuffleVector(
const User &
I) {
3982 if (
auto *SVI = dyn_cast<ShuffleVectorInst>(&
I))
3983 Mask = SVI->getShuffleMask();
3985 Mask = cast<ConstantExpr>(
I).getShuffleMask();
3991 if (
all_of(Mask, [](
int Elem) {
return Elem == 0; }) &&
4007 unsigned MaskNumElts =
Mask.size();
4009 if (SrcNumElts == MaskNumElts) {
4015 if (SrcNumElts < MaskNumElts) {
4019 if (MaskNumElts % SrcNumElts == 0) {
4023 unsigned NumConcat = MaskNumElts / SrcNumElts;
4024 bool IsConcat =
true;
4026 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4032 if ((
Idx % SrcNumElts != (i % SrcNumElts)) ||
4033 (ConcatSrcs[i / SrcNumElts] >= 0 &&
4034 ConcatSrcs[i / SrcNumElts] != (
int)(
Idx / SrcNumElts))) {
4039 ConcatSrcs[i / SrcNumElts] =
Idx / SrcNumElts;
4046 for (
auto Src : ConcatSrcs) {
4059 unsigned PaddedMaskNumElts =
alignTo(MaskNumElts, SrcNumElts);
4060 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
4077 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4079 if (
Idx >= (
int)SrcNumElts)
4080 Idx -= SrcNumElts - PaddedMaskNumElts;
4088 if (MaskNumElts != PaddedMaskNumElts)
4096 if (SrcNumElts > MaskNumElts) {
4099 int StartIdx[2] = { -1, -1 };
4100 bool CanExtract =
true;
4101 for (
int Idx : Mask) {
4106 if (
Idx >= (
int)SrcNumElts) {
4115 if (NewStartIdx + MaskNumElts > SrcNumElts ||
4116 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
4120 StartIdx[Input] = NewStartIdx;
4123 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
4129 for (
unsigned Input = 0; Input < 2; ++Input) {
4130 SDValue &Src = Input == 0 ? Src1 : Src2;
4131 if (StartIdx[Input] < 0)
4141 for (
int &
Idx : MappedOps) {
4142 if (
Idx >= (
int)SrcNumElts)
4143 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4158 for (
int Idx : Mask) {
4164 SDValue &Src =
Idx < (int)SrcNumElts ? Src1 : Src2;
4165 if (
Idx >= (
int)SrcNumElts)
Idx -= SrcNumElts;
4179 const Value *Op0 =
I.getOperand(0);
4180 const Value *Op1 =
I.getOperand(1);
4181 Type *AggTy =
I.getType();
4183 bool IntoUndef = isa<UndefValue>(Op0);
4184 bool FromUndef = isa<UndefValue>(Op1);
4194 unsigned NumAggValues = AggValueVTs.
size();
4195 unsigned NumValValues = ValValueVTs.
size();
4199 if (!NumAggValues) {
4207 for (; i != LinearIndex; ++i)
4208 Values[i] = IntoUndef ?
DAG.
getUNDEF(AggValueVTs[i]) :
4213 for (; i != LinearIndex + NumValValues; ++i)
4214 Values[i] = FromUndef ?
DAG.
getUNDEF(AggValueVTs[i]) :
4218 for (; i != NumAggValues; ++i)
4219 Values[i] = IntoUndef ?
DAG.
getUNDEF(AggValueVTs[i]) :
4228 const Value *Op0 =
I.getOperand(0);
4230 Type *ValTy =
I.getType();
4231 bool OutOfUndef = isa<UndefValue>(Op0);
4239 unsigned NumValValues = ValValueVTs.
size();
4242 if (!NumValValues) {
4251 for (
unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4252 Values[i - LinearIndex] =
4261void SelectionDAGBuilder::visitGetElementPtr(
const User &
I) {
4262 Value *Op0 =
I.getOperand(0);
4272 bool IsVectorGEP =
I.getType()->isVectorTy();
4274 IsVectorGEP ? cast<VectorType>(
I.getType())->getElementCount()
4277 if (IsVectorGEP && !
N.getValueType().isVector()) {
4285 const Value *
Idx = GTI.getOperand();
4286 if (
StructType *StTy = GTI.getStructTypeOrNull()) {
4287 unsigned Field = cast<Constant>(
Idx)->getUniqueInteger().getZExtValue();
4296 if (int64_t(
Offset) >= 0 && cast<GEPOperator>(
I).isInBounds())
4297 Flags.setNoUnsignedWrap(
true);
4313 bool ElementScalable = ElementSize.
isScalable();
4317 const auto *
C = dyn_cast<Constant>(
Idx);
4318 if (
C && isa<VectorType>(
C->getType()))
4319 C =
C->getSplatValue();
4321 const auto *CI = dyn_cast_or_null<ConstantInt>(
C);
4322 if (CI && CI->isZero())
4324 if (CI && !ElementScalable) {
4338 Flags.setNoUnsignedWrap(
true);
4351 VectorElementCount);
4359 if (ElementScalable) {
4360 EVT VScaleTy =
N.getValueType().getScalarType();
4370 if (ElementMul != 1) {
4371 if (ElementMul.isPowerOf2()) {
4372 unsigned Amt = ElementMul.logBase2();
4374 N.getValueType(), IdxN,
4380 N.getValueType(), IdxN, Scale);
4386 N.getValueType(),
N, IdxN);
4397 if (PtrMemTy != PtrTy && !cast<GEPOperator>(
I).isInBounds())
4403void SelectionDAGBuilder::visitAlloca(
const AllocaInst &
I) {
4410 Type *Ty =
I.getAllocatedType();
4414 MaybeAlign Alignment = std::max(
DL.getPrefTypeAlign(Ty),
I.getAlign());
4438 if (*Alignment <= StackAlign)
4439 Alignment = std::nullopt;
4446 Flags.setNoUnsignedWrap(
true);
4456 DAG.
getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4472 if (!
I.hasMetadata(LLVMContext::MD_noundef))
4474 return I.getMetadata(LLVMContext::MD_range);
4477void SelectionDAGBuilder::visitLoad(
const LoadInst &
I) {
4479 return visitAtomicLoad(
I);
4482 const Value *SV =
I.getOperand(0);
4486 if (
const Argument *Arg = dyn_cast<Argument>(SV)) {
4487 if (Arg->hasSwiftErrorAttr())
4488 return visitLoadFromSwiftError(
I);
4491 if (
const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
4492 if (Alloca->isSwiftError())
4493 return visitLoadFromSwiftError(
I);
4499 Type *Ty =
I.getType();
4503 unsigned NumValues = ValueVTs.
size();
4507 Align Alignment =
I.getAlign();
4510 bool isVolatile =
I.isVolatile();
4515 bool ConstantMemory =
false;
4528 ConstantMemory =
true;
4543 unsigned ChainI = 0;
4544 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4567 MMOFlags, AAInfo, Ranges);
4568 Chains[ChainI] =
L.getValue(1);
4570 if (MemVTs[i] != ValueVTs[i])
4576 if (!ConstantMemory) {
4589void SelectionDAGBuilder::visitStoreToSwiftError(
const StoreInst &
I) {
4591 "call visitStoreToSwiftError when backend supports swifterror");
4595 const Value *SrcV =
I.getOperand(0);
4597 SrcV->
getType(), ValueVTs, &Offsets, 0);
4598 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4599 "expect a single EVT for swifterror");
4608 SDValue(Src.getNode(), Src.getResNo()));
4612void SelectionDAGBuilder::visitLoadFromSwiftError(
const LoadInst &
I) {
4614 "call visitLoadFromSwiftError when backend supports swifterror");
4617 !
I.hasMetadata(LLVMContext::MD_nontemporal) &&
4618 !
I.hasMetadata(LLVMContext::MD_invariant_load) &&
4619 "Support volatile, non temporal, invariant for load_from_swift_error");
4621 const Value *SV =
I.getOperand(0);
4622 Type *Ty =
I.getType();
4627 I.getAAMetadata()))) &&
4628 "load_from_swift_error should not be constant memory");
4633 ValueVTs, &Offsets, 0);
4634 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4635 "expect a single EVT for swifterror");
4645void SelectionDAGBuilder::visitStore(
const StoreInst &
I) {
4647 return visitAtomicStore(
I);
4649 const Value *SrcV =
I.getOperand(0);
4650 const Value *PtrV =
I.getOperand(1);
4656 if (
const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4657 if (Arg->hasSwiftErrorAttr())
4658 return visitStoreToSwiftError(
I);
4661 if (
const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4662 if (Alloca->isSwiftError())
4663 return visitStoreToSwiftError(
I);
4670 SrcV->
getType(), ValueVTs, &MemVTs, &Offsets);
4671 unsigned NumValues = ValueVTs.
size();
4684 Align Alignment =
I.getAlign();
4689 unsigned ChainI = 0;
4690 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4707 if (MemVTs[i] != ValueVTs[i])
4710 DAG.
getStore(Root, dl, Val,
Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4711 Chains[ChainI] = St;
4720void SelectionDAGBuilder::visitMaskedStore(
const CallInst &
I,
4721 bool IsCompressing) {
4727 Src0 =
I.getArgOperand(0);
4728 Ptr =
I.getArgOperand(1);
4729 Alignment = cast<ConstantInt>(
I.getArgOperand(2))->getAlignValue();
4730 Mask =
I.getArgOperand(3);
4735 Src0 =
I.getArgOperand(0);
4736 Ptr =
I.getArgOperand(1);
4737 Mask =
I.getArgOperand(2);
4738 Alignment =
I.getParamAlign(1).valueOrOne();
4741 Value *PtrOperand, *MaskOperand, *Src0Operand;
4744 getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4746 getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4788 assert(
Ptr->getType()->isVectorTy() &&
"Unexpected pointer type");
4791 if (
auto *
C = dyn_cast<Constant>(
Ptr)) {
4792 C =
C->getSplatValue();
4798 ElementCount NumElts = cast<VectorType>(
Ptr->getType())->getElementCount();
4807 if (!
GEP ||
GEP->getParent() != CurBB)
4810 if (
GEP->getNumOperands() != 2)
4813 const Value *BasePtr =
GEP->getPointerOperand();
4814 const Value *IndexVal =
GEP->getOperand(
GEP->getNumOperands() - 1);
4820 TypeSize ScaleVal =
DL.getTypeAllocSize(
GEP->getResultElementType());
4825 if (ScaleVal != 1 &&
4838void SelectionDAGBuilder::visitMaskedScatter(
const CallInst &
I) {
4846 Align Alignment = cast<ConstantInt>(
I.getArgOperand(2))
4847 ->getMaybeAlignValue()
4858 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
4880 Ops, MMO, IndexType,
false);
4885void SelectionDAGBuilder::visitMaskedLoad(
const CallInst &
I,
bool IsExpanding) {
4891 Ptr =
I.getArgOperand(0);
4892 Alignment = cast<ConstantInt>(
I.getArgOperand(1))->getAlignValue();
4893 Mask =
I.getArgOperand(2);
4894 Src0 =
I.getArgOperand(3);
4899 Ptr =
I.getArgOperand(0);
4900 Alignment =
I.getParamAlign(0).valueOrOne();
4901 Mask =
I.getArgOperand(1);
4902 Src0 =
I.getArgOperand(2);
4905 Value *PtrOperand, *MaskOperand, *Src0Operand;
4908 getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4910 getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4939void SelectionDAGBuilder::visitMaskedGather(
const CallInst &
I) {
4949 Align Alignment = cast<ConstantInt>(
I.getArgOperand(1))
4950 ->getMaybeAlignValue()
4962 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
5009 AAMDNodes(),
nullptr, SSID, SuccessOrdering, FailureOrdering);
5012 dl, MemVT, VTs, InChain,
5023void SelectionDAGBuilder::visitAtomicRMW(
const AtomicRMWInst &
I) {
5026 switch (
I.getOperation()) {
5076void SelectionDAGBuilder::visitFence(
const FenceInst &
I) {
5090void SelectionDAGBuilder::visitAtomicLoad(
const LoadInst &
I) {
5110 nullptr, SSID, Order);
5126void SelectionDAGBuilder::visitAtomicStore(
const StoreInst &
I) {
5148 nullptr, SSID, Ordering);
5164void SelectionDAGBuilder::visitTargetIntrinsic(
const CallInst &
I,
5165 unsigned Intrinsic) {
5170 bool HasChain = !
F->doesNotAccessMemory();
5171 bool OnlyLoad = HasChain &&
F->onlyReadsMemory();
5198 for (
unsigned i = 0, e =
I.arg_size(); i != e; ++i) {
5199 const Value *Arg =
I.getArgOperand(i);
5200 if (!
I.paramHasAttr(i, Attribute::ImmArg)) {
5207 if (
const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
5208 assert(CI->getBitWidth() <= 64 &&
5209 "large intrinsic immediates not handled");
5227 if (
auto *FPMO = dyn_cast<FPMathOperator>(&
I))
5228 Flags.copyFMF(*FPMO);
5235 auto *Token = Bundle->Inputs[0].get();
5237 assert(Ops.
back().getValueType() != MVT::Glue &&
5238 "Did not expected another glue node here.");
5246 if (IsTgtIntrinsic) {
5254 else if (
Info.fallbackAddressSpace)
5258 Info.size,
I.getAAMetadata());
5259 }
else if (!HasChain) {
5261 }
else if (!
I.getType()->isVoidTy()) {
5275 if (!
I.getType()->isVoidTy()) {
5276 if (!isa<VectorType>(
I.getType()))
5348 SDValue TwoToFractionalPartOfX;
5425 if (
Op.getValueType() == MVT::f32 &&
5449 if (
Op.getValueType() == MVT::f32 &&
5548 if (
Op.getValueType() == MVT::f32 &&
5632 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5645 if (
Op.getValueType() == MVT::f32 &&
5722 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5733 if (
Op.getValueType() == MVT::f32 &&
5746 bool IsExp10 =
false;
5747 if (
LHS.getValueType() == MVT::f32 &&
RHS.getValueType() == MVT::f32 &&
5751 IsExp10 = LHSC->isExactlyValue(Ten);
5778 unsigned Val = RHSC->getSExtValue();
5807 CurSquare, CurSquare);
5812 if (RHSC->getSExtValue() < 0)
5826 EVT VT =
LHS.getValueType();
5849 if ((ScaleInt > 0 || (Saturating &&
Signed)) &&
5853 Opcode, VT, ScaleInt);
5888 switch (
N.getOpcode()) {
5891 Regs.emplace_back(cast<RegisterSDNode>(
Op)->
getReg(),
5892 Op.getValueType().getSizeInBits());
5917bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
5920 const Argument *Arg = dyn_cast<Argument>(V);
5934 auto &Inst =
TII->get(TargetOpcode::DBG_INSTR_REF);
5941 auto *NewDIExpr = FragExpr;
5948 return BuildMI(MF,
DL, Inst,
false, MOs, Variable, NewDIExpr);
5951 auto &Inst =
TII->get(TargetOpcode::DBG_VALUE);
5952 return BuildMI(MF,
DL, Inst, Indirect, Reg, Variable, FragExpr);
5956 if (Kind == FuncArgumentDbgValueKind::Value) {
5961 if (!IsInEntryBlock)
5977 bool VariableIsFunctionInputArg = Variable->
isParameter() &&
5978 !
DL->getInlinedAt();
5980 if (!IsInPrologue && !VariableIsFunctionInputArg)
6011 if (VariableIsFunctionInputArg) {
6021 bool IsIndirect =
false;
6022 std::optional<MachineOperand>
Op;
6025 if (FI != std::numeric_limits<int>::max())
6029 if (!
Op &&
N.getNode()) {
6032 if (ArgRegsAndSizes.
size() == 1)
6033 Reg = ArgRegsAndSizes.
front().first;
6035 if (Reg &&
Reg.isVirtual()) {
6043 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6047 if (!
Op &&
N.getNode()) {
6052 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
6061 for (
const auto &RegAndSize : SplitRegs) {
6065 int RegFragmentSizeInBits = RegAndSize.second;
6067 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
6070 if (
Offset >= ExprFragmentSizeInBits)
6074 if (
Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
6075 RegFragmentSizeInBits = ExprFragmentSizeInBits -
Offset;
6080 Expr,
Offset, RegFragmentSizeInBits);
6081 Offset += RegAndSize.second;
6084 if (!FragmentExpr) {
6091 MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
6092 Kind != FuncArgumentDbgValueKind::Value);
6103 V->getType(), std::nullopt);
6104 if (RFV.occupiesMultipleRegs()) {
6105 splitMultiRegDbgValue(RFV.getRegsAndSizes());
6110 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6111 }
else if (ArgRegsAndSizes.
size() > 1) {
6114 splitMultiRegDbgValue(ArgRegsAndSizes);
6123 "Expected inlined-at fields to agree");
6127 NewMI = MakeVRegDbgValue(
Op->getReg(), Expr, IsIndirect);
6129 NewMI =
BuildMI(MF,
DL,
TII->get(TargetOpcode::DBG_VALUE),
true, *
Op,
6142 unsigned DbgSDNodeOrder) {
6143 if (
auto *FISDN = dyn_cast<FrameIndexSDNode>(
N.getNode())) {
6155 false, dl, DbgSDNodeOrder);
6158 false, dl, DbgSDNodeOrder);
6162 switch (Intrinsic) {
6163 case Intrinsic::smul_fix:
6165 case Intrinsic::umul_fix:
6167 case Intrinsic::smul_fix_sat:
6169 case Intrinsic::umul_fix_sat:
6171 case Intrinsic::sdiv_fix:
6173 case Intrinsic::udiv_fix:
6175 case Intrinsic::sdiv_fix_sat:
6177 case Intrinsic::udiv_fix_sat:
6184void SelectionDAGBuilder::lowerCallToExternalSymbol(
const CallInst &
I,
6185 const char *FunctionName) {
6186 assert(FunctionName &&
"FunctionName must not be nullptr");
6196 assert(cast<CallBase>(PreallocatedSetup)
6199 "expected call_preallocated_setup Value");
6200 for (
const auto *U : PreallocatedSetup->
users()) {
6201 auto *UseCall = cast<CallBase>(U);
6202 const Function *Fn = UseCall->getCalledFunction();
6203 if (!Fn || Fn->
getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6213bool SelectionDAGBuilder::visitEntryValueDbgValue(
6220 const Argument *Arg = cast<Argument>(Values[0]);
6226 dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6227 "couldn't find an associated register for the Argument\n");
6230 Register ArgVReg = ArgIt->getSecond();
6233 if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6235 Variable, Expr, PhysReg,
false , DbgLoc, SDNodeOrder);
6239 LLVM_DEBUG(
dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6240 "couldn't find a physical register\n");
6245void SelectionDAGBuilder::visitConvergenceControl(
const CallInst &
I,
6246 unsigned Intrinsic) {
6248 switch (Intrinsic) {
6249 case Intrinsic::experimental_convergence_anchor:
6252 case Intrinsic::experimental_convergence_entry:
6255 case Intrinsic::experimental_convergence_loop: {
6257 auto *Token = Bundle->Inputs[0].get();
6266void SelectionDAGBuilder::visitIntrinsicCall(
const CallInst &
I,
6267 unsigned Intrinsic) {
6274 if (
auto *FPOp = dyn_cast<FPMathOperator>(&
I))
6275 Flags.copyFMF(*FPOp);
6277 switch (Intrinsic) {
6280 visitTargetIntrinsic(
I, Intrinsic);
6282 case Intrinsic::vscale: {
6287 case Intrinsic::vastart: visitVAStart(
I);
return;
6288 case Intrinsic::vaend: visitVAEnd(
I);
return;
6289 case Intrinsic::vacopy: visitVACopy(
I);
return;
6290 case Intrinsic::returnaddress:
6295 case Intrinsic::addressofreturnaddress:
6300 case Intrinsic::sponentry:
6305 case Intrinsic::frameaddress:
6310 case Intrinsic::read_volatile_register:
6311 case Intrinsic::read_register: {
6315 DAG.
getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6323 case Intrinsic::write_register: {
6325 Value *RegValue =
I.getArgOperand(1);
6328 DAG.
getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6333 case Intrinsic::memcpy: {
6334 const auto &MCI = cast<MemCpyInst>(
I);
6339 Align DstAlign = MCI.getDestAlign().valueOrOne();
6340 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6341 Align Alignment = std::min(DstAlign, SrcAlign);
6342 bool isVol = MCI.isVolatile();
6348 Root, sdl, Op1, Op2, Op3, Alignment, isVol,
6351 updateDAGForMaybeTailCall(MC);
6354 case Intrinsic::memcpy_inline: {
6355 const auto &MCI = cast<MemCpyInlineInst>(
I);
6359 assert(isa<ConstantSDNode>(
Size) &&
"memcpy_inline needs constant size");
6361 Align DstAlign = MCI.getDestAlign().valueOrOne();
6362 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6363 Align Alignment = std::min(DstAlign, SrcAlign);
6364 bool isVol = MCI.isVolatile();
6372 updateDAGForMaybeTailCall(MC);
6375 case Intrinsic::memset: {
6376 const auto &MSI = cast<MemSetInst>(
I);
6381 Align Alignment = MSI.getDestAlign().valueOrOne();
6382 bool isVol = MSI.isVolatile();
6386 Root, sdl, Op1, Op2, Op3, Alignment, isVol,
false,
6388 updateDAGForMaybeTailCall(MS);
6391 case Intrinsic::memset_inline: {
6392 const auto &MSII = cast<MemSetInlineInst>(
I);
6396 assert(isa<ConstantSDNode>(
Size) &&
"memset_inline needs constant size");
6398 Align DstAlign = MSII.getDestAlign().valueOrOne();
6399 bool isVol = MSII.isVolatile();
6406 updateDAGForMaybeTailCall(MC);
6409 case Intrinsic::memmove: {
6410 const auto &MMI = cast<MemMoveInst>(
I);
6415 Align DstAlign = MMI.getDestAlign().valueOrOne();
6416 Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6417 Align Alignment = std::min(DstAlign, SrcAlign);
6418 bool isVol = MMI.isVolatile();
6426 I.getAAMetadata(),
AA);
6427 updateDAGForMaybeTailCall(MM);
6430 case Intrinsic::memcpy_element_unordered_atomic: {
6436 Type *LengthTy =
MI.getLength()->getType();
6437 unsigned ElemSz =
MI.getElementSizeInBytes();
6443 updateDAGForMaybeTailCall(MC);
6446 case Intrinsic::memmove_element_unordered_atomic: {
6447 auto &
MI = cast<AtomicMemMoveInst>(
I);
6452 Type *LengthTy =
MI.getLength()->getType();
6453 unsigned ElemSz =
MI.getElementSizeInBytes();
6459 updateDAGForMaybeTailCall(MC);
6462 case Intrinsic::memset_element_unordered_atomic: {
6463 auto &
MI = cast<AtomicMemSetInst>(
I);
6468 Type *LengthTy =
MI.getLength()->getType();
6469 unsigned ElemSz =
MI.getElementSizeInBytes();
6474 updateDAGForMaybeTailCall(MC);
6477 case Intrinsic::call_preallocated_setup: {
6486 case Intrinsic::call_preallocated_arg: {
6501 case Intrinsic::dbg_declare: {
6502 const auto &DI = cast<DbgDeclareInst>(
I);
6505 if (AssignmentTrackingEnabled ||
6508 LLVM_DEBUG(
dbgs() <<
"SelectionDAG visiting dbg_declare: " << DI <<
"\n");
6514 assert(!DI.hasArgList() &&
"Only dbg.value should currently use DIArgList");
6519 case Intrinsic::dbg_label: {
6522 assert(Label &&
"Missing label");
6529 case Intrinsic::dbg_assign: {
6531 if (AssignmentTrackingEnabled)
6537 case Intrinsic::dbg_value: {
6539 if (AssignmentTrackingEnabled)
6559 SDNodeOrder, IsVariadic))
6565 case Intrinsic::eh_typeid_for: {
6574 case Intrinsic::eh_return_i32:
6575 case Intrinsic::eh_return_i64:
6583 case Intrinsic::eh_unwind_init:
6586 case Intrinsic::eh_dwarf_cfa:
6591 case Intrinsic::eh_sjlj_callsite: {
6593 ConstantInt *CI = cast<ConstantInt>(
I.getArgOperand(0));
6599 case Intrinsic::eh_sjlj_functioncontext: {
6603 cast<AllocaInst>(
I.getArgOperand(0)->stripPointerCasts());
6608 case Intrinsic::eh_sjlj_setjmp: {
6618 case Intrinsic::eh_sjlj_longjmp:
6622 case Intrinsic::eh_sjlj_setup_dispatch:
6626 case Intrinsic::masked_gather:
6627 visitMaskedGather(
I);
6629 case Intrinsic::masked_load:
6632 case Intrinsic::masked_scatter:
6633 visitMaskedScatter(
I);
6635 case Intrinsic::masked_store:
6636 visitMaskedStore(
I);
6638 case Intrinsic::masked_expandload:
6639 visitMaskedLoad(
I,
true );
6641 case Intrinsic::masked_compressstore:
6642 visitMaskedStore(
I,
true );
6644 case Intrinsic::powi:
6648 case Intrinsic::log:
6651 case Intrinsic::log2:
6655 case Intrinsic::log10:
6659 case Intrinsic::exp:
6662 case Intrinsic::exp2:
6666 case Intrinsic::pow:
6670 case Intrinsic::sqrt:
6671 case Intrinsic::fabs:
6672 case Intrinsic::sin:
6673 case Intrinsic::cos:
6674 case Intrinsic::exp10:
6675 case Intrinsic::floor:
6676 case Intrinsic::ceil:
6677 case Intrinsic::trunc:
6678 case Intrinsic::rint:
6679 case Intrinsic::nearbyint:
6680 case Intrinsic::round:
6681 case Intrinsic::roundeven:
6682 case Intrinsic::canonicalize: {
6684 switch (Intrinsic) {
6686 case Intrinsic::sqrt: Opcode =
ISD::FSQRT;
break;
6687 case Intrinsic::fabs: Opcode =
ISD::FABS;
break;
6688 case Intrinsic::sin: Opcode =
ISD::FSIN;
break;
6689 case Intrinsic::cos: Opcode =
ISD::FCOS;
break;
6690 case Intrinsic::exp10: Opcode =
ISD::FEXP10;
break;
6691 case Intrinsic::floor: Opcode =
ISD::FFLOOR;
break;
6692 case Intrinsic::ceil: Opcode =
ISD::FCEIL;
break;
6693 case Intrinsic::trunc: Opcode =
ISD::FTRUNC;
break;
6694 case Intrinsic::rint: Opcode =
ISD::FRINT;
break;
6696 case Intrinsic::round: Opcode =
ISD::FROUND;
break;
6706 case Intrinsic::lround:
6707 case Intrinsic::llround:
6708 case Intrinsic::lrint:
6709 case Intrinsic::llrint: {
6711 switch (Intrinsic) {
6713 case Intrinsic::lround: Opcode =
ISD::LROUND;
break;
6715 case Intrinsic::lrint: Opcode =
ISD::LRINT;
break;
6716 case Intrinsic::llrint: Opcode =
ISD::LLRINT;
break;
6724 case Intrinsic::minnum:
6730 case Intrinsic::maxnum:
6736 case Intrinsic::minimum:
6742 case Intrinsic::maximum:
6748 case Intrinsic::copysign:
6754 case Intrinsic::ldexp:
6760 case Intrinsic::frexp: {
6768 case Intrinsic::arithmetic_fence: {
6774 case Intrinsic::fma:
6780#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
6781 case Intrinsic::INTRINSIC:
6782#include "llvm/IR/ConstrainedOps.def"
6783 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(
I));
6785#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6786#include "llvm/IR/VPIntrinsics.def"
6787 visitVectorPredicationIntrinsic(cast<VPIntrinsic>(
I));
6789 case Intrinsic::fptrunc_round: {
6792 Metadata *MD = cast<MetadataAsValue>(
I.getArgOperand(1))->getMetadata();
6793 std::optional<RoundingMode> RoundMode =
6800 Flags.copyFMF(*cast<FPMathOperator>(&
I));
6812 case Intrinsic::fmuladd: {
6833 case Intrinsic::convert_to_fp16:
6840 case Intrinsic::convert_from_fp16:
6846 case Intrinsic::fptosi_sat: {
6853 case Intrinsic::fptoui_sat: {
6860 case Intrinsic::set_rounding:
6866 case Intrinsic::is_fpclass: {
6871 cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue());
6876 Flags.setNoFPExcept(
6877 !
F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
6892 case Intrinsic::get_fpenv: {
6907 int SPFI = cast<FrameIndexSDNode>(Temp.
getNode())->getIndex();
6914 Res =
DAG.
getLoad(EnvVT, sdl, Chain, Temp, MPI);
6920 case Intrinsic::set_fpenv: {
6934 int SPFI = cast<FrameIndexSDNode>(Temp.
getNode())->getIndex();
6937 Chain =
DAG.
getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
6947 case Intrinsic::reset_fpenv:
6950 case Intrinsic::get_fpmode:
6959 case Intrinsic::set_fpmode:
6964 case Intrinsic::reset_fpmode: {
6969 case Intrinsic::pcmarker: {
6974 case Intrinsic::readcyclecounter: {
6982 case Intrinsic::readsteadycounter: {
6990 case Intrinsic::bitreverse:
6995 case Intrinsic::bswap:
7000 case Intrinsic::cttz: {
7002 ConstantInt *CI = cast<ConstantInt>(
I.getArgOperand(1));
7008 case Intrinsic::ctlz: {
7010 ConstantInt *CI = cast<ConstantInt>(
I.getArgOperand(1));
7016 case Intrinsic::ctpop: {
7022 case Intrinsic::fshl:
7023 case Intrinsic::fshr: {
7024 bool IsFSHL =
Intrinsic == Intrinsic::fshl;
7028 EVT VT =
X.getValueType();
7039 case Intrinsic::sadd_sat: {
7045 case Intrinsic::uadd_sat: {
7051 case Intrinsic::ssub_sat: {
7057 case Intrinsic::usub_sat: {
7063 case Intrinsic::sshl_sat: {
7069 case Intrinsic::ushl_sat: {
7075 case Intrinsic::smul_fix:
7076 case Intrinsic::umul_fix:
7077 case Intrinsic::smul_fix_sat:
7078 case Intrinsic::umul_fix_sat: {
7086 case Intrinsic::sdiv_fix:
7087 case Intrinsic::udiv_fix:
7088 case Intrinsic::sdiv_fix_sat:
7089 case Intrinsic::udiv_fix_sat: {
7094 Op1, Op2, Op3,
DAG, TLI));
7097 case Intrinsic::smax: {
7103 case Intrinsic::smin: {
7109 case Intrinsic::umax: {
7115 case Intrinsic::umin: {
7121 case Intrinsic::abs: {
7127 case Intrinsic::stacksave: {
7135 case Intrinsic::stackrestore:
7139 case Intrinsic::get_dynamic_area_offset: {
7154 case Intrinsic::stackguard: {
7175 case Intrinsic::stackprotector: {
7196 Chain, sdl, Src, FIN,
7203 case Intrinsic::objectsize:
7206 case Intrinsic::is_constant:
7209 case Intrinsic::annotation:
7210 case Intrinsic::ptr_annotation:
7211 case Intrinsic::launder_invariant_group:
7212 case Intrinsic::strip_invariant_group:
7217 case Intrinsic::assume:
7218 case Intrinsic::experimental_noalias_scope_decl:
7219 case Intrinsic::var_annotation:
7220 case Intrinsic::sideeffect:
7225 case Intrinsic::codeview_annotation: {
7230 Metadata *MD = cast<MetadataAsValue>(
I.getArgOperand(0))->getMetadata();
7237 case Intrinsic::init_trampoline: {
7238 const Function *
F = cast<Function>(
I.getArgOperand(1)->stripPointerCasts());
7253 case Intrinsic::adjust_trampoline:
7258 case Intrinsic::gcroot: {
7260 "only valid in functions with gc specified, enforced by Verifier");
7262 const Value *Alloca =
I.getArgOperand(0)->stripPointerCasts();
7263 const Constant *TypeMap = cast<Constant>(
I.getArgOperand(1));
7269 case Intrinsic::gcread:
7270 case Intrinsic::gcwrite:
7272 case Intrinsic::get_rounding:
7278 case Intrinsic::expect:
7283 case Intrinsic::ubsantrap:
7284 case Intrinsic::debugtrap:
7285 case Intrinsic::trap: {
7287 I.getAttributes().getFnAttr(
"trap-func-name").getValueAsString();
7288 if (TrapFuncName.
empty()) {
7289 switch (Intrinsic) {
7290 case Intrinsic::trap:
7293 case Intrinsic::debugtrap:
7296 case Intrinsic::ubsantrap:
7300 cast<ConstantInt>(
I.getArgOperand(0))->getZExtValue(), sdl,
7308 if (Intrinsic == Intrinsic::ubsantrap) {
7310 Args[0].Val =
I.getArgOperand(0);
7312 Args[0].Ty =
Args[0].Val->getType();
7316 CLI.setDebugLoc(sdl).setChain(
getRoot()).setLibCallee(
7327 case Intrinsic::uadd_with_overflow:
7328 case Intrinsic::sadd_with_overflow:
7329 case Intrinsic::usub_with_overflow:
7330 case Intrinsic::ssub_with_overflow:
7331 case Intrinsic::umul_with_overflow:
7332 case Intrinsic::smul_with_overflow: {
7334 switch (Intrinsic) {
7336 case Intrinsic::uadd_with_overflow:
Op =
ISD::UADDO;
break;
7337 case Intrinsic::sadd_with_overflow:
Op =
ISD::SADDO;
break;
7338 case Intrinsic::usub_with_overflow:
Op =
ISD::USUBO;
break;
7339 case Intrinsic::ssub_with_overflow:
Op =
ISD::SSUBO;
break;
7340 case Intrinsic::umul_with_overflow:
Op =
ISD::UMULO;
break;
7341 case Intrinsic::smul_with_overflow:
Op =
ISD::SMULO;
break;
7347 EVT OverflowVT = MVT::i1;
7356 case Intrinsic::prefetch: {
7358 unsigned rw = cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue();
7371 std::nullopt, Flags);
7380 case Intrinsic::lifetime_start:
7381 case Intrinsic::lifetime_end: {
7382 bool IsStart = (
Intrinsic == Intrinsic::lifetime_start);
7387 const int64_t ObjectSize =
7388 cast<ConstantInt>(
I.getArgOperand(0))->getSExtValue();
7393 for (
const Value *Alloca : Allocas) {
7394 const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(Alloca);
7397 if (!LifetimeObject)
7417 case Intrinsic::pseudoprobe: {
7418 auto Guid = cast<ConstantInt>(
I.getArgOperand(0))->getZExtValue();
7419 auto Index = cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue();
7420 auto Attr = cast<ConstantInt>(
I.getArgOperand(2))->getZExtValue();
7425 case Intrinsic::invariant_start:
7430 case Intrinsic::invariant_end:
7433 case Intrinsic::clear_cache:
7436 lowerCallToExternalSymbol(
I, FunctionName);
7438 case Intrinsic::donothing:
7439 case Intrinsic::seh_try_begin:
7440 case Intrinsic::seh_scope_begin:
7441 case Intrinsic::seh_try_end:
7442 case Intrinsic::seh_scope_end:
7445 case Intrinsic::experimental_stackmap:
7448 case Intrinsic::experimental_patchpoint_void:
7449 case Intrinsic::experimental_patchpoint_i64:
7452 case Intrinsic::experimental_gc_statepoint:
7455 case Intrinsic::experimental_gc_result:
7456 visitGCResult(cast<GCResultInst>(
I));
7458 case Intrinsic::experimental_gc_relocate:
7459 visitGCRelocate(cast<GCRelocateInst>(
I));
7461 case Intrinsic::instrprof_cover:
7463 case Intrinsic::instrprof_increment:
7465 case Intrinsic::instrprof_timestamp:
7467 case Intrinsic::instrprof_value_profile:
7469 case Intrinsic::instrprof_mcdc_parameters:
7471 case Intrinsic::instrprof_mcdc_tvbitmap_update:
7473 case Intrinsic::instrprof_mcdc_condbitmap_update:
7475 case Intrinsic::localescape: {
7481 for (
unsigned Idx = 0,
E =
I.arg_size();
Idx <
E; ++
Idx) {
7482 Value *Arg =
I.getArgOperand(
Idx)->stripPointerCasts();
7483 if (isa<ConstantPointerNull>(Arg))
7487 "can only escape static allocas");
7493 TII->get(TargetOpcode::LOCAL_ESCAPE))
7501 case Intrinsic::localrecover: {
7506 auto *Fn = cast<Function>(
I.getArgOperand(0)->stripPointerCasts());
7507 auto *
Idx = cast<ConstantInt>(
I.getArgOperand(2));
7509 unsigned(
Idx->getLimitedValue(std::numeric_limits<int>::max()));
7531 case Intrinsic::eh_exceptionpointer:
7532 case Intrinsic::eh_exceptioncode: {
7534 const auto *CPI = cast<CatchPadInst>(
I.getArgOperand(0));
7539 if (Intrinsic == Intrinsic::eh_exceptioncode)
7544 case Intrinsic::xray_customevent: {
7573 case Intrinsic::xray_typedevent: {
7600 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys, Ops);
7606 case Intrinsic::experimental_deoptimize:
7609 case Intrinsic::experimental_stepvector:
7612 case Intrinsic::vector_reduce_fadd:
7613 case Intrinsic::vector_reduce_fmul:
7614 case Intrinsic::vector_reduce_add:
7615 case Intrinsic::vector_reduce_mul:
7616 case Intrinsic::vector_reduce_and:
7617 case Intrinsic::vector_reduce_or:
7618 case Intrinsic::vector_reduce_xor:
7619 case Intrinsic::vector_reduce_smax:
7620 case Intrinsic::vector_reduce_smin:
7621 case Intrinsic::vector_reduce_umax:
7622 case Intrinsic::vector_reduce_umin:
7623 case Intrinsic::vector_reduce_fmax:
7624 case Intrinsic::vector_reduce_fmin:
7625 case Intrinsic::vector_reduce_fmaximum:
7626 case Intrinsic::vector_reduce_fminimum:
7627 visitVectorReduce(
I, Intrinsic);
7630 case Intrinsic::icall_branch_funnel: {
7639 "llvm.icall.branch.funnel operand must be a GlobalValue");
7642 struct BranchFunnelTarget {
7648 for (
unsigned Op = 1,
N =
I.arg_size();
Op !=
N;
Op += 2) {
7651 if (ElemBase !=
Base)
7653 "to the same GlobalValue");
7656 auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
7659 "llvm.icall.branch.funnel operand must be a GlobalValue");
7665 [](
const BranchFunnelTarget &T1,
const BranchFunnelTarget &T2) {
7666 return T1.Offset < T2.Offset;
7669 for (
auto &
T : Targets) {
7684 case Intrinsic::wasm_landingpad_index:
7690 case Intrinsic::aarch64_settag:
7691 case Intrinsic::aarch64_settag_zero: {
7693 bool ZeroMemory =
Intrinsic == Intrinsic::aarch64_settag_zero;
7702 case Intrinsic::amdgcn_cs_chain: {
7703 assert(
I.arg_size() == 5 &&
"Additional args not supported yet");
7704 assert(cast<ConstantInt>(
I.getOperand(4))->isZero() &&
7705 "Non-zero flags not supported yet");
7721 for (
unsigned Idx : {2, 3, 1}) {
7724 Arg.
Ty =
I.getOperand(
Idx)->getType();
7726 Args.push_back(Arg);
7729 assert(Args[0].IsInReg &&
"SGPR args should be marked inreg");
7730 assert(!Args[1].IsInReg &&
"VGPR args should not be marked inreg");
7731 Args[2].IsInReg =
true;
7736 .setCallee(
CC,
RetTy, Callee, std::move(Args))
7739 .setConvergent(
I.isConvergent());
7741 std::pair<SDValue, SDValue>
Result =
7745 "Should've lowered as tail call");
7750 case Intrinsic::ptrmask: {
7754 EVT PtrVT =
Ptr.getValueType();
7756 "Pointers with different index type are not supported by SDAG");
7760 case Intrinsic::threadlocal_address: {
7764 case Intrinsic::get_active_lane_mask: {
7767 EVT ElementVT =
Index.getValueType();
7770 visitTargetIntrinsic(
I, Intrinsic);
7788 case Intrinsic::experimental_get_vector_length: {
7789 assert(cast<ConstantInt>(
I.getOperand(1))->getSExtValue() > 0 &&
7790 "Expected positive VF");
7791 unsigned VF = cast<ConstantInt>(
I.getOperand(1))->getZExtValue();
7792 bool IsScalable = cast<ConstantInt>(
I.getOperand(2))->isOne();
7798 visitTargetIntrinsic(
I, Intrinsic);
7807 if (CountVT.
bitsLT(VT)) {
7822 case Intrinsic::experimental_cttz_elts: {
7825 EVT OpVT =
Op.getValueType();
7828 visitTargetIntrinsic(
I, Intrinsic);
7848 if (!cast<ConstantSDNode>(
getValue(
I.getOperand(1)))->isZero())
7849 CR = CR.subtract(
APInt(64, 1));
7851 unsigned EltWidth =
I.getType()->getScalarSizeInBits();
7852 EltWidth = std::min(EltWidth, (
unsigned)CR.getActiveBits());
7878 case Intrinsic::vector_insert: {
7886 if (
Index.getValueType() != VectorIdxTy)
7894 case Intrinsic::vector_extract: {
7902 if (
Index.getValueType() != VectorIdxTy)
7909 case Intrinsic::experimental_vector_reverse:
7910 visitVectorReverse(
I);
7912 case Intrinsic::experimental_vector_splice:
7913 visitVectorSplice(
I);
7915 case Intrinsic::callbr_landingpad:
7916 visitCallBrLandingPad(
I);
7918 case Intrinsic::experimental_vector_interleave2:
7919 visitVectorInterleave(
I);
7921 case Intrinsic::experimental_vector_deinterleave2:
7922 visitVectorDeinterleave(
I);
7924 case Intrinsic::experimental_convergence_anchor:
7925 case Intrinsic::experimental_convergence_entry:
7926 case Intrinsic::experimental_convergence_loop:
7927 visitConvergenceControl(
I, Intrinsic);
7931void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
7967 PendingConstrainedFP.push_back(OutChain);
7973 PendingConstrainedFPStrict.push_back(OutChain);
7985 Flags.setNoFPExcept(
true);
7987 if (
auto *FPOp = dyn_cast<FPMathOperator>(&FPI))
7988 Flags.copyFMF(*FPOp);
7993#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
7994 case Intrinsic::INTRINSIC: \
7995 Opcode = ISD::STRICT_##DAGN; \
7997#include "llvm/IR/ConstrainedOps.def"
7998 case Intrinsic::experimental_constrained_fmuladd: {
8005 pushOutChain(
Mul, EB);
8026 auto *
FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
8036 pushOutChain(Result, EB);
8043 std::optional<unsigned> ResOPC;
8045 case Intrinsic::vp_ctlz: {
8046 bool IsZeroUndef = cast<ConstantInt>(VPIntrin.
getArgOperand(1))->isOne();
8047 ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
8050 case Intrinsic::vp_cttz: {
8051 bool IsZeroUndef = cast<ConstantInt>(VPIntrin.
getArgOperand(1))->isOne();
8052 ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
8055#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \
8056 case Intrinsic::VPID: \
8057 ResOPC = ISD::VPSD; \
8059#include "llvm/IR/VPIntrinsics.def"
8064 "Inconsistency: no SDNode available for this VPIntrinsic!");
8066 if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
8067 *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
8069 return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
8070 : ISD::VP_REDUCE_FMUL;
8076void SelectionDAGBuilder::visitVPLoad(
8102void SelectionDAGBuilder::visitVPGather(
8138 {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
8144void SelectionDAGBuilder::visitVPStore(
8148 EVT VT = OpValues[0].getValueType();
8166void SelectionDAGBuilder::visitVPScatter(
8171 EVT VT = OpValues[0].getValueType();
8201 {getMemoryRoot(), OpValues[0], Base, Index, Scale,
8202 OpValues[2], OpValues[3]},
8208void SelectionDAGBuilder::visitVPStridedLoad(
8227 OpValues[2], OpValues[3], MMO,
8235void SelectionDAGBuilder::visitVPStridedStore(
8239 EVT VT = OpValues[0].getValueType();
8251 DAG.
getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8259void SelectionDAGBuilder::visitVPCmp(
const VPCmpIntrinsic &VPIntrin) {
8284 "Unexpected target EVL type");
8293void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8300 if (
const auto *CmpI = dyn_cast<VPCmpIntrinsic>(&VPIntrin))
8301 return visitVPCmp(*CmpI);
8312 "Unexpected target EVL type");
8316 for (
unsigned I = 0;
I < VPIntrin.
arg_size(); ++
I) {
8318 if (
I == EVLParamPos)
8326 if (
auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8333 visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8335 case ISD::VP_GATHER:
8336 visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8338 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8339 visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8342 visitVPStore(VPIntrin, OpValues);
8344 case ISD::VP_SCATTER:
8345 visitVPScatter(VPIntrin, OpValues);
8347 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8348 visitVPStridedStore(VPIntrin, OpValues);
8350 case ISD::VP_FMULADD: {
8351 assert(OpValues.
size() == 5 &&
"Unexpected number of operands");
8353 if (
auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8360 ISD::VP_FMUL,
DL, VTs,
8361 {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8364 {
Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8369 case ISD::VP_IS_FPCLASS: {
8372 auto Constant = OpValues[1]->getAsZExtVal();
8375 {OpValues[0],
Check, OpValues[2], OpValues[3]});
8379 case ISD::VP_INTTOPTR: {
8390 case ISD::VP_PTRTOINT: {
8405 case ISD::VP_CTLZ_ZERO_UNDEF:
8407 case ISD::VP_CTTZ_ZERO_UNDEF: {
8409 DAG.
getNode(Opcode,
DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8429 if (CallSiteIndex) {
8443 assert(BeginLabel &&
"BeginLabel should've been set");
8458 assert(II &&
"II should've been set");
8469std::pair<SDValue, SDValue>
8483 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
8486 "Non-null chain expected with non-tail call!");
8487 assert((Result.second.getNode() || !Result.first.getNode()) &&
8488 "Null value expected with tail call!");
8490 if (!Result.second.getNode()) {
8497 PendingExports.clear();
8512 bool isMustTailCall,
8521 const Value *SwiftErrorVal =
nullptr;
8528 if (Caller->getFnAttribute(
"disable-tail-calls").getValueAsString() ==
8529 "true" && !isMustTailCall)
8536 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
8545 if (V->getType()->isEmptyTy())
8549 Entry.Node = ArgNode; Entry.Ty = V->getType();
8551 Entry.setAttributes(&CB,
I - CB.
arg_begin());
8563 Args.push_back(Entry);
8567 if (Entry.IsSRet && isa<Instruction>(V))
8575 Value *V = Bundle->Inputs[0];
8577 Entry.Node = ArgNode;
8578 Entry.Ty = V->getType();
8579 Entry.IsCFGuardTarget =
true;
8580 Args.push_back(Entry);
8598 "Target doesn't support calls with kcfi operand bundles.");
8599 CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
8606 auto *Token = Bundle->Inputs[0].get();
8607 ConvControlToken =
getValue(Token);
8622 std::pair<SDValue, SDValue> Result =
lowerInvokable(CLI, EHPadBB);
8624 if (Result.first.getNode()) {
8646 if (
const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
8665 bool ConstantMemory =
false;
8670 ConstantMemory =
true;
8681 if (!ConstantMemory)
8688void SelectionDAGBuilder::processIntegerCallValue(
const Instruction &
I,
8702bool SelectionDAGBuilder::visitMemCmpBCmpCall(
const CallInst &
I) {
8703 const Value *
LHS =
I.getArgOperand(0), *
RHS =
I.getArgOperand(1);
8717 if (Res.first.getNode()) {
8718 processIntegerCallValue(
I, Res.first,
true);
8732 auto hasFastLoadsAndCompare = [&](
unsigned NumBits) {
8755 switch (NumBitsToCompare) {
8767 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
8785 processIntegerCallValue(
I, Cmp,
false);
8794bool SelectionDAGBuilder::visitMemChrCall(
const CallInst &
I) {
8795 const Value *Src =
I.getArgOperand(0);
8800 std::pair<SDValue, SDValue> Res =
8804 if (Res.first.getNode()) {
8818bool SelectionDAGBuilder::visitMemPCpyCall(
const CallInst &
I) {
8826 Align Alignment = std::min(DstAlign, SrcAlign);
8840 "** memcpy should not be lowered as TailCall in mempcpy context **");
8858bool SelectionDAGBuilder::visitStrCpyCall(
const CallInst &
I,
bool isStpcpy) {
8859 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
8862 std::pair<SDValue, SDValue> Res =
8867 if (Res.first.getNode()) {
8881bool SelectionDAGBuilder::visitStrCmpCall(
const CallInst &
I) {
8882 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
8885 std::pair<SDValue, SDValue> Res =
8890 if (Res.first.getNode()) {
8891 processIntegerCallValue(
I, Res.first,
true);
8904bool SelectionDAGBuilder::visitStrLenCall(
const CallInst &
I) {
8905 const Value *Arg0 =
I.getArgOperand(0);
8908 std::pair<SDValue, SDValue> Res =
8911 if (Res.first.getNode()) {
8912 processIntegerCallValue(
I, Res.first,
false);
8925bool SelectionDAGBuilder::visitStrNLenCall(
const CallInst &
I) {
8926 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
8929 std::pair<SDValue, SDValue> Res =
8933 if (Res.first.getNode()) {
8934 processIntegerCallValue(
I, Res.first,
false);
8947bool SelectionDAGBuilder::visitUnaryFloatCall(
const CallInst &
I,
8950 if (!
I.onlyReadsMemory())
8954 Flags.copyFMF(cast<FPMathOperator>(
I));
8967bool SelectionDAGBuilder::visitBinaryFloatCall(
const CallInst &
I,
8970 if (!
I.onlyReadsMemory())
8974 Flags.copyFMF(cast<FPMathOperator>(
I));
8983void SelectionDAGBuilder::visitCall(
const CallInst &
I) {
8985 if (
I.isInlineAsm()) {
8993 if (
F->isDeclaration()) {
8995 unsigned IID =
F->getIntrinsicID();
9001 visitIntrinsicCall(
I, IID);
9010 if (!
I.isNoBuiltin() && !
I.isStrictFP() && !
F->hasLocalLinkage() &&
9016 if (visitMemCmpBCmpCall(
I))
9019 case LibFunc_copysign:
9020 case LibFunc_copysignf:
9021 case LibFunc_copysignl:
9024 if (
I.onlyReadsMemory()) {
9028 LHS.getValueType(), LHS, RHS));
9065 case LibFunc_sqrt_finite:
9066 case LibFunc_sqrtf_finite:
9067 case LibFunc_sqrtl_finite:
9072 case LibFunc_floorf:
9073 case LibFunc_floorl:
9077 case LibFunc_nearbyint:
9078 case LibFunc_nearbyintf:
9079 case LibFunc_nearbyintl:
9096 case LibFunc_roundf:
9097 case LibFunc_roundl:
9102 case LibFunc_truncf:
9103 case LibFunc_truncl:
9120 case LibFunc_exp10f:
9121 case LibFunc_exp10l:
9126 case LibFunc_ldexpf:
9127 case LibFunc_ldexpl:
9131 case LibFunc_memcmp:
9132 if (visitMemCmpBCmpCall(
I))
9135 case LibFunc_mempcpy:
9136 if (visitMemPCpyCall(
I))
9139 case LibFunc_memchr:
9140 if (visitMemChrCall(
I))
9143 case LibFunc_strcpy:
9144 if (visitStrCpyCall(
I,
false))
9147 case LibFunc_stpcpy:
9148 if (visitStrCpyCall(
I,
true))
9151 case LibFunc_strcmp:
9152 if (visitStrCmpCall(
I))
9155 case LibFunc_strlen:
9156 if (visitStrLenCall(
I))
9159 case LibFunc_strnlen:
9160 if (visitStrNLenCall(
I))
9170 assert(!
I.hasOperandBundlesOtherThan(
9171 {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
9172 LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated,
9173 LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_kcfi,
9174 LLVMContext::OB_convergencectrl}) &&
9175 "Cannot lower calls with arbitrary operand bundles!");
9213 for (
const auto &Code : Codes)
9228 SDISelAsmOperandInfo &MatchingOpInfo,
9230 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
9236 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9238 OpInfo.ConstraintVT);
9239 std::pair<unsigned, const TargetRegisterClass *> InputRC =
9241 MatchingOpInfo.ConstraintVT);
9242 if ((OpInfo.ConstraintVT.isInteger() !=
9243 MatchingOpInfo.ConstraintVT.isInteger()) ||
9244 (MatchRC.second != InputRC.second)) {
9247 " with a matching output constraint of"
9248 " incompatible type!");
9250 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9257 SDISelAsmOperandInfo &OpInfo,
9270 const Value *OpVal = OpInfo.CallOperandVal;
9271 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
9272 isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
9284 TySize,
DL.getPrefTypeAlign(Ty),
false);
9286 Chain = DAG.
getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9289 OpInfo.CallOperand = StackSlot;
9302static std::optional<unsigned>
9304 SDISelAsmOperandInfo &OpInfo,
9305 SDISelAsmOperandInfo &RefOpInfo) {
9316 return std::nullopt;
9320 unsigned AssignedReg;
9323 &
TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
9326 return std::nullopt;
9331 const MVT RegVT = *
TRI.legalclasstypes_begin(*RC);
9333 if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
9342 !
TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
9347 if (RegVT.
getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
9352 OpInfo.CallOperand =
9354 OpInfo.ConstraintVT = RegVT;
9358 }
else if (RegVT.
isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
9361 OpInfo.CallOperand =
9363 OpInfo.ConstraintVT = VT;
9370 if (OpInfo.isMatchingInputConstraint())
9371 return std::nullopt;
9373 EVT ValueVT = OpInfo.ConstraintVT;
9374 if (OpInfo.ConstraintVT == MVT::Other)
9378 unsigned NumRegs = 1;
9379 if (OpInfo.ConstraintVT != MVT::Other)
9394 I = std::find(
I, RC->
end(), AssignedReg);
9395 if (
I == RC->
end()) {
9398 return {AssignedReg};
9402 for (; NumRegs; --NumRegs, ++
I) {
9403 assert(
I != RC->
end() &&
"Ran out of registers to allocate!");
9408 OpInfo.AssignedRegs =
RegsForValue(Regs, RegVT, ValueVT);
9409 return std::nullopt;
9414 const std::vector<SDValue> &AsmNodeOperands) {
9417 for (; OperandNo; --OperandNo) {
9419 unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
9422 (
F.isRegDefKind() ||
F.isRegDefEarlyClobberKind() ||
F.isMemKind()) &&
9423 "Skipped past definitions?");
9424 CurOp +=
F.getNumOperandRegisters() + 1;
9435 explicit ExtraFlags(
const CallBase &Call) {
9437 if (
IA->hasSideEffects())
9439 if (
IA->isAlignStack())
9441 if (
Call.isConvergent())
9462 unsigned get()
const {
return Flags; }
9469 if (
auto *GA = dyn_cast<GlobalAddressSDNode>(
Op)) {
9470 auto Fn = dyn_cast_or_null<Function>(GA->getGlobal());
9485void SelectionDAGBuilder::visitInlineAsm(
const CallBase &Call,
9498 bool HasSideEffect =
IA->hasSideEffects();
9499 ExtraFlags ExtraInfo(Call);
9501 for (
auto &
T : TargetConstraints) {
9502 ConstraintOperands.
push_back(SDISelAsmOperandInfo(
T));
9503 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.
back();
9505 if (OpInfo.CallOperandVal)
9506 OpInfo.CallOperand =
getValue(OpInfo.CallOperandVal);
9509 HasSideEffect = OpInfo.hasMemory(TLI);
9518 OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
9521 return emitInlineAsmError(Call,
"constraint '" +
Twine(
T.ConstraintCode) +
9522 "' expects an integer constant "
9525 ExtraInfo.update(
T);
9532 bool EmitEHLabels = isa<InvokeInst>(Call);
9534 assert(EHPadBB &&
"InvokeInst must have an EHPadBB");
9536 bool IsCallBr = isa<CallBrInst>(Call);
9538 if (IsCallBr || EmitEHLabels) {
9547 Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
9552 IA->collectAsmStrs(AsmStrs);
9555 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9563 if (OpInfo.hasMatchingInput()) {
9564 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
9595 if (OpInfo.isIndirect &&
isFunction(OpInfo.CallOperand) &&
9598 OpInfo.isIndirect =
false;
9605 !OpInfo.isIndirect) {
9606 assert((OpInfo.isMultipleAlternative ||
9608 "Can only indirectify direct input operands!");
9614 OpInfo.CallOperandVal =
nullptr;
9617 OpInfo.isIndirect =
true;
9623 std::vector<SDValue> AsmNodeOperands;
9624 AsmNodeOperands.push_back(
SDValue());
9631 const MDNode *SrcLoc =
Call.getMetadata(
"srcloc");
9641 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9643 SDISelAsmOperandInfo &RefOpInfo =
9644 OpInfo.isMatchingInputConstraint()
9645 ? ConstraintOperands[OpInfo.getMatchedOperand()]
9647 const auto RegError =
9652 const char *
RegName =
TRI.getName(*RegError);
9653 emitInlineAsmError(Call,
"register '" +
Twine(
RegName) +
9654 "' allocated for constraint '" +
9655 Twine(OpInfo.ConstraintCode) +
9656 "' does not match required type");
9660 auto DetectWriteToReservedRegister = [&]() {
9663 for (
unsigned Reg : OpInfo.AssignedRegs.Regs) {
9665 TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
9667 emitInlineAsmError(Call,
"write to reserved register '" +
9676 !OpInfo.isMatchingInputConstraint())) &&
9677 "Only address as input operand is allowed.");
9679 switch (OpInfo.Type) {
9685 "Failed to convert memory constraint code to constraint id.");
9689 OpFlags.setMemConstraint(ConstraintID);
9692 AsmNodeOperands.push_back(OpInfo.CallOperand);
9697 if (OpInfo.AssignedRegs.Regs.empty()) {
9699 Call,
"couldn't allocate output register for constraint '" +
9700 Twine(OpInfo.ConstraintCode) +
"'");
9704 if (DetectWriteToReservedRegister())
9709 OpInfo.AssignedRegs.AddInlineAsmOperands(
9718 SDValue InOperandVal = OpInfo.CallOperand;
9720 if (OpInfo.isMatchingInputConstraint()) {
9726 if (
Flag.isRegDefKind() ||
Flag.isRegDefEarlyClobberKind()) {
9727 if (OpInfo.isIndirect) {
9729 emitInlineAsmError(Call,
"inline asm not supported yet: "
9730 "don't know how to handle tied "
9731 "indirect register inputs");
9739 auto *
R = cast<RegisterSDNode>(AsmNodeOperands[CurOp+1]);
9741 MVT RegVT =
R->getSimpleValueType(0);
9745 :
TRI.getMinimalPhysRegClass(TiedReg);
9746 for (
unsigned i = 0, e =
Flag.getNumOperandRegisters(); i != e; ++i)
9753 MatchedRegs.getCopyToRegs(InOperandVal,
DAG, dl, Chain, &Glue, &Call);
9755 OpInfo.getMatchedOperand(), dl,
DAG,
9760 assert(
Flag.isMemKind() &&
"Unknown matching constraint!");
9762 "Unexpected number of operands");
9765 Flag.clearMemConstraint();
9766 Flag.setMatchingOp(OpInfo.getMatchedOperand());
9769 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
9780 std::vector<SDValue> Ops;
9785 if (isa<ConstantSDNode>(InOperandVal)) {
9786 emitInlineAsmError(Call,
"value out of range for constraint '" +
9787 Twine(OpInfo.ConstraintCode) +
"'");
9791 emitInlineAsmError(Call,
9792 "invalid operand for inline asm constraint '" +
9793 Twine(OpInfo.ConstraintCode) +
"'");
9806 assert((OpInfo.isIndirect ||
9808 "Operand must be indirect to be a mem!");
9811 "Memory operands expect pointer values");
9816 "Failed to convert memory constraint code to constraint id.");
9820 ResOpType.setMemConstraint(ConstraintID);
9824 AsmNodeOperands.push_back(InOperandVal);
9832 "Failed to convert memory constraint code to constraint id.");
9838 auto *GA = cast<GlobalAddressSDNode>(InOperandVal);
9846 ResOpType.setMemConstraint(ConstraintID);
9848 AsmNodeOperands.push_back(
9851 AsmNodeOperands.push_back(AsmOp);
9857 "Unknown constraint type!");
9860 if (OpInfo.isIndirect) {
9862 Call,
"Don't know how to handle indirect register inputs yet "
9863 "for constraint '" +
9864 Twine(OpInfo.ConstraintCode) +
"'");
9869 if (OpInfo.AssignedRegs.Regs.empty()) {
9870 emitInlineAsmError(Call,
9871 "couldn't allocate input reg for constraint '" +
9872 Twine(OpInfo.ConstraintCode) +
"'");
9876 if (DetectWriteToReservedRegister())
9881 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal,
DAG, dl, Chain, &Glue,
9885 0, dl,
DAG, AsmNodeOperands);
9891 if (!OpInfo.AssignedRegs.Regs.empty())
9901 if (Glue.
getNode()) AsmNodeOperands.push_back(Glue);
9905 DAG.
getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
9916 if (
StructType *StructResult = dyn_cast<StructType>(CallResultType))
9917 ResultTypes = StructResult->elements();
9918 else if (!CallResultType->
isVoidTy())
9919 ResultTypes =
ArrayRef(CallResultType);
9921 auto CurResultType = ResultTypes.
begin();
9922 auto handleRegAssign = [&](
SDValue V) {
9923 assert(CurResultType != ResultTypes.
end() &&
"Unexpected value");
9924 assert((*CurResultType)->isSized() &&
"Unexpected unsized type");
9937 if (ResultVT !=
V.getValueType() &&
9940 else if (ResultVT !=
V.getValueType() && ResultVT.
isInteger() &&
9941 V.getValueType().isInteger()) {
9947 assert(ResultVT ==
V.getValueType() &&
"Asm result value mismatch!");
9953 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9957 if (OpInfo.AssignedRegs.Regs.empty())
9960 switch (OpInfo.ConstraintType) {
9964 Chain, &Glue, &Call);
9976 assert(
false &&
"Unexpected unknown constraint");
9980 if (OpInfo.isIndirect) {
9981 const Value *
Ptr = OpInfo.CallOperandVal;
9982 assert(
Ptr &&
"Expected value CallOperandVal for indirect asm operand");
9988 assert(!
Call.getType()->isVoidTy() &&
"Bad inline asm!");
9993 handleRegAssign(Val);
9999 if (!ResultValues.
empty()) {
10000 assert(CurResultType == ResultTypes.
end() &&
10001 "Mismatch in number of ResultTypes");
10003 "Mismatch in number of output operands in asm result");
10011 if (!OutChains.
empty())
10014 if (EmitEHLabels) {
10015 Chain = lowerEndEH(Chain, cast<InvokeInst>(&Call), EHPadBB, BeginLabel);
10019 if (ResultValues.
empty() || HasSideEffect || !OutChains.
empty() || IsCallBr ||
10024void SelectionDAGBuilder::emitInlineAsmError(
const CallBase &Call,
10025 const Twine &Message) {
10034 if (ValueVTs.
empty())
10038 for (
unsigned i = 0, e = ValueVTs.
size(); i != e; ++i)
10044void SelectionDAGBuilder::visitVAStart(
const CallInst &
I) {
10051void SelectionDAGBuilder::visitVAArg(
const VAArgInst &
I) {
10057 DL.getABITypeAlign(
I.getType()).value());
10060 if (
I.getType()->isPointerTy())
10066void SelectionDAGBuilder::visitVAEnd(
const CallInst &
I) {
10073void SelectionDAGBuilder::visitVACopy(
const CallInst &
I) {
10094 if (!
Lo.isMinValue())
10098 unsigned Bits = std::max(
Hi.getActiveBits(),
10107 unsigned NumVals =
Op.getNode()->getNumValues();
10114 for (
unsigned I = 1;
I != NumVals; ++
I)
10128 unsigned ArgIdx,
unsigned NumArgs,
SDValue Callee,
Type *ReturnTy,
10131 Args.reserve(NumArgs);
10135 for (
unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
10136 ArgI != ArgE; ++ArgI) {
10137 const Value *V = Call->getOperand(ArgI);
10139 assert(!V->getType()->isEmptyTy() &&
"Empty type passed to intrinsic.");
10143 Entry.Ty = V->getType();
10144 Entry.setAttributes(Call, ArgI);
10145 Args.push_back(Entry);
10150 .
setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
10179 for (
unsigned I = StartIdx;
I < Call.arg_size();
I++) {
10194void SelectionDAGBuilder::visitStackmap(
const CallInst &CI) {
10228 assert(
ID.getValueType() == MVT::i64);
10259void SelectionDAGBuilder::visitPatchpoint(
const CallBase &CB,
10275 if (
auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
10278 else if (
auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
10280 SDLoc(SymbolicCallee),
10281 SymbolicCallee->getValueType(0));
10291 "Not enough arguments provided to the patchpoint intrinsic");
10294 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
10310 "Expected a callseq node.");
10312 bool HasGlue =
Call->getGluedNode();
10342 unsigned NumCallRegArgs =
Call->getNumOperands() - (HasGlue ? 4 : 3);
10343 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
10352 for (
unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i !=
e; ++i)
10363 if (IsAnyRegCC && HasDef) {
10368 assert(ValueVTs.
size() == 1 &&
"Expected only one return value type.");
10392 if (IsAnyRegCC && HasDef) {
10404void SelectionDAGBuilder::visitVectorReduce(
const CallInst &
I,
10405 unsigned Intrinsic) {
10409 if (
I.arg_size() > 1)
10415 if (
auto *FPMO = dyn_cast<FPMathOperator>(&
I))
10418 switch (Intrinsic) {
10419 case Intrinsic::vector_reduce_fadd:
10427 case Intrinsic::vector_reduce_fmul:
10435 case Intrinsic::vector_reduce_add:
10438 case Intrinsic::vector_reduce_mul:
10441 case Intrinsic::vector_reduce_and:
10444 case Intrinsic::vector_reduce_or:
10447 case Intrinsic::vector_reduce_xor:
10450 case Intrinsic::vector_reduce_smax:
10453 case Intrinsic::vector_reduce_smin:
10456 case Intrinsic::vector_reduce_umax:
10459 case Intrinsic::vector_reduce_umin:
10462 case Intrinsic::vector_reduce_fmax:
10465 case Intrinsic::vector_reduce_fmin:
10468 case Intrinsic::vector_reduce_fmaximum:
10471 case Intrinsic::vector_reduce_fminimum:
10485 Attrs.push_back(Attribute::SExt);
10487 Attrs.push_back(Attribute::ZExt);
10489 Attrs.push_back(Attribute::InReg);
10499std::pair<SDValue, SDValue>
10513 RetTys.
swap(OldRetTys);
10514 Offsets.swap(OldOffsets);
10516 for (
size_t i = 0, e = OldRetTys.
size(); i != e; ++i) {
10517 EVT RetVT = OldRetTys[i];
10521 unsigned RegisterVTByteSZ = RegisterVT.
getSizeInBits() / 8;
10522 RetTys.
append(NumRegs, RegisterVT);
10523 for (
unsigned j = 0; j != NumRegs; ++j)
10524 Offsets.push_back(
Offset + j * RegisterVTByteSZ);
10536 int DemoteStackIdx = -100;
10547 DL.getAllocaAddrSpace());
10551 Entry.Node = DemoteStackSlot;
10552 Entry.Ty = StackSlotPtrType;
10553 Entry.IsSExt =
false;
10554 Entry.IsZExt =
false;
10555 Entry.IsInReg =
false;
10556 Entry.IsSRet =
true;
10557 Entry.IsNest =
false;
10558 Entry.IsByVal =
false;
10559 Entry.IsByRef =
false;
10560 Entry.IsReturned =
false;
10561 Entry.IsSwiftSelf =
false;
10562 Entry.IsSwiftAsync =
false;
10563 Entry.IsSwiftError =
false;
10564 Entry.IsCFGuardTarget =
false;
10565 Entry.Alignment = Alignment;
10577 for (
unsigned I = 0,
E = RetTys.
size();
I !=
E; ++
I) {
10579 if (NeedsRegBlock) {
10580 Flags.setInConsecutiveRegs();
10581 if (
I == RetTys.
size() - 1)
10582 Flags.setInConsecutiveRegsLast();
10584 EVT VT = RetTys[
I];
10589 for (
unsigned i = 0; i != NumRegs; ++i) {
10591 MyFlags.
Flags = Flags;
10592 MyFlags.
VT = RegisterVT;
10593 MyFlags.
ArgVT = VT;
10598 cast<PointerType>(CLI.
RetTy)->getAddressSpace());
10606 CLI.
Ins.push_back(MyFlags);
10620 CLI.
Ins.push_back(MyFlags);
10628 for (
unsigned i = 0, e = Args.size(); i != e; ++i) {
10632 Type *FinalType = Args[i].Ty;
10633 if (Args[i].IsByVal)
10634 FinalType = Args[i].IndirectType;
10637 for (
unsigned Value = 0, NumValues = ValueVTs.
size();
Value != NumValues;
10642 Args[i].Node.getResNo() +
Value);
10649 Flags.setOrigAlign(OriginalAlignment);
10651 if (Args[i].Ty->isPointerTy()) {
10652 Flags.setPointer();
10653 Flags.setPointerAddrSpace(
10654 cast<PointerType>(Args[i].Ty)->getAddressSpace());
10656 if (Args[i].IsZExt)
10658 if (Args[i].IsSExt)
10660 if (Args[i].IsInReg) {
10664 isa<StructType>(FinalType)) {
10667 Flags.setHvaStart();
10673 if (Args[i].IsSRet)
10675 if (Args[i].IsSwiftSelf)
10676 Flags.setSwiftSelf();
10677 if (Args[i].IsSwiftAsync)
10678 Flags.setSwiftAsync();
10679 if (Args[i].IsSwiftError)
10680 Flags.setSwiftError();
10681 if (Args[i].IsCFGuardTarget)
10682 Flags.setCFGuardTarget();
10683 if (Args[i].IsByVal)
10685 if (Args[i].IsByRef)
10687 if (Args[i].IsPreallocated) {
10688 Flags.setPreallocated();
10696 if (Args[i].IsInAlloca) {
10697 Flags.setInAlloca();
10706 if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
10707 unsigned FrameSize =
DL.getTypeAllocSize(Args[i].IndirectType);
10708 Flags.setByValSize(FrameSize);
10711 if (
auto MA = Args[i].Alignment)
10715 }
else if (
auto MA = Args[i].Alignment) {
10718 MemAlign = OriginalAlignment;
10720 Flags.setMemAlign(MemAlign);
10721 if (Args[i].IsNest)
10724 Flags.setInConsecutiveRegs();
10733 if (Args[i].IsSExt)
10735 else if (Args[i].IsZExt)
10740 if (Args[i].IsReturned && !
Op.getValueType().isVector() &&
10745 Args[i].Ty->getPointerAddressSpace())) &&
10746 RetTys.
size() == NumValues &&
"unexpected use of 'returned'");
10759 CLI.
RetZExt == Args[i].IsZExt))
10760 Flags.setReturned();
10766 for (
unsigned j = 0; j != NumParts; ++j) {
10773 j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
10774 if (NumParts > 1 && j == 0)
10778 if (j == NumParts - 1)
10782 CLI.
Outs.push_back(MyFlags);
10783 CLI.
OutVals.push_back(Parts[j]);
10786 if (NeedsRegBlock &&
Value == NumValues - 1)
10787 CLI.
Outs[CLI.
Outs.size() - 1].Flags.setInConsecutiveRegsLast();
10799 "LowerCall didn't return a valid chain!");
10801 "LowerCall emitted a return value for a tail call!");
10803 "LowerCall didn't emit the correct number of values!");
10815 for (
unsigned i = 0, e = CLI.
Ins.size(); i != e; ++i) {
10816 assert(InVals[i].getNode() &&
"LowerCall emitted a null value!");
10817 assert(
EVT(CLI.
Ins[i].VT) == InVals[i].getValueType() &&
10818 "LowerCall emitted a value with the wrong type!");
10831 assert(PVTs.
size() == 1 &&
"Pointers should fit in one register");
10832 EVT PtrVT = PVTs[0];
10834 unsigned NumValues = RetTys.
size();
10835 ReturnValues.
resize(NumValues);
10841 Flags.setNoUnsignedWrap(
true);
10845 for (
unsigned i = 0; i < NumValues; ++i) {
10852 DemoteStackIdx, Offsets[i]),
10854 ReturnValues[i] = L;
10855 Chains[i] = L.getValue(1);
10862 std::optional<ISD::NodeType> AssertOp;
10867 unsigned CurReg = 0;
10868 for (
EVT VT : RetTys) {
10875 CLI.
DAG, CLI.
DL, &InVals[CurReg], NumRegs, RegisterVT, VT,
nullptr,
10883 if (ReturnValues.
empty())
10889 return std::make_pair(Res, CLI.
Chain);
10906 if (
N->getNumValues() == 1) {
10914 "Lowering returned the wrong number of results!");
10917 for (
unsigned I = 0,
E =
N->getNumValues();
I !=
E; ++
I)
10930 cast<RegisterSDNode>(
Op.getOperand(1))->getReg() != Reg) &&
10931 "Copy from a reg to the same reg!");
10945 ExtendType = PreferredExtendIt->second;
10948 PendingExports.push_back(Chain);
10960 return A->use_empty();
10962 const BasicBlock &Entry =
A->getParent()->front();
10963 for (
const User *U :
A->users())
10964 if (cast<Instruction>(U)->
getParent() != &Entry || isa<SwitchInst>(U))
10972 std::pair<const AllocaInst *, const StoreInst *>>;
10984 enum StaticAllocaInfo {
Unknown, Clobbered, Elidable };
10986 unsigned NumArgs = FuncInfo->
Fn->
arg_size();
10987 StaticAllocas.
reserve(NumArgs * 2);
10989 auto GetInfoIfStaticAlloca = [&](
const Value *V) -> StaticAllocaInfo * {
10992 V = V->stripPointerCasts();
10993 const auto *AI = dyn_cast<AllocaInst>(V);
10994 if (!AI || !AI->isStaticAlloca() || !FuncInfo->
StaticAllocaMap.count(AI))
10997 return &Iter.first->second;
11007 const auto *SI = dyn_cast<StoreInst>(&
I);
11014 if (
I.isDebugOrPseudoInst())
11018 for (
const Use &U :
I.operands()) {
11019 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(U))
11020 *
Info = StaticAllocaInfo::Clobbered;
11026 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
11027 *
Info = StaticAllocaInfo::Clobbered;
11030 const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
11031 StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(Dst);
11034 const AllocaInst *AI = cast<AllocaInst>(Dst);
11037 if (*
Info != StaticAllocaInfo::Unknown)
11045 const Value *Val = SI->getValueOperand()->stripPointerCasts();
11046 const auto *Arg = dyn_cast<Argument>(Val);
11047 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
11048 Arg->getType()->isEmptyTy() ||
11049 DL.getTypeStoreSize(Arg->getType()) !=
11051 !
DL.typeSizeEqualsStoreSize(Arg->getType()) ||
11052 ArgCopyElisionCandidates.
count(Arg)) {
11053 *
Info = StaticAllocaInfo::Clobbered;
11057 LLVM_DEBUG(
dbgs() <<
"Found argument copy elision candidate: " << *AI
11061 *
Info = StaticAllocaInfo::Elidable;
11062 ArgCopyElisionCandidates.
insert({Arg, {AI, SI}});
11067 if (ArgCopyElisionCandidates.
size() == NumArgs)
11081 auto *LNode = dyn_cast<LoadSDNode>(ArgVals[0]);
11084 auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
11091 auto ArgCopyIter = ArgCopyElisionCandidates.
find(&Arg);
11092 assert(ArgCopyIter != ArgCopyElisionCandidates.
end());
11093 const AllocaInst *AI = ArgCopyIter->second.first;
11094 int FixedIndex = FINode->getIndex();
11096 int OldIndex = AllocaIndex;
11100 dbgs() <<
" argument copy elision failed due to bad fixed stack "
11106 LLVM_DEBUG(
dbgs() <<
" argument copy elision failed: alignment of alloca "
11107 "greater than stack argument alignment ("
11108 <<
DebugStr(RequiredAlignment) <<
" vs "
11116 dbgs() <<
"Eliding argument copy from " << Arg <<
" to " << *AI <<
'\n'
11117 <<
" Replacing frame index " << OldIndex <<
" with " << FixedIndex
11122 AllocaIndex = FixedIndex;
11123 ArgCopyElisionFrameIndexMap.
insert({OldIndex, FixedIndex});
11124 for (
SDValue ArgVal : ArgVals)
11128 const StoreInst *SI = ArgCopyIter->second.second;
11129 ElidedArgCopyInstrs.
insert(SI);
11141void SelectionDAGISel::LowerArguments(
const Function &
F) {
11148 if (
F.hasFnAttribute(Attribute::Naked))
11166 Ins.push_back(RetArg);
11174 ArgCopyElisionCandidates);
11178 unsigned ArgNo = Arg.getArgNo();
11181 bool isArgValueUsed = !Arg.use_empty();
11182 unsigned PartBase = 0;
11183 Type *FinalType = Arg.getType();
11184 if (Arg.hasAttribute(Attribute::ByVal))
11185 FinalType = Arg.getParamByValType();
11187 FinalType,
F.getCallingConv(),
F.isVarArg(),
DL);
11188 for (
unsigned Value = 0, NumValues = ValueVTs.
size();
11195 if (Arg.getType()->isPointerTy()) {
11196 Flags.setPointer();
11197 Flags.setPointerAddrSpace(
11198 cast<PointerType>(Arg.getType())->getAddressSpace());
11200 if (Arg.hasAttribute(Attribute::ZExt))
11202 if (Arg.hasAttribute(Attribute::SExt))
11204 if (Arg.hasAttribute(Attribute::InReg)) {
11208 isa<StructType>(Arg.getType())) {
11211 Flags.setHvaStart();
11217 if (Arg.hasAttribute(Attribute::StructRet))
11219 if (Arg.hasAttribute(Attribute::SwiftSelf))
11220 Flags.setSwiftSelf();
11221 if (Arg.hasAttribute(Attribute::SwiftAsync))
11222 Flags.setSwiftAsync();
11223 if (Arg.hasAttribute(Attribute::SwiftError))
11224 Flags.setSwiftError();
11225 if (Arg.hasAttribute(Attribute::ByVal))
11227 if (Arg.hasAttribute(Attribute::ByRef))
11229 if (Arg.hasAttribute(Attribute::InAlloca)) {
11230 Flags.setInAlloca();
11238 if (Arg.hasAttribute(Attribute::Preallocated)) {
11239 Flags.setPreallocated();
11251 const Align OriginalAlignment(
11253 Flags.setOrigAlign(OriginalAlignment);
11256 Type *ArgMemTy =
nullptr;
11257 if (
Flags.isByVal() ||
Flags.isInAlloca() ||
Flags.isPreallocated() ||
11260 ArgMemTy = Arg.getPointeeInMemoryValueType();
11262 uint64_t MemSize =
DL.getTypeAllocSize(ArgMemTy);
11267 if (
auto ParamAlign = Arg.getParamStackAlign())
11268 MemAlign = *ParamAlign;
11269 else if ((ParamAlign = Arg.getParamAlign()))
11270 MemAlign = *ParamAlign;
11273 if (
Flags.isByRef())
11274 Flags.setByRefSize(MemSize);
11276 Flags.setByValSize(MemSize);
11277 }
else if (
auto ParamAlign = Arg.getParamStackAlign()) {
11278 MemAlign = *ParamAlign;
11280 MemAlign = OriginalAlignment;
11282 Flags.setMemAlign(MemAlign);
11284 if (Arg.hasAttribute(Attribute::Nest))
11287 Flags.setInConsecutiveRegs();
11288 if (ArgCopyElisionCandidates.
count(&Arg))
11289 Flags.setCopyElisionCandidate();
11290 if (Arg.hasAttribute(Attribute::Returned))
11291 Flags.setReturned();
11297 for (
unsigned i = 0; i != NumRegs; ++i) {
11302 Flags, RegisterVT, VT, isArgValueUsed, ArgNo,
11304 if (NumRegs > 1 && i == 0)
11305 MyFlags.Flags.setSplit();
11308 MyFlags.Flags.setOrigAlign(
Align(1));
11309 if (i == NumRegs - 1)
11310 MyFlags.Flags.setSplitEnd();
11312 Ins.push_back(MyFlags);
11314 if (NeedsRegBlock &&
Value == NumValues - 1)
11315 Ins[
Ins.size() - 1].Flags.setInConsecutiveRegsLast();
11323 DAG.
getRoot(),
F.getCallingConv(),
F.isVarArg(), Ins, dl, DAG, InVals);
11327 "LowerFormalArguments didn't return a valid chain!");
11329 "LowerFormalArguments didn't emit the correct number of values!");
11331 for (
unsigned i = 0, e =
Ins.size(); i != e; ++i) {
11332 assert(InVals[i].getNode() &&
11333 "LowerFormalArguments emitted a null value!");
11334 assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
11335 "LowerFormalArguments emitted a value with the wrong type!");
11352 MVT VT = ValueVTs[0].getSimpleVT();
11354 std::optional<ISD::NodeType> AssertOp;
11357 F.getCallingConv(), AssertOp);
11363 FuncInfo->DemoteRegister = SRetReg;
11365 SDB->DAG.getCopyToReg(NewRoot,
SDB->getCurSDLoc(), SRetReg, ArgValue);
11378 unsigned NumValues = ValueVTs.
size();
11379 if (NumValues == 0)
11382 bool ArgHasUses = !Arg.use_empty();
11386 if (Ins[i].
Flags.isCopyElisionCandidate()) {
11387 unsigned NumParts = 0;
11388 for (
EVT VT : ValueVTs)
11390 F.getCallingConv(), VT);
11394 ArrayRef(&InVals[i], NumParts), ArgHasUses);
11399 bool isSwiftErrorArg =
11401 Arg.hasAttribute(Attribute::SwiftError);
11402 if (!ArgHasUses && !isSwiftErrorArg) {
11403 SDB->setUnusedArgValue(&Arg, InVals[i]);
11407 dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
11408 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11411 for (
unsigned Val = 0; Val != NumValues; ++Val) {
11412 EVT VT = ValueVTs[Val];
11414 F.getCallingConv(), VT);
11421 if (ArgHasUses || isSwiftErrorArg) {
11422 std::optional<ISD::NodeType> AssertOp;
11423 if (Arg.hasAttribute(Attribute::SExt))
11425 else if (Arg.hasAttribute(Attribute::ZExt))
11429 PartVT, VT,
nullptr, NewRoot,
11430 F.getCallingConv(), AssertOp));
11437 if (ArgValues.
empty())
11442 dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
11443 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11446 SDB->getCurSDLoc());
11448 SDB->setValue(&Arg, Res);
11461 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
11462 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11471 unsigned Reg = cast<RegisterSDNode>(Res.
getOperand(1))->getReg();
11483 unsigned Reg = cast<RegisterSDNode>(Res.
getOperand(1))->getReg();
11490 FuncInfo->InitializeRegForValue(&Arg);
11491 SDB->CopyToExportRegsIfNeeded(&Arg);
11495 if (!Chains.
empty()) {
11502 assert(i == InVals.
size() &&
"Argument register count mismatch!");
11506 if (!ArgCopyElisionFrameIndexMap.
empty()) {
11509 auto I = ArgCopyElisionFrameIndexMap.
find(
VI.getStackSlot());
11510 if (
I != ArgCopyElisionFrameIndexMap.
end())
11511 VI.updateStackSlot(
I->second);
11526SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(
const BasicBlock *LLVMBB) {
11534 if (!isa<PHINode>(SuccBB->begin()))
continue;
11539 if (!SuccsHandled.
insert(SuccMBB).second)
11547 for (
const PHINode &PN : SuccBB->phis()) {
11549 if (PN.use_empty())
11553 if (PN.getType()->isEmptyTy())
11557 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
11559 if (
const auto *
C = dyn_cast<Constant>(PHIOp)) {
11566 if (
auto *CI = dyn_cast<ConstantInt>(
C))
11578 assert(isa<AllocaInst>(PHIOp) &&
11580 "Didn't codegen value into a register!??");
11590 for (
EVT VT : ValueVTs) {
11592 for (
unsigned i = 0; i != NumRegisters; ++i)
11594 std::make_pair(&*
MBBI++, Reg + i));
11595 Reg += NumRegisters;
11615void SelectionDAGBuilder::updateDAGForMaybeTailCall(
SDValue MaybeTC) {
11617 if (MaybeTC.
getNode() !=
nullptr)
11632 unsigned Size =
W.LastCluster -
W.FirstCluster + 1;
11636 if (
Size == 2 &&
W.MBB == SwitchMBB) {
11649 const APInt &SmallValue =
Small.Low->getValue();
11650 const APInt &BigValue =
Big.Low->getValue();
11653 APInt CommonBit = BigValue ^ SmallValue;
11668 addSuccessorWithProb(SwitchMBB,
Small.MBB,
Small.Prob +
Big.Prob);
11670 addSuccessorWithProb(
11671 SwitchMBB, DefaultMBB,
11675 addSuccessorWithProb(SwitchMBB, DefaultMBB);
11698 return a.Prob != b.Prob ?
11700 a.Low->getValue().slt(b.Low->getValue());
11707 if (
I->Prob >
W.LastCluster->Prob)
11709 if (
I->Kind ==
CC_Range &&
I->MBB == NextMBB) {
11720 UnhandledProbs +=
I->Prob;
11724 bool FallthroughUnreachable =
false;
11726 if (
I ==
W.LastCluster) {
11728 Fallthrough = DefaultMBB;
11729 FallthroughUnreachable = isa<UnreachableInst>(
11733 CurMF->
insert(BBI, Fallthrough);
11737 UnhandledProbs -=
I->Prob;
11747 CurMF->
insert(BBI, JumpMBB);
11749 auto JumpProb =
I->Prob;
11750 auto FallthroughProb = UnhandledProbs;
11758 if (*SI == DefaultMBB) {
11759 JumpProb += DefaultProb / 2;
11760 FallthroughProb -= DefaultProb / 2;
11778 if (FallthroughUnreachable) {
11780 bool HasBranchTargetEnforcement =
false;
11782 HasBranchTargetEnforcement =
11786 HasBranchTargetEnforcement =
11788 "branch-target-enforcement");
11790 if (!HasBranchTargetEnforcement)
11795 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
11796 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
11802 JT->Default = Fallthrough;
11805 if (CurMBB == SwitchMBB) {
11828 BTB->
Prob += DefaultProb / 2;
11832 if (FallthroughUnreachable)
11836 if (CurMBB == SwitchMBB) {
11845 if (
I->Low ==
I->High) {
11860 if (FallthroughUnreachable)
11864 CaseBlock CB(
CC, LHS, RHS, MHS,
I->MBB, Fallthrough, CurMBB,
11867 if (CurMBB == SwitchMBB)
11870 SL->SwitchCases.push_back(CB);
11875 CurMBB = Fallthrough;
11879void SelectionDAGBuilder::splitWorkItem(
SwitchWorkList &WorkList,
11883 assert(
W.FirstCluster->Low->getValue().slt(
W.LastCluster->Low->getValue()) &&
11884 "Clusters not sorted?");
11885 assert(
W.LastCluster -
W.FirstCluster + 1 >= 2 &&
"Too small to split!");
11887 auto [LastLeft, FirstRight, LeftProb, RightProb] =
11888 SL->computeSplitWorkItemInfo(W);
11893 assert(PivotCluster >
W.FirstCluster);
11894 assert(PivotCluster <=
W.LastCluster);
11909 if (FirstLeft == LastLeft && FirstLeft->Kind ==
CC_Range &&
11910 FirstLeft->Low ==
W.GE &&
11911 (FirstLeft->High->getValue() + 1LL) == Pivot->
getValue()) {
11912 LeftMBB = FirstLeft->MBB;
11917 {LeftMBB, FirstLeft, LastLeft,
W.GE, Pivot,
W.DefaultProb / 2});
11926 if (FirstRight == LastRight && FirstRight->Kind ==
CC_Range &&
11927 W.LT && (FirstRight->High->getValue() + 1ULL) ==
W.LT->getValue()) {
11928 RightMBB = FirstRight->MBB;
11933 {RightMBB, FirstRight, LastRight, Pivot,
W.LT,
W.DefaultProb / 2});
11942 if (
W.MBB == SwitchMBB)
11945 SL->SwitchCases.push_back(CB);
11978 unsigned PeeledCaseIndex = 0;
11979 bool SwitchPeeled =
false;
11982 if (
CC.Prob < TopCaseProb)
11984 TopCaseProb =
CC.Prob;
11985 PeeledCaseIndex =
Index;
11986 SwitchPeeled =
true;
11991 LLVM_DEBUG(
dbgs() <<
"Peeled one top case in switch stmt, prob: "
11992 << TopCaseProb <<
"\n");
12002 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
12004 nullptr,
nullptr, TopCaseProb.
getCompl()};
12005 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
12007 Clusters.erase(PeeledCaseIt);
12010 dbgs() <<
"Scale the probablity for one cluster, before scaling: "
12011 <<
CC.Prob <<
"\n");
12015 PeeledCaseProb = TopCaseProb;
12016 return PeeledSwitchMBB;
12019void SelectionDAGBuilder::visitSwitch(
const SwitchInst &SI) {
12023 Clusters.reserve(
SI.getNumCases());
12024 for (
auto I :
SI.cases()) {
12043 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
12047 if (Clusters.empty()) {
12048 assert(PeeledSwitchMBB == SwitchMBB);
12050 if (DefaultMBB != NextBlock(SwitchMBB)) {
12059 SL->findBitTestClusters(Clusters, &SI);
12062 dbgs() <<
"Case clusters: ";
12069 C.Low->getValue().print(
dbgs(),
true);
12070 if (
C.Low !=
C.High) {
12072 C.High->getValue().print(
dbgs(),
true);
12079 assert(!Clusters.empty());
12083 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
12090 {PeeledSwitchMBB,
First,
Last,
nullptr,
nullptr, DefaultProb});
12092 while (!WorkList.
empty()) {
12094 unsigned NumClusters =
W.LastCluster -
W.FirstCluster + 1;
12099 splitWorkItem(WorkList, W,
SI.getCondition(), SwitchMBB);
12103 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, DefaultMBB);
12107void SelectionDAGBuilder::visitStepVector(
const CallInst &
I) {
12114void SelectionDAGBuilder::visitVectorReverse(
const CallInst &
I) {
12120 assert(VT ==
V.getValueType() &&
"Malformed vector.reverse!");
12131 for (
unsigned i = 0; i != NumElts; ++i)
12132 Mask.push_back(NumElts - 1 - i);
12137void SelectionDAGBuilder::visitVectorDeinterleave(
const CallInst &
I) {
12168void SelectionDAGBuilder::visitVectorInterleave(
const CallInst &
I) {
12193void SelectionDAGBuilder::visitFreeze(
const FreezeInst &
I) {
12197 unsigned NumValues = ValueVTs.
size();
12198 if (NumValues == 0)
return;
12203 for (
unsigned i = 0; i != NumValues; ++i)
12211void SelectionDAGBuilder::visitVectorSplice(
const CallInst &
I) {
12218 int64_t
Imm = cast<ConstantInt>(
I.getOperand(2))->getSExtValue();
12234 for (
unsigned i = 0; i < NumElts; ++i)
12263 assert(
MI->getOpcode() == TargetOpcode::COPY &&
12264 "start of copy chain MUST be COPY");
12265 Reg =
MI->getOperand(1).getReg();
12266 MI =
MRI.def_begin(Reg)->getParent();
12268 if (
MI->getOpcode() == TargetOpcode::COPY) {
12269 assert(Reg.isVirtual() &&
"expected COPY of virtual register");
12270 Reg =
MI->getOperand(1).getReg();
12271 assert(Reg.isPhysical() &&
"expected COPY of physical register");
12272 MI =
MRI.def_begin(Reg)->getParent();
12275 assert(
MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
12276 "end of copy chain MUST be INLINEASM_BR");
12284void SelectionDAGBuilder::visitCallBrLandingPad(
const CallInst &
I) {
12288 cast<CallBrInst>(
I.getParent()->getUniquePredecessor()->getTerminator());
12300 for (
auto &
T : TargetConstraints) {
12301 SDISelAsmOperandInfo OpInfo(
T);
12309 switch (OpInfo.ConstraintType) {
12317 for (
size_t i = 0, e = OpInfo.AssignedRegs.Regs.size(); i != e; ++i) {
12322 OpInfo.AssignedRegs.Regs[i] = OriginalDef;
12325 SDValue V = OpInfo.AssignedRegs.getCopyFromRegs(
12328 ResultVTs.
push_back(OpInfo.ConstraintVT);
12337 ResultVTs.
push_back(OpInfo.ConstraintVT);
unsigned const MachineRegisterInfo * MRI
static unsigned getIntrinsicID(const SDNode *N)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file implements the BitVector class.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
const HexagonInstrInfo * TII
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
static void getRegistersForValue(MachineFunction &MF, MachineIRBuilder &MIRBuilder, GISelAsmOperandInfo &OpInfo, GISelAsmOperandInfo &RefOpInfo)
Assign virtual/physical registers for the specified register operand.
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
unsigned const TargetRegisterInfo * TRI
static const Function * getCalledFunction(const Value *V, bool &IsNoBuiltin)
This file provides utility analysis objects describing memory locations.
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
Module.h This file contains the declarations for the Module class.
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool hasOnlySelectUsers(const Value *Cond)
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL, SDValue &Chain)
Create a LOAD_STACK_GUARD node, and let it carry the target specific global variable if there exists ...
static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx, const SDLoc &DL, SmallVectorImpl< SDValue > &Ops, SelectionDAGBuilder &Builder)
Add a stack map intrinsic call's live variable operands to a stackmap or patchpoint target node's ope...
static const unsigned MaxParallelChains
static void getUnderlyingArgRegs(SmallVectorImpl< std::pair< unsigned, TypeSize > > &Regs, const SDValue &N)
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
visitPow - Lower a pow intrinsic.
static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index, ISD::MemIndexType &IndexType, SDValue &Scale, SelectionDAGBuilder *SDB, const BasicBlock *CurBB, uint64_t ElemSize)
static const CallBase * FindPreallocatedCall(const Value *PreallocatedSetup)
Given a @llvm.call.preallocated.setup, return the corresponding preallocated call.
static cl::opt< unsigned > SwitchPeelThreshold("switch-peel-threshold", cl::Hidden, cl::init(66), cl::desc("Set the case probability threshold for peeling the case from a " "switch statement. A value greater than 100 will void this " "optimization"))
static cl::opt< bool > InsertAssertAlign("insert-assert-align", cl::init(true), cl::desc("Insert the experimental `assertalign` node."), cl::ReallyHidden)
static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin)
static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG, DILocalVariable *Variable, DebugLoc DL, unsigned Order, SmallVectorImpl< Value * > &Values, DIExpression *Expression)
static unsigned findMatchingInlineAsmOperand(unsigned OperandNo, const std::vector< SDValue > &AsmNodeOperands)
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo, SDISelAsmOperandInfo &MatchingOpInfo, SelectionDAG &DAG)
Make sure that the output operand OpInfo and its corresponding input operand MatchingOpInfo have comp...
static void findUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
When an invoke or a cleanupret unwinds to the next EH pad, there are many places it could ultimately ...
static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic)
static BranchProbability scaleCaseProbality(BranchProbability CaseProb, BranchProbability PeeledCaseProb)
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp2 - Lower an exp2 intrinsic.
static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue Scale, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt, const SDLoc &dl)
getF32Constant - Get 32-bit floating point constant.
static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val, const SDLoc &DL, EVT PartVT)
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog10 - Lower a log10 intrinsic.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv)
getCopyToPartsVector - Create a series of nodes that contain the specified value split into legal par...
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv=std::nullopt, ISD::NodeType ExtendKind=ISD::ANY_EXTEND)
getCopyToParts - Create a series of nodes that contain the specified value split into legal parts.
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, SelectionDAGBuilder &Builder)
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog2 - Lower a log2 intrinsic.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location, SDISelAsmOperandInfo &OpInfo, SelectionDAG &DAG)
Get a direct memory input to behave well as an indirect operand.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel)
isOnlyUsedInEntryBlock - If the specified argument is only used in the entry block,...
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V, const Twine &ErrMsg)
static bool collectInstructionDeps(SmallMapVector< const Instruction *, bool, 8 > *Deps, const Value *V, SmallMapVector< const Instruction *, bool, 8 > *Necessary=nullptr, unsigned Depth=0)
static void findArgumentCopyElisionCandidates(const DataLayout &DL, FunctionLoweringInfo *FuncInfo, ArgCopyElisionMapTy &ArgCopyElisionCandidates)
Scan the entry block of the function in FuncInfo for arguments that look like copies into a local all...
static bool isFunction(SDValue Op)
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI, const SDLoc &dl)
GetExponent - Get the exponent:
static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg)
static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, SelectionDAG &DAG)
ExpandPowI - Expand a llvm.powi intrinsic.
static void findWasmUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog - Lower a log intrinsic.
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC=std::nullopt, std::optional< ISD::NodeType > AssertOp=std::nullopt)
getCopyFromParts - Create a value that contains the specified legal parts combined into the value the...
static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl, SelectionDAG &DAG)
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl)
GetSignificand - Get the significand and build it into a floating-point number with exponent of 1:
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp - Lower an exp intrinsic.
static const MDNode * getRangeMetadata(const Instruction &I)
static cl::opt< unsigned, true > LimitFPPrecision("limit-float-precision", cl::desc("Generate low-precision inline sequences " "for some float libcalls"), cl::location(LimitFloatPrecision), cl::Hidden, cl::init(0))
static void tryToElideArgumentCopy(FunctionLoweringInfo &FuncInfo, SmallVectorImpl< SDValue > &Chains, DenseMap< int, int > &ArgCopyElisionFrameIndexMap, SmallPtrSetImpl< const Instruction * > &ElidedArgCopyInstrs, ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg, ArrayRef< SDValue > ArgVals, bool &ArgHasUses)
Try to elide argument copies from memory into a local alloca.
static unsigned LimitFloatPrecision
LimitFloatPrecision - Generate low-precision inline sequences for some float libcalls (6,...
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC)
getCopyFromPartsVector - Create a value that contains the specified legal parts combined into the val...
static bool InBlock(const Value *V, const BasicBlock *BB)
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
static SymbolRef::Type getType(const Symbol *Sym)
support::ulittle16_t & Lo
support::ulittle16_t & Hi
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)
Checks whether the given location points to constant memory, or if OrLocal is true whether it points ...
Class for arbitrary precision integers.
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
an instruction to allocate memory on the stack
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
This class represents an incoming formal argument to a Function.
bool hasAttribute(Attribute::AttrKind Kind) const
Check if an argument has a given attribute.
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
This class represents the atomic memcpy intrinsic i.e.
an instruction that atomically reads a memory location, combines it with another value,...
@ Min
*p = old <signed v ? old : v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
AttributeSet getRetAttrs() const
The attributes for the ret value are returned.
bool hasFnAttr(Attribute::AttrKind Kind) const
Return true if the attribute exists for the function.
bool getValueAsBool() const
Return the attribute's value as a boolean.
LLVM Basic Block Representation.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Function * getParent() const
Return the enclosing method, or null if none.
const Instruction * getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
const Instruction & back() const
This class represents a no-op cast from one type to another.
bool test(unsigned Idx) const
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
size_type size() const
size - Returns the number of bits in this bitvector.
The address of a basic block.
Conditional or Unconditional Branch instruction.
Analysis providing branch probability information.
BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const
Test if an edge is hot relative to other out-edges of the Src.
static uint32_t getDenominator()
static BranchProbability getOne()
uint32_t getNumerator() const
uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
CallingConv::ID getCallingConv() const
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
bool isIndirectCall() const
Return true if the callsite is an indirect call.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
unsigned arg_size() const
AttributeList getAttributes() const
Return the parameter attributes for this call.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
A constant value that is initialized with an expression using other constant values.
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
This class represents a range of values.
APInt getUnsignedMin() const
Return the smallest unsigned value contained in the ConstantRange.
bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
bool isEmptySet() const
Return true if this set contains no members.
bool isUpperWrapped() const
Return true if the exclusive upper bound wraps around the unsigned domain.
APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
uint64_t getZExtValue() const
Constant Vector Declarations.
This is an important base class in LLVM.
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static bool fragmentsOverlap(const FragmentInfo &A, const FragmentInfo &B)
Check if fragments overlap between a pair of FragmentInfos.
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
static const DIExpression * convertToUndefExpression(const DIExpression *Expr)
Removes all elements from Expr that do not apply to an undef debug value, which includes every operat...
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
Base class for variables.
std::optional< uint64_t > getSizeInBits() const
Determines the size of the variable's type.
Records a position in IR for a source label (DILabel).
Record of a variable value-assignment, aka a non instruction representation of the dbg....
DIExpression * getExpression() const
iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
LocationType getType() const
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
unsigned getAllocaAddrSpace() const
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
This represents the llvm.dbg.label instruction.
DILabel * getLabel() const
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
This represents the llvm.dbg.value instruction.
iterator_range< location_op_iterator > getValues() const
DILocalVariable * getVariable() const
DIExpression * getExpression() const
bool isKillLocation() const
DILocation * getInlinedAt() const
iterator find(const_arg_type_t< KeyT > Val)
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
static constexpr ElementCount getFixed(ScalarTy MinVal)
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
constexpr bool isScalar() const
Exactly one element.
Class representing an expression and its matching format.
This instruction compares its operands according to the predicate given to the constructor.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
bool allowReassoc() const
Flag queries.
An instruction for ordering other memory operations.
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
This class represents a freeze function that returns a random concrete value if an operand is either a ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
BranchProbabilityInfo * BPI
Register CreateRegs(const Value *V)
Register DemoteRegister
DemoteRegister - if CanLowerReturn is false, DemoteRegister is a vreg allocated to hold a pointer to ...
BitVector DescribedArgs
Bitvector with a bit set if corresponding argument is described in ArgDbgValues.
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block.
int getArgumentFrameIndex(const Argument *A)
getArgumentFrameIndex - Get frame index for the byval argument.
DenseMap< const BasicBlock *, MachineBasicBlock * > MBBMap
MBBMap - A mapping from LLVM basic blocks to their machine code entry.
SmallPtrSet< const DPValue *, 8 > PreprocessedDPVDeclares
bool isExportedInst(const Value *V) const
isExportedInst - Return true if the specified value is an instruction exported from its block.
const LiveOutInfo * GetLiveOutRegInfo(Register Reg)
GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the register is a PHI destinat...
Register InitializeRegForValue(const Value *V)
unsigned ExceptionPointerVirtReg
If the current MBB is a landing pad, the exception pointer and exception selector registers are copie...
SmallPtrSet< const DbgDeclareInst *, 8 > PreprocessedDbgDeclares
Collection of dbg.declare instructions handled after argument lowering and before ISel proper.
DenseMap< const Value *, Register > ValueMap
ValueMap - Since we emit code for the function a basic block at a time, we must remember which virtua...
MachineBasicBlock::iterator InsertPt
MBB - The current insert position inside the current block.
MachineBasicBlock * MBB
MBB - The current block.
std::vector< std::pair< MachineInstr *, unsigned > > PHINodesToUpdate
PHINodesToUpdate - A list of phi instructions whose operand list will be updated after processing the...
unsigned ExceptionSelectorVirtReg
SmallVector< MachineInstr *, 8 > ArgDbgValues
ArgDbgValues - A list of DBG_VALUE instructions created during isel for function arguments that are i...
MachineRegisterInfo * RegInfo
Register CreateReg(MVT VT, bool isDivergent=false)
CreateReg - Allocate a single virtual register for the given type.
bool CanLowerReturn
CanLowerReturn - true iff the function's return value can be lowered to registers.
DenseMap< const Value *, ISD::NodeType > PreferredExtendType
Record the preferred extend type (ISD::SIGN_EXTEND or ISD::ZERO_EXTEND) for a value.
Register getCatchPadExceptionPointerVReg(const Value *CPI, const TargetRegisterClass *RC)
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
Type * getReturnType() const
Data structure describing the variable locations in a function.
const BasicBlock & getEntryBlock() const
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generatio...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Constant * getPersonalityFn() const
Get the personality function associated with this function.
AttributeList getAttributes() const
Return the attribute list for this Function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Garbage collection metadata for a single function.
void addStackRoot(int Num, const Constant *Metadata)
addStackRoot - Registers a root that lives on the stack.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
This instruction inserts a struct field of array element value into an aggregate value.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const BasicBlock * getParent() const
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
@ MIN_INT_BITS
Minimum number of bits that can be specified.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
void emitError(uint64_t LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
The landingpad instruction holds all of the information necessary to generate correct exception handl...
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
uint64_t getScalarSizeInBits() const
@ INVALID_SIMPLE_VALUE_TYPE
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool bitsGE(MVT VT) const
Return true if this has no less bits than VT.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
bool isEHPad() const
Returns true if the block is a landing pad.
void setIsEHCatchretTarget(bool V=true)
Indicates if this is a target block of a catchret.
void setIsCleanupFuncletEntry(bool V=true)
Indicates if this is the entry block of a cleanup funclet.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
succ_iterator succ_begin()
std::vector< MachineBasicBlock * >::iterator succ_iterator
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void setIsEHFuncletEntry(bool V=true)
Indicates if this is the entry block of an EH funclet.
void setIsEHScopeEntry(bool V=true)
Indicates if this is the entry block of an EH scope, i.e., the block that used to have a catchpa...
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable)
Marks the immutability of an object.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setHasPatchPoint(bool s=true)
void setHasStackMap(bool s=true)
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
void RemoveStackObject(int ObjectIdx)
Remove or mark dead a statically sized stack object.
void setFunctionContextIndex(int I)
Description of the location of a variable whose Address is valid and unchanging during function execu...
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
void setCallsUnwindInit(bool b)
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site)
Map the begin label for a call site.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
void setHasEHCatchret(bool V)
void setCallsEHReturn(bool b)
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
unsigned getTypeIDFor(const GlobalValue *TI)
Return the type id for the specified typeinfo. This is function wide.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
auto getInStackSlotVariableDbgInfo()
Returns the collection of variables for which we have debug info and that have been assigned a stack ...
void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD)
Record annotations associated with a particular label.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineModuleInfo & getMMI() const
const MachineBasicBlock & front() const
bool hasEHFunclets() const
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void erase(iterator MBBI)
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
const MCContext & getContext() const
const Module * getModule() const
void setCurrentCallSite(unsigned Site)
Set the call site currently being processed.
unsigned getCurrentCallSite()
Get the call site currently being processed, if any.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
static MachineOperand CreateFI(int Idx)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
ArrayRef< std::pair< MCRegister, Register > > liveins() const
An SDNode that represents everything that will be needed to construct a MachineInstr.
bool contains(const KeyT &Key) const
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
Representation for a specific memory location.
static MemoryLocation getAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location after Ptr, while remaining within the underlying objec...
A Module instance is used to store all the information related to an LLVM module.
Metadata * getModuleFlag(StringRef Key) const
Return the corresponding value if Key appears in module flags, otherwise return null.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A udiv or sdiv instruction, which can be marked as "exact", indicating that no bits are destroyed.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
Holds the information from a dbg_label node through SDISel.
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(unsigned VReg)
static SDDbgOperand fromConst(const Value *Const)
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
SelectionDAGBuilder - This is the common target-independent lowering implementation that is parameter...
SDValue getValue(const Value *V)
getValue - Return an SDValue for the given Value.
void addDanglingDebugInfo(SmallVectorImpl< Value * > &Values, DILocalVariable *Var, DIExpression *Expr, bool IsVariadic, DebugLoc DL, unsigned Order)
Register a dbg_value which relies on a Value which we have not yet seen.
void visitDbgInfo(const Instruction &I)
void clearDanglingDebugInfo()
Clear the dangling debug information map.
void clear()
Clear out the current SelectionDAG and the associated state and prepare this SelectionDAGBuilder obje...
void visitBitTestHeader(SwitchCG::BitTestBlock &B, MachineBasicBlock *SwitchBB)
visitBitTestHeader - This function emits necessary code to produce a value suitable for "bit tests"
void LowerStatepoint(const GCStatepointInst &I, const BasicBlock *EHPadBB=nullptr)
std::unique_ptr< SDAGSwitchLowering > SL
SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I, SDValue Op)
bool HasTailCall
This is set to true if a call in the current block has been translated as a tail call.
bool ShouldEmitAsBranches(const std::vector< SwitchCG::CaseBlock > &Cases)
If the set of cases should be emitted as a series of branches, return true.
void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
EmitBranchForMergedCondition - Helper method for FindMergedConditions.
void LowerDeoptimizeCall(const CallInst *CI)
void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee, const BasicBlock *EHPadBB)
SwiftErrorValueTracking & SwiftError
Information about the swifterror values used throughout the function.
SDValue getNonRegisterValue(const Value *V)
getNonRegisterValue - Return an SDValue for the given Value, but don't look in FuncInfo....
void CopyValueToVirtualRegister(const Value *V, unsigned Reg, ISD::NodeType ExtendType=ISD::ANY_EXTEND)
DenseMap< MachineBasicBlock *, SmallVector< unsigned, 4 > > LPadToCallSiteMap
Map a landing pad to the call site indexes.
void handleDebugDeclare(Value *Address, DILocalVariable *Variable, DIExpression *Expression, DebugLoc DL)
void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB, BranchProbability BranchProbToNext, unsigned Reg, SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB)
visitBitTestCase - this function produces one "bit test"
bool shouldKeepJumpConditionsTogether(const FunctionLoweringInfo &FuncInfo, const BranchInst &I, Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs, TargetLoweringBase::CondMergingParams Params) const
void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall, bool IsMustTailCall, const BasicBlock *EHPadBB=nullptr)
StatepointLoweringState StatepointLowering
State used while lowering a statepoint sequence (gc_statepoint, gc_relocate, and gc_result).
void init(GCFunctionInfo *gfi, AAResults *AA, AssumptionCache *AC, const TargetLibraryInfo *li)
DenseMap< const Constant *, unsigned > ConstantsOut
void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI, const CallBase *Call, unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy, AttributeSet RetAttrs, bool IsPatchPoint)
Populate a CallLoweringInfo (into CLI) based on the properties of the call being lowered.
void salvageUnresolvedDbgValue(const Value *V, DanglingDebugInfo &DDI)
For the given dangling debuginfo record, perform last-ditch efforts to resolve the debuginfo to somet...
SmallVector< SDValue, 8 > PendingLoads
Loads are not emitted to the program immediately.
GCFunctionInfo * GFI
Garbage collection metadata for the function.
SDValue getRoot()
Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict) items.
void ExportFromCurrentBlock(const Value *V)
ExportFromCurrentBlock - If this condition isn't known to be exported from the current basic block,...
DebugLoc getCurDebugLoc() const
void resolveOrClearDbgInfo()
Evict any dangling debug information, attempting to salvage it first.
std::pair< SDValue, SDValue > lowerInvokable(TargetLowering::CallLoweringInfo &CLI, const BasicBlock *EHPadBB=nullptr)
SDValue getMemoryRoot()
Return the current virtual root of the Selection DAG, flushing any PendingLoad items.
void resolveDanglingDebugInfo(const Value *V, SDValue Val)
If we saw an earlier dbg_value referring to V, generate the debug data structures now that we've seen...
SDLoc getCurSDLoc() const
void visit(const Instruction &I)
void dropDanglingDebugInfo(const DILocalVariable *Variable, const DIExpression *Expr)
If we have dangling debug info that describes Variable, or an overlapping part of variable considerin...
SDValue getCopyFromRegs(const Value *V, Type *Ty)
If there was virtual register allocated for the value V emit CopyFromReg of the specified type Ty.
void CopyToExportRegsIfNeeded(const Value *V)
CopyToExportRegsIfNeeded - If the given value has virtual registers created for it,...
void handleKillDebugValue(DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order)
Create a record for a kill location debug intrinsic.
void visitJumpTable(SwitchCG::JumpTable &JT)
visitJumpTable - Emit JumpTable node in the current MBB
void visitJumpTableHeader(SwitchCG::JumpTable &JT, SwitchCG::JumpTableHeader &JTH, MachineBasicBlock *SwitchBB)
visitJumpTableHeader - This function emits necessary code to produce index in the JumpTable from swit...
static const unsigned LowestSDNodeOrder
Lowest valid SDNodeOrder.
void LowerDeoptimizingReturn()
FunctionLoweringInfo & FuncInfo
Information about the function as a whole.
void setValue(const Value *V, SDValue NewN)
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, Instruction::BinaryOps Opc, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
const TargetLibraryInfo * LibInfo
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB)
void visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineBasicBlock *ParentBB)
Codegen a new tail for a stack protector check ParentMBB which has had its tail spliced into a stack ...
bool handleDebugValue(ArrayRef< const Value * > Values, DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order, bool IsVariadic)
For a given list of Values, attempt to create and record a SDDbgValue in the SelectionDAG.
SDValue getControlRoot()
Similar to getRoot, but instead of flushing all the PendingLoad items, flush all the PendingExports (...
void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last)
When an MBB was split during scheduling, update the references that need to refer to the last resulti...
SDValue getValueImpl(const Value *V)
getValueImpl - Helper function for getValue and getNonRegisterValue.
void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB)
visitSwitchCase - Emits the necessary code to represent a single node in the binary search tree resul...
void visitSPDescriptorFailure(StackProtectorDescriptor &SPD)
Codegen the failure basic block for a stack protector check.
std::unique_ptr< FunctionLoweringInfo > FuncInfo
SmallPtrSet< const Instruction *, 4 > ElidedArgCopyInstrs
const TargetLowering * TLI
virtual void emitFunctionEntryCode()
SwiftErrorValueTracking * SwiftError
std::unique_ptr< SelectionDAGBuilder > SDB
Targets can subclass this to parameterize the SelectionDAG lowering and instruction selection process...
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, SDValue MaxLength, MachinePointerInfo SrcPtrInfo) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a memcmp/bcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dest, SDValue Src, MachinePointerInfo DestPtrInfo, MachinePointerInfo SrcPtrInfo, bool isStpcpy) const
Emit target-specific code that performs a strcpy or stpcpy, in cases where that is faster than a libc...
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Src, SDValue Char, SDValue Length, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memchr, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a strcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, MachinePointerInfo SrcPtrInfo) const
virtual SDValue EmitTargetCodeForSetTag(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Addr, SDValue Size, MachinePointerInfo DstPtrInfo, bool ZeroData) const
Help to insert SDNodeFlags automatically in transforming.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root, MCSymbol *Label)
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
const TargetSubtargetInfo & getSubtarget() const
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
BlockFrequencyInfo * getBFI() const
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Value, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo)
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid, uint64_t Index, uint32_t Attr)
Creates a PseudoProbeSDNode with function GUID Guid and the index of the block Index it is probing,...
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, MachineMemOperand *MMO)
Gets a node for an atomic cmpxchg op.
SDDbgValue * getVRegDbgValue(DIVariable *Var, DIExpression *Expr, unsigned VReg, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a VReg SDDbgValue node.
void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To, unsigned Num)
Like ReplaceAllUsesOfValueWith, but for multiple values at once.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getElementCount(const SDLoc &DL, EVT VT, ElementCount EC, bool ConstantFold=true)
SDValue getGetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A)
Return an AssertAlignSDNode.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
bool shouldOptForSize() const
SDValue getVPZExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be an integer vector, to the vector-type VT,...
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
SDValue getStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
void DeleteNode(SDNode *N)
Remove the specified node from the system.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
ProfileSummaryInfo * getPSI() const
SDValue getTargetFrameIndex(int FI, EVT VT)
SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
const SelectionDAGTargetInfo & getSelectionDAGInfo() const
SDDbgLabel * getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O)
Creates a SDDbgLabel node.
SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue SV, unsigned Align)
VAArg produces a result and token chain, and takes a pointer and a source value as input.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSrcValue(const Value *v)
Construct a node to track a Value* through the backend.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
MaybeAlign InferPtrAlign(SDValue Ptr) const
Infer alignment of a load / store address.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getRegister(unsigned Reg, EVT VT)
void AddDbgLabel(SDDbgLabel *DB)
Add a dbg_label SDNode.
SDValue getBasicBlock(MachineBasicBlock *MBB)
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label)
SDValue getSetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
SDValue getStepVector(const SDLoc &DL, EVT ResVT, APInt StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, bool isTarget=false, unsigned TargetFlags=0)
SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDDbgValue * getConstantDbgValue(DIVariable *Var, DIExpression *Expr, const Value *C, const DebugLoc &DL, unsigned O)
Creates a constant SDDbgValue node.
SDValue getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getValueType(EVT)
SDValue getTargetConstantFP(double Val, const SDLoc &DL, EVT VT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDDbgValue * getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr, unsigned FI, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a FrameIndex SDDbgValue node.
SDValue getJumpTable(int JTI, EVT VT, bool isTarget=false, unsigned TargetFlags=0)
SDValue getVPPtrExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be of integer type, to the vector-type integer type VT,...
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to extend the Op as a pointer value assuming it was the smaller SrcTy ...
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
const FunctionVarLocs * getFunctionVarLocs() const
Returns the result of the AssignmentTrackingAnalysis pass if it's available, otherwise return nullptr...
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getCondCode(ISD::CondCode Cond)
SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain, int FrameIndex, int64_t Size, int64_t Offset=-1)
Creates a LifetimeSDNode that starts (IsStart==true) or ends (IsStart==false) the lifetime of the por...
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
void addPCSections(const SDNode *Node, MDNode *MD)
Set PCSections to be associated with Node.
SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL, bool LegalTypes=true)
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
SDValue getSetCCVP(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Mask, SDValue EVL)
Helper function to make it easier to build VP_SETCCs if you just have an ISD::CondCode instead of an ...
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDDbgValue * getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N, unsigned R, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a SDDbgValue node.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, bool IsTruncating=false)
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void swap(SmallVectorImpl &RHS)
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
MachineBasicBlock * getSuccessMBB()
MachineBasicBlock * getFailureMBB()
void clear()
Clear the memory usage of this object.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
TypeSize getElementOffset(unsigned Idx) const
Class to represent struct types.
void setCurrentVReg(const MachineBasicBlock *MBB, const Value *, Register)
Set the swifterror virtual register in the VRegDefMap for this basic block.
Register getOrCreateVRegUseAt(const Instruction *, const MachineBasicBlock *, const Value *)
Get or create the swifterror value virtual register for a use of a swifterror by an instruction.
Register getOrCreateVRegDefAt(const Instruction *, const MachineBasicBlock *, const Value *)
Get or create the swifterror value virtual register for a def of a swifterror by an instruction.
const Value * getFunctionArg() const
Get the (unique) function argument that was marked swifterror, or nullptr if this function has no swi...
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
TargetIntrinsicInfo - Interface to description of target intrinsic information.
Provides information about what library functions are available for the current target.
bool hasOptimizedCodeGen(LibFunc F) const
Tests if the function is both available and a candidate for optimized code generation.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
void setAttributes(const CallBase *Call, unsigned ArgIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes.
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual bool isLegalScaleForGatherScatter(uint64_t Scale, uint64_t ElemSize) const
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual CondMergingParams getJumpConditionMergingParams(Instruction::BinaryOps, const Value *, const Value *) const
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
bool isJumpExpensive() const
Return true if Flow Control is an expensive operation that should be avoided.
virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const
Returns true if the index type for a masked gather/scatter requires extending.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
virtual Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.stacksave/llvm.stackrestore should save...
LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?...
virtual Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const
Certain targets have context sensitive alignment requirements, where one type has the alignment requi...
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, bool LegalTypes=true) const
Returns the type for the shift amount of a shift opcode.
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
virtual MVT getFenceOperandTy(const DataLayout &DL) const
Return the type for operands of fence.
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF, bool IsScalable) const
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual uint64_t getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Return the desired alignment for ByVal or InAlloca aggregate function arguments in the caller paramet...
virtual MVT hasFastEqualityCompare(unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the give...
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
virtual bool shouldExpandCttzElements(EVT VT) const
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
virtual bool signExtendConstant(const ConstantInt *C) const
Return true if this constant should be sign extended when promoting to a larger type.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
bool supportsUnalignedAtomics() const
Whether the target supports unaligned atomic operations.
std::vector< ArgListEntry > ArgListTy
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool supportKCFIBundles() const
Return true if the target supports kcfi operand bundles.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const
Target-specific splitting of values into parts that fit a register storing a legal type.
virtual SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const
Target-specific combining of register parts into its original value.
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual const char * getClearCacheBuiltinName() const
Return the builtin name for the __builtin___clear_cache intrinsic Default is to invoke the clear cach...
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual SDValue LowerFormalArguments(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::InputArg > &, const SDLoc &, SelectionDAG &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to u...
virtual SDValue LowerReturn(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::OutputArg > &, const SmallVectorImpl< SDValue > &, const SDLoc &, SelectionDAG &) const
This hook must be implemented to lower outgoing return values, described by the Outs array,...
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
virtual bool isInlineAsmTargetBranch(const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use eithe...
virtual const TargetIntrinsicInfo * getIntrinsicInfo() const
If intrinsic information is available, return it. If not, return null.
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
const Triple & getTargetTriple() const
virtual TargetTransformInfo getTargetTransformInfo(const Function &F) const
Return a TargetTransformInfo for a given function.
CodeModel::Model getCodeModel() const
Returns the code model.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
unsigned EnableFastISel
EnableFastISel - This flag enables fast-path instruction selection which trades away generated code q...
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.
unsigned getID() const
Return the register class ID number.
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetInstrInfo * getInstrInfo() const
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
ArchType getArch() const
Get the parsed architecture type of this triple.
bool isPS() const
Tests whether the target is the PS4 or PS5 platform.
bool isWasm() const
Tests whether the target is wasm (32- and 64-bit).
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
TypeID
Definitions of all of the base types for the Type system.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
static Type * getVoidTy(LLVMContext &C)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
bool isVoidTy() const
Return true if this is 'void'.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
CmpInst::Predicate getPredicate() const
This is the common base class for vector predication intrinsics.
static std::optional< unsigned > getVectorLengthParamPos(Intrinsic::ID IntrinsicID)
MaybeAlign getPointerAlignment() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
LLVMContext & getContext() const
All values hold a context through their type.
StringRef getName() const
Return a constant reference to the value's name.
Base class of all SIMD vector types.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ X86_VectorCall
MSVC calling convention that passes vectors and vector aggregates in SSE registers.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to; it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ SET_FPENV
Sets the current floating-point environment.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
@ RESET_FPENV
Set floating-point environment to default state.
@ ADD
Simple integer binary arithmetic operators.
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ SET_FPMODE
Sets the current dynamic floating-point control modes.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ EH_SJLJ_SETUP_DISPATCH
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here.
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ RESET_FPMODE
Sets default dynamic floating-point control modes.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ INIT_TRAMPOLINE
INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
@ ANNOTATION_LABEL
ANNOTATION_LABEL - Represents a mid basic block label used by annotations.
@ SET_ROUNDING
Set rounding mode.
@ SIGN_EXTEND
Conversion operators.
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readfixedcounter intrinsic.
@ ADDROFRETURNADDR
ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
@ BR
Control flow instructions. These all have token chains.
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ SSUBO
Same for subtraction.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the same...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ ARITH_FENCE
ARITH_FENCE - This corresponds to a arithmetic fence intrinsic.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ CLEANUPRET
CLEANUPRET - Represents a return from a cleanup block funclet.
@ GET_FPMODE
Reads the current dynamic floating-point control modes.
@ GET_FPENV
Gets the current floating-point environment.
@ SHL
Shift and rotation operations.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a sc...
@ LOCAL_RECOVER
LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ UBSANTRAP
UBSANTRAP - Trap with an immediate describing the kind of sanitizer failure.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ PCMARKER
PCMARKER - This corresponds to the pcmarker intrinsic.
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ SPONENTRY
SPONENTRY - Represents the llvm.sponentry intrinsic.
@ INLINEASM
INLINEASM - Represents an inline asm block.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ BRCOND
BRCOND - Conditional branch.
@ CATCHRET
CATCHRET - Represents a return from a catch block funclet.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the sa...
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ ADJUST_TRAMPOLINE
ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Flag
These should be considered private to the implementation of the MCInstrDesc class.
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
OneUse_match< T > m_OneUse(const T &SubPattern)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
VScaleVal_match m_VScale()
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
std::vector< CaseCluster > CaseClusterVector
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
CaseClusterVector::iterator CaseClusterIt
std::pair< JumpTableHeader, JumpTable > JumpTableBlock
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebMayTrap
This corresponds to "fpexcept.maytrap".
@ ebIgnore
This corresponds to "fpexcept.ignore".
NodeAddr< FuncNode * > Func
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
int popcount(T Value) noexcept
Count the number of set bits in a value.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
gep_type_iterator gep_type_end(const User *GEP)
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
detail::zippy< detail::zip_first, T, U, Args... > zip_first(T &&t, U &&u, Args &&...args)
zip iterator that, for the sake of efficiency, assumes the first iteratee to be the shortest.
void sort(IteratorTy Start, IteratorTy End)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
bool hasSingleElement(ContainerTy &&C)
Returns true if the given container only contains a single element.
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Global
Append to llvm.global_dtors.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ And
Bitwise or logical AND of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, can return either (or all operands are non-NaN).
DWARFExpression::Operation Op
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if w...
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
gep_type_iterator gep_type_begin(const User *GEP)
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the largest uint64_t less than or equal to Value and is Skew mod Align.
unsigned succ_size(const MachineBasicBlock *BB)
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
static const fltSemantics & IEEEsingle() LLVM_READNONE
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
uint64_t getScalarStoreSize() const
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isScalableVT() const
Return true if the type is a scalable type.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
void setPointerAddrSpace(unsigned AS)
void setOrigAlign(Align A)
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
This struct represents the registers (physical or virtual) that a particular set of values is assigne...
SmallVector< unsigned, 4 > Regs
This list holds the registers assigned to the values.
SmallVector< unsigned, 4 > RegCount
This list holds the number of registers for each value.
bool isABIMangled() const
SmallVector< EVT, 4 > ValueVTs
The value types of the values, which may not be legal, and may need be promoted or synthesized from o...
SmallVector< std::pair< unsigned, TypeSize >, 4 > getRegsAndSizes() const
Return a list of registers and their sizes.
void AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching, unsigned MatchingIdx, const SDLoc &dl, SelectionDAG &DAG, std::vector< SDValue > &Ops) const
Add this value to the specified inlineasm node operand list.
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr) const
Emit a series of CopyFromReg nodes that copies from this value and returns the result as a ValueVTs v...
SmallVector< MVT, 4 > RegVTs
The value types of the registers.
void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr, ISD::NodeType PreferredExtendType=ISD::ANY_EXTEND) const
Emit a series of CopyToReg nodes that copies the specified value into the registers specified by this...
std::optional< CallingConv::ID > CallConv
Records if this value needs to be treated in an ABI dependant manner, different to normal type legali...
bool occupiesMultipleRegs() const
Check if the total RegCount is greater than one.
These are IR-level optimization flags that may be propagated to SDNodes.
void copyFMF(const FPMathOperator &FPMO)
Propagate the fast-math-flags from an IR FPMathOperator.
bool hasAllowReassociation() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
A MapVector that performs no allocations if smaller than a certain size.
MachineBasicBlock * Default
BranchProbability DefaultProb
MachineBasicBlock * Parent
bool FallthroughUnreachable
MachineBasicBlock * ThisBB
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...
BranchProbability TrueProb
BranchProbability FalseProb
MachineBasicBlock * TrueBB
MachineBasicBlock * FalseBB
SDLoc DL
The debug location of the instruction this CaseBlock was produced from.
A cluster of case labels.
static CaseCluster range(const ConstantInt *Low, const ConstantInt *High, MachineBasicBlock *MBB, BranchProbability Prob)
This contains information for each constraint that we are lowering.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setConvergent(bool Value=true)
CallLoweringInfo & setCFIType(const ConstantInt *Type)
SmallVector< ISD::InputArg, 32 > Ins
bool IsPostTypeLegalization
SmallVector< SDValue, 4 > InVals
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setIsPreallocated(bool Value=true)
CallLoweringInfo & setConvergenceControlToken(SDValue Token)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setDiscardResult(bool Value=true)
void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin, MCSymbol *InvokeEnd)