79#include "llvm/IR/IntrinsicsAArch64.h"
80#include "llvm/IR/IntrinsicsAMDGPU.h"
81#include "llvm/IR/IntrinsicsWebAssembly.h"
114#define DEBUG_TYPE "isel"
122 cl::desc(
"Insert the experimental `assertalign` node."),
127 cl::desc(
"Generate low-precision inline sequences "
128 "for some float libcalls"),
134 cl::desc(
"Set the case probability threshold for peeling the case from a "
135 "switch statement. A value greater than 100 will void this "
155 const SDValue *Parts,
unsigned NumParts,
158 std::optional<CallingConv::ID> CC);
167 unsigned NumParts,
MVT PartVT,
EVT ValueVT,
const Value *V,
169 std::optional<CallingConv::ID> CC = std::nullopt,
170 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
174 PartVT, ValueVT, CC))
181 assert(NumParts > 0 &&
"No parts to assemble!");
192 unsigned RoundBits = PartBits * RoundParts;
193 EVT RoundVT = RoundBits == ValueBits ?
199 if (RoundParts > 2) {
203 PartVT, HalfVT, V, InChain);
214 if (RoundParts < NumParts) {
216 unsigned OddParts = NumParts - RoundParts;
219 OddVT, V, InChain, CC);
235 assert(ValueVT ==
EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
246 !PartVT.
isVector() &&
"Unexpected split");
258 if (PartEVT == ValueVT)
262 ValueVT.
bitsLT(PartEVT)) {
275 if (ValueVT.
bitsLT(PartEVT)) {
280 Val = DAG.
getNode(*AssertOp,
DL, PartEVT, Val,
295 llvm::Attribute::StrictFP)) {
297 DAG.
getVTList(ValueVT, MVT::Other), InChain, Val,
309 if (PartEVT == MVT::x86mmx && ValueVT.
isInteger() &&
310 ValueVT.
bitsLT(PartEVT)) {
319 const Twine &ErrMsg) {
322 return Ctx.emitError(ErrMsg);
325 if (CI->isInlineAsm()) {
327 *CI, ErrMsg +
", possible invalid constraint for vector type"));
330 return Ctx.emitError(
I, ErrMsg);
339 const SDValue *Parts,
unsigned NumParts,
342 std::optional<CallingConv::ID> CallConv) {
344 assert(NumParts > 0 &&
"No parts to assemble!");
345 const bool IsABIRegCopy = CallConv.has_value();
354 unsigned NumIntermediates;
359 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT,
360 NumIntermediates, RegisterVT);
364 NumIntermediates, RegisterVT);
367 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
369 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
372 "Part type sizes don't match!");
376 if (NumIntermediates == NumParts) {
379 for (
unsigned i = 0; i != NumParts; ++i)
381 V, InChain, CallConv);
382 }
else if (NumParts > 0) {
385 assert(NumParts % NumIntermediates == 0 &&
386 "Must expand into a divisible number of parts!");
387 unsigned Factor = NumParts / NumIntermediates;
388 for (
unsigned i = 0; i != NumIntermediates; ++i)
390 IntermediateVT, V, InChain, CallConv);
405 DL, BuiltVectorTy,
Ops);
411 if (PartEVT == ValueVT)
427 "Cannot narrow, it would be a lossy transformation");
433 if (PartEVT == ValueVT)
458 }
else if (ValueVT.
bitsLT(PartEVT)) {
467 *DAG.
getContext(), V,
"non-trivial scalar-to-vector conversion");
498 std::optional<CallingConv::ID> CallConv);
505 unsigned NumParts,
MVT PartVT,
const Value *V,
506 std::optional<CallingConv::ID> CallConv = std::nullopt,
520 unsigned OrigNumParts = NumParts;
522 "Copying to an illegal type!");
528 EVT PartEVT = PartVT;
529 if (PartEVT == ValueVT) {
530 assert(NumParts == 1 &&
"No-op copy with multiple parts!");
539 assert(NumParts == 1 &&
"Do not know what to promote to!");
550 "Unknown mismatch!");
552 Val = DAG.
getNode(ExtendKind,
DL, ValueVT, Val);
553 if (PartVT == MVT::x86mmx)
558 assert(NumParts == 1 && PartEVT != ValueVT);
564 "Unknown mismatch!");
567 if (PartVT == MVT::x86mmx)
574 "Failed to tile the value with PartVT!");
577 if (PartEVT != ValueVT) {
579 "scalar-to-vector conversion failed");
588 if (NumParts & (NumParts - 1)) {
591 "Do not know what to expand to!");
593 unsigned RoundBits = RoundParts * PartBits;
594 unsigned OddParts = NumParts - RoundParts;
603 std::reverse(Parts + RoundParts, Parts + NumParts);
605 NumParts = RoundParts;
617 for (
unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
618 for (
unsigned i = 0; i < NumParts; i += StepSize) {
619 unsigned ThisBits = StepSize * PartBits / 2;
622 SDValue &Part1 = Parts[i+StepSize/2];
629 if (ThisBits == PartBits && ThisVT != PartVT) {
637 std::reverse(Parts, Parts + OrigNumParts);
659 if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
661 "Cannot widen to illegal type");
665 }
else if (PartEVT != ValueEVT) {
680 Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
691 std::optional<CallingConv::ID> CallConv) {
695 const bool IsABIRegCopy = CallConv.has_value();
698 EVT PartEVT = PartVT;
699 if (PartEVT == ValueVT) {
745 "lossy conversion of vector to scalar type");
760 unsigned NumIntermediates;
764 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
769 NumIntermediates, RegisterVT);
772 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
774 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
777 "Mixing scalable and fixed vectors when copying in parts");
779 std::optional<ElementCount> DestEltCnt;
789 if (ValueVT == BuiltVectorTy) {
813 for (
unsigned i = 0; i != NumIntermediates; ++i) {
828 if (NumParts == NumIntermediates) {
831 for (
unsigned i = 0; i != NumParts; ++i)
833 }
else if (NumParts > 0) {
836 assert(NumIntermediates != 0 &&
"division by zero");
837 assert(NumParts % NumIntermediates == 0 &&
838 "Must expand into a divisible number of parts!");
839 unsigned Factor = NumParts / NumIntermediates;
840 for (
unsigned i = 0; i != NumIntermediates; ++i)
848 if (
I.hasOperandBundlesOtherThan(AllowedBundles)) {
852 for (
unsigned i = 0, e =
I.getNumOperandBundles(); i != e; ++i) {
855 OS << LS << U.getTagName();
858 Twine(
"cannot lower ", Name)
864 EVT valuevt, std::optional<CallingConv::ID> CC)
870 std::optional<CallingConv::ID> CC) {
884 for (
unsigned i = 0; i != NumRegs; ++i)
885 Regs.push_back(Reg + i);
886 RegVTs.push_back(RegisterVT);
888 Reg = Reg.id() + NumRegs;
915 for (
unsigned i = 0; i != NumRegs; ++i) {
921 *Glue =
P.getValue(2);
924 Chain =
P.getValue(1);
952 EVT FromVT(MVT::Other);
956 }
else if (NumSignBits > 1) {
964 assert(FromVT != MVT::Other);
970 RegisterVT, ValueVT, V, Chain,
CallConv);
986 unsigned NumRegs =
Regs.size();
1000 NumParts, RegisterVT, V,
CallConv, ExtendKind);
1006 for (
unsigned i = 0; i != NumRegs; ++i) {
1018 if (NumRegs == 1 || Glue)
1029 Chain = Chains[NumRegs-1];
1035 unsigned MatchingIdx,
const SDLoc &dl,
1037 std::vector<SDValue> &
Ops)
const {
1042 Flag.setMatchingOp(MatchingIdx);
1043 else if (!
Regs.empty() &&
Regs.front().isVirtual()) {
1051 Flag.setRegClass(RC->
getID());
1062 "No 1:1 mapping from clobbers to regs?");
1065 for (
unsigned I = 0, E =
ValueVTs.size();
I != E; ++
I) {
1070 "If we clobbered the stack pointer, MFI should know about it.");
1079 for (
unsigned i = 0; i != NumRegs; ++i) {
1080 assert(Reg <
Regs.size() &&
"Mismatch in # registers expected");
1092 unsigned RegCount = std::get<0>(CountAndVT);
1093 MVT RegisterVT = std::get<1>(CountAndVT);
1111 SL->init(
DAG.getTargetLoweringInfo(), TM,
DAG.getDataLayout());
1113 *
DAG.getMachineFunction().getFunction().getParent());
1118 UnusedArgNodeMap.clear();
1120 PendingExports.clear();
1121 PendingConstrainedFP.clear();
1122 PendingConstrainedFPStrict.clear();
1130 DanglingDebugInfoMap.clear();
1137 if (Pending.
empty())
1143 unsigned i = 0, e = Pending.
size();
1144 for (; i != e; ++i) {
1146 if (Pending[i].
getNode()->getOperand(0) == Root)
1154 if (Pending.
size() == 1)
1181 if (!PendingConstrainedFPStrict.empty()) {
1182 assert(PendingConstrainedFP.empty());
1183 updateRoot(PendingConstrainedFPStrict);
1196 if (!PendingConstrainedFP.empty()) {
1197 assert(PendingConstrainedFPStrict.empty());
1198 updateRoot(PendingConstrainedFP);
1202 return DAG.getRoot();
1210 PendingConstrainedFP.size() +
1211 PendingConstrainedFPStrict.size());
1213 PendingConstrainedFP.end());
1214 PendingLoads.append(PendingConstrainedFPStrict.begin(),
1215 PendingConstrainedFPStrict.end());
1216 PendingConstrainedFP.clear();
1217 PendingConstrainedFPStrict.clear();
1224 PendingExports.append(PendingConstrainedFPStrict.begin(),
1225 PendingConstrainedFPStrict.end());
1226 PendingConstrainedFPStrict.clear();
1227 return updateRoot(PendingExports);
1234 assert(Variable &&
"Missing variable");
1241 <<
"dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
1257 if (IsParameter && FINode) {
1259 SDV =
DAG.getFrameIndexDbgValue(Variable,
Expression, FINode->getIndex(),
1260 true,
DL, SDNodeOrder);
1265 FuncArgumentDbgValueKind::Declare,
N);
1268 SDV =
DAG.getDbgValue(Variable,
Expression,
N.getNode(),
N.getResNo(),
1269 true,
DL, SDNodeOrder);
1271 DAG.AddDbgValue(SDV, IsParameter);
1276 FuncArgumentDbgValueKind::Declare,
N)) {
1278 <<
" (could not emit func-arg dbg_value)\n");
1289 for (
auto It = FnVarLocs->locs_begin(&
I), End = FnVarLocs->locs_end(&
I);
1291 auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1293 if (It->Values.isKillLocation(It->Expr)) {
1299 It->Values.hasArgList())) {
1302 FnVarLocs->getDILocalVariable(It->VariableID),
1303 It->Expr, Vals.
size() > 1, It->DL, SDNodeOrder);
1316 bool SkipDbgVariableRecords =
DAG.getFunctionVarLocs();
1319 for (
DbgRecord &DR :
I.getDbgRecordRange()) {
1321 assert(DLR->getLabel() &&
"Missing label");
1323 DAG.getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);
1324 DAG.AddDbgLabel(SDV);
1328 if (SkipDbgVariableRecords)
1336 if (
FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
1338 LLVM_DEBUG(
dbgs() <<
"SelectionDAG visiting dbg_declare: " << DVR
1347 if (Values.
empty()) {
1364 SDNodeOrder, IsVariadic)) {
1375 if (
I.isTerminator()) {
1376 HandlePHINodesInSuccessorBlocks(
I.getParent());
1383 bool NodeInserted =
false;
1384 std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1385 MDNode *PCSectionsMD =
I.getMetadata(LLVMContext::MD_pcsections);
1386 MDNode *MMRA =
I.getMetadata(LLVMContext::MD_mmra);
1387 if (PCSectionsMD || MMRA) {
1388 InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1389 DAG, [&](
SDNode *) { NodeInserted =
true; });
1399 if (PCSectionsMD || MMRA) {
1400 auto It = NodeMap.find(&
I);
1401 if (It != NodeMap.end()) {
1403 DAG.addPCSections(It->second.getNode(), PCSectionsMD);
1405 DAG.addMMRAMetadata(It->second.getNode(), MMRA);
1406 }
else if (NodeInserted) {
1409 errs() <<
"warning: loosing !pcsections and/or !mmra metadata ["
1410 <<
I.getModule()->getName() <<
"]\n";
1419void SelectionDAGBuilder::visitPHI(
const PHINode &) {
1429#define HANDLE_INST(NUM, OPCODE, CLASS) \
1430 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1431#include "llvm/IR/Instruction.def"
1443 for (
const Value *V : Values) {
1468 DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr,
DL, Order);
1473 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1474 DIVariable *DanglingVariable = DDI.getVariable();
1476 if (DanglingVariable == Variable && Expr->
fragmentsOverlap(DanglingExpr)) {
1478 << printDDI(
nullptr, DDI) <<
"\n");
1484 for (
auto &DDIMI : DanglingDebugInfoMap) {
1485 DanglingDebugInfoVector &DDIV = DDIMI.second;
1489 for (
auto &DDI : DDIV)
1490 if (isMatchingDbgValue(DDI))
1493 erase_if(DDIV, isMatchingDbgValue);
1501 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1502 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1505 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1506 for (
auto &DDI : DDIV) {
1508 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1511 assert(Variable->isValidLocationForIntrinsic(
DL) &&
1512 "Expected inlined-at fields to agree");
1522 if (!EmitFuncArgumentDbgValue(V, Variable, Expr,
DL,
1523 FuncArgumentDbgValueKind::Value, Val)) {
1525 << printDDI(V, DDI) <<
"\n");
1532 <<
"changing SDNodeOrder from " << DbgSDNodeOrder <<
" to "
1533 << ValSDNodeOrder <<
"\n");
1534 SDV = getDbgValue(Val, Variable, Expr,
DL,
1535 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1536 DAG.AddDbgValue(SDV,
false);
1540 <<
" in EmitFuncArgumentDbgValue\n");
1542 LLVM_DEBUG(
dbgs() <<
"Dropping debug info for " << printDDI(V, DDI)
1546 DAG.getConstantDbgValue(Variable, Expr,
Poison,
DL, DbgSDNodeOrder);
1547 DAG.AddDbgValue(SDV,
false);
1554 DanglingDebugInfo &DDI) {
1559 const Value *OrigV = V;
1563 unsigned SDOrder = DDI.getSDNodeOrder();
1567 bool StackValue =
true;
1592 if (!AdditionalValues.
empty())
1602 dbgs() <<
"Salvaged debug location info for:\n " << *Var <<
"\n"
1603 << *OrigV <<
"\nBy stripping back to:\n " << *V <<
"\n");
1611 assert(OrigV &&
"V shouldn't be null");
1613 auto *SDV =
DAG.getConstantDbgValue(Var, Expr,
Poison,
DL, SDNodeOrder);
1614 DAG.AddDbgValue(SDV,
false);
1616 << printDDI(OrigV, DDI) <<
"\n");
1633 unsigned Order,
bool IsVariadic) {
1638 if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
1643 for (
const Value *V : Values) {
1653 if (CE->getOpcode() == Instruction::IntToPtr) {
1672 N = UnusedArgNodeMap[V];
1677 EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1678 FuncArgumentDbgValueKind::Value,
N))
1705 bool IsParamOfFunc =
1713 auto VMI =
FuncInfo.ValueMap.find(V);
1714 if (VMI !=
FuncInfo.ValueMap.end()) {
1719 V->getType(), std::nullopt);
1725 unsigned BitsToDescribe = 0;
1727 BitsToDescribe = *VarSize;
1729 BitsToDescribe = Fragment->SizeInBits;
1732 if (
Offset >= BitsToDescribe)
1735 unsigned RegisterSize = RegAndSize.second;
1736 unsigned FragmentSize = (
Offset + RegisterSize > BitsToDescribe)
1737 ? BitsToDescribe -
Offset
1740 Expr,
Offset, FragmentSize);
1744 Var, *FragmentExpr, RegAndSize.first,
false, DbgLoc, Order);
1745 DAG.AddDbgValue(SDV,
false);
1761 DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1762 false, DbgLoc, Order, IsVariadic);
1763 DAG.AddDbgValue(SDV,
false);
1769 for (
auto &Pair : DanglingDebugInfoMap)
1770 for (
auto &DDI : Pair.second)
1781 if (It !=
FuncInfo.ValueMap.end()) {
1785 DAG.getDataLayout(), InReg, Ty,
1802 if (
N.getNode())
return N;
1862 return DAG.getSplatBuildVector(
1865 return DAG.getConstant(*CI,
DL, VT);
1874 getValue(CPA->getAddrDiscriminator()),
1875 getValue(CPA->getDiscriminator()));
1891 visit(CE->getOpcode(), *CE);
1893 assert(N1.
getNode() &&
"visit didn't populate the NodeMap!");
1899 for (
const Use &U :
C->operands()) {
1905 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1906 Constants.push_back(
SDValue(Val, i));
1915 for (
uint64_t i = 0, e = CDS->getNumElements(); i != e; ++i) {
1919 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1928 if (
C->getType()->isStructTy() ||
C->getType()->isArrayTy()) {
1930 "Unknown struct or array constant!");
1934 unsigned NumElts = ValueVTs.
size();
1938 for (
unsigned i = 0; i != NumElts; ++i) {
1939 EVT EltVT = ValueVTs[i];
1941 Constants[i] =
DAG.getUNDEF(EltVT);
1952 return DAG.getBlockAddress(BA, VT);
1955 return getValue(Equiv->getGlobalValue());
1960 if (VT == MVT::aarch64svcount) {
1961 assert(
C->isNullValue() &&
"Can only zero this target type!");
1967 assert(
C->isNullValue() &&
"Can only zero this target type!");
1984 for (
unsigned i = 0; i != NumElements; ++i)
2012 return DAG.getFrameIndex(
2020 std::optional<CallingConv::ID> CallConv;
2022 if (CB && !CB->isInlineAsm())
2023 CallConv = CB->getCallingConv();
2026 Inst->getType(), CallConv);
2040void SelectionDAGBuilder::visitCatchPad(
const CatchPadInst &
I) {
2053 if (IsMSVCCXX || IsCoreCLR)
2059 MachineBasicBlock *TargetMBB =
FuncInfo.getMBB(
I.getSuccessor());
2060 FuncInfo.MBB->addSuccessor(TargetMBB);
2067 if (TargetMBB != NextBlock(
FuncInfo.MBB) ||
2076 DAG.getMachineFunction().setHasEHContTarget(
true);
2082 Value *ParentPad =
I.getCatchSwitchParentPad();
2085 SuccessorColor = &
FuncInfo.Fn->getEntryBlock();
2088 assert(SuccessorColor &&
"No parent funclet for catchret!");
2089 MachineBasicBlock *SuccessorColorMBB =
FuncInfo.getMBB(SuccessorColor);
2090 assert(SuccessorColorMBB &&
"No MBB for SuccessorColor!");
2095 DAG.getBasicBlock(SuccessorColorMBB));
2099void SelectionDAGBuilder::visitCleanupPad(
const CleanupPadInst &CPI) {
2105 FuncInfo.MBB->setIsEHFuncletEntry();
2106 FuncInfo.MBB->setIsCleanupFuncletEntry();
2135 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2141 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2142 UnwindDests.back().first->setIsEHScopeEntry();
2145 UnwindDests.back().first->setIsEHFuncletEntry();
2149 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2150 UnwindDests.emplace_back(FuncInfo.
getMBB(CatchPadBB), Prob);
2152 if (IsMSVCCXX || IsCoreCLR)
2153 UnwindDests.back().first->setIsEHFuncletEntry();
2155 UnwindDests.back().first->setIsEHScopeEntry();
2157 NewEHPadBB = CatchSwitch->getUnwindDest();
2163 if (BPI && NewEHPadBB)
2165 EHPadBB = NewEHPadBB;
2172 auto UnwindDest =
I.getUnwindDest();
2173 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
2174 BranchProbability UnwindDestProb =
2179 for (
auto &UnwindDest : UnwindDests) {
2180 UnwindDest.first->setIsEHPad();
2181 addSuccessorWithProb(
FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
2183 FuncInfo.MBB->normalizeSuccProbs();
2186 MachineBasicBlock *CleanupPadMBB =
2187 FuncInfo.getMBB(
I.getCleanupPad()->getParent());
2193void SelectionDAGBuilder::visitCatchSwitch(
const CatchSwitchInst &CSI) {
2197void SelectionDAGBuilder::visitRet(
const ReturnInst &
I) {
2198 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
2199 auto &
DL =
DAG.getDataLayout();
2211 if (
I.getParent()->getTerminatingDeoptimizeCall()) {
2228 SmallVector<uint64_t, 4>
Offsets;
2231 unsigned NumValues = ValueVTs.
size();
2234 Align BaseAlign =
DL.getPrefTypeAlign(
I.getOperand(0)->getType());
2235 for (
unsigned i = 0; i != NumValues; ++i) {
2242 if (MemVTs[i] != ValueVTs[i])
2244 Chains[i] =
DAG.getStore(
2252 MVT::Other, Chains);
2253 }
else if (
I.getNumOperands() != 0) {
2256 unsigned NumValues =
Types.size();
2260 const Function *
F =
I.getParent()->getParent();
2263 I.getOperand(0)->getType(),
F->getCallingConv(),
2267 if (
F->getAttributes().hasRetAttr(Attribute::SExt))
2269 else if (
F->getAttributes().hasRetAttr(Attribute::ZExt))
2272 LLVMContext &
Context =
F->getContext();
2273 bool RetInReg =
F->getAttributes().hasRetAttr(Attribute::InReg);
2275 for (
unsigned j = 0;
j != NumValues; ++
j) {
2288 &Parts[0], NumParts, PartVT, &
I, CC, ExtendKind);
2291 ISD::ArgFlagsTy
Flags = ISD::ArgFlagsTy();
2295 if (
I.getOperand(0)->getType()->isPointerTy()) {
2297 Flags.setPointerAddrSpace(
2301 if (NeedsRegBlock) {
2302 Flags.setInConsecutiveRegs();
2303 if (j == NumValues - 1)
2304 Flags.setInConsecutiveRegsLast();
2312 else if (
F->getAttributes().hasRetAttr(Attribute::NoExt))
2315 for (
unsigned i = 0; i < NumParts; ++i) {
2318 VT, Types[j], 0, 0));
2328 const Function *
F =
I.getParent()->getParent();
2330 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2332 ISD::ArgFlagsTy
Flags = ISD::ArgFlagsTy();
2333 Flags.setSwiftError();
2345 bool isVarArg =
DAG.getMachineFunction().getFunction().isVarArg();
2347 DAG.getMachineFunction().getFunction().getCallingConv();
2348 Chain =
DAG.getTargetLoweringInfo().LowerReturn(
2353 "LowerReturn didn't return a valid chain!");
2364 if (V->getType()->isEmptyTy())
2368 if (VMI !=
FuncInfo.ValueMap.end()) {
2370 "Unused value assigned virtual registers!");
2383 if (
FuncInfo.isExportedInst(V))
return;
2395 if (VI->getParent() == FromBB)
2421 const BasicBlock *SrcBB = Src->getBasicBlock();
2422 const BasicBlock *DstBB = Dst->getBasicBlock();
2426 auto SuccSize = std::max<uint32_t>(
succ_size(SrcBB), 1);
2436 Src->addSuccessorWithoutProb(Dst);
2439 Prob = getEdgeProbability(Src, Dst);
2440 Src->addSuccessor(Dst, Prob);
2446 return I->getParent() == BB;
2470 if (CurBB == SwitchBB ||
2476 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2481 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2483 if (TM.Options.NoNaNsFPMath)
2487 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1),
nullptr,
2489 SL->SwitchCases.push_back(CB);
2498 SL->SwitchCases.push_back(CB);
2506 unsigned Depth = 0) {
2515 if (Necessary !=
nullptr) {
2518 if (Necessary->contains(
I))
2537 if (
I.getNumSuccessors() != 2)
2540 if (!
I.isConditional())
2552 if (BPI !=
nullptr) {
2558 std::optional<bool> Likely;
2561 else if (BPI->
isEdgeHot(
I.getParent(), IfFalse))
2565 if (
Opc == (*Likely ? Instruction::And : Instruction::Or))
2577 if (CostThresh <= 0)
2598 Value *BrCond =
I.getCondition();
2599 auto ShouldCountInsn = [&RhsDeps, &BrCond](
const Instruction *Ins) {
2600 for (
const auto *U : Ins->users()) {
2603 if (UIns != BrCond && !RhsDeps.
contains(UIns))
2616 for (
unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
2618 for (
const auto &InsPair : RhsDeps) {
2619 if (!ShouldCountInsn(InsPair.first)) {
2620 ToDrop = InsPair.first;
2624 if (ToDrop ==
nullptr)
2626 RhsDeps.erase(ToDrop);
2629 for (
const auto &InsPair : RhsDeps) {
2634 CostOfIncluding +=
TTI->getInstructionCost(
2637 if (CostOfIncluding > CostThresh)
2663 const Value *BOpOp0, *BOpOp1;
2677 if (BOpc == Instruction::And)
2678 BOpc = Instruction::Or;
2679 else if (BOpc == Instruction::Or)
2680 BOpc = Instruction::And;
2686 bool BOpIsInOrAndTree = BOpc && BOpc ==
Opc && BOp->
hasOneUse();
2691 TProb, FProb, InvertCond);
2701 if (
Opc == Instruction::Or) {
2722 auto NewTrueProb = TProb / 2;
2723 auto NewFalseProb = TProb / 2 + FProb;
2726 NewFalseProb, InvertCond);
2733 Probs[1], InvertCond);
2735 assert(
Opc == Instruction::And &&
"Unknown merge op!");
2755 auto NewTrueProb = TProb + FProb / 2;
2756 auto NewFalseProb = FProb / 2;
2759 NewFalseProb, InvertCond);
2766 Probs[1], InvertCond);
2775 if (Cases.size() != 2)
return true;
2779 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2780 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2781 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2782 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2788 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2789 Cases[0].CC == Cases[1].CC &&
2792 if (Cases[0].CC ==
ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2794 if (Cases[0].CC ==
ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2801void SelectionDAGBuilder::visitBr(
const BranchInst &
I) {
2807 if (
I.isUnconditional()) {
2813 if (Succ0MBB != NextBlock(BrMBB) ||
2826 const Value *CondVal =
I.getCondition();
2827 MachineBasicBlock *Succ1MBB =
FuncInfo.getMBB(
I.getSuccessor(1));
2846 bool IsUnpredictable =
I.hasMetadata(LLVMContext::MD_unpredictable);
2848 if (!
DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2851 const Value *BOp0, *BOp1;
2854 Opcode = Instruction::And;
2856 Opcode = Instruction::Or;
2863 DAG.getTargetLoweringInfo().getJumpConditionMergingParams(
2864 Opcode, BOp0, BOp1))) {
2866 getEdgeProbability(BrMBB, Succ0MBB),
2867 getEdgeProbability(BrMBB, Succ1MBB),
2872 assert(
SL->SwitchCases[0].ThisBB == BrMBB &&
"Unexpected lowering!");
2876 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i) {
2883 SL->SwitchCases.erase(
SL->SwitchCases.begin());
2889 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i)
2890 FuncInfo.MF->erase(
SL->SwitchCases[i].ThisBB);
2892 SL->SwitchCases.clear();
2898 nullptr, Succ0MBB, Succ1MBB, BrMBB,
getCurSDLoc(),
2919 if (CB.
TrueBB != NextBlock(SwitchBB)) {
2926 auto &TLI =
DAG.getTargetLoweringInfo();
2950 Cond =
DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.
CC);
2962 Cond =
DAG.getSetCC(dl, MVT::i1, CmpOp,
DAG.getConstant(
High, dl, VT),
2966 VT, CmpOp,
DAG.getConstant(
Low, dl, VT));
2967 Cond =
DAG.getSetCC(dl, MVT::i1, SUB,
2982 if (CB.
TrueBB == NextBlock(SwitchBB)) {
2998 BrCond =
DAG.getNode(
ISD::BR, dl, MVT::Other, BrCond,
3001 DAG.setRoot(BrCond);
3007 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
3008 assert(JT.Reg &&
"Should lower JT Header first!");
3009 EVT PTy =
DAG.getTargetLoweringInfo().getJumpTableRegTy(
DAG.getDataLayout());
3011 SDValue Table =
DAG.getJumpTable(JT.JTI, PTy);
3013 Index.getValue(1), Table, Index);
3014 DAG.setRoot(BrJumpTable);
3022 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
3023 const SDLoc &dl = *JT.SL;
3029 DAG.getConstant(JTH.
First, dl, VT));
3044 JT.Reg = JumpTableReg;
3052 Sub.getValueType()),
3056 MVT::Other, CopyTo, CMP,
3057 DAG.getBasicBlock(JT.Default));
3060 if (JT.MBB != NextBlock(SwitchBB))
3061 BrCond =
DAG.getNode(
ISD::BR, dl, MVT::Other, BrCond,
3062 DAG.getBasicBlock(JT.MBB));
3064 DAG.setRoot(BrCond);
3067 if (JT.MBB != NextBlock(SwitchBB))
3069 DAG.getBasicBlock(JT.MBB)));
3071 DAG.setRoot(CopyTo);
3094 if (PtrTy != PtrMemTy)
3110 auto &
DL =
DAG.getDataLayout();
3119 SDValue StackSlotPtr =
DAG.getFrameIndex(FI, PtrTy);
3126 PtrMemTy, dl,
DAG.getEntryNode(), StackSlotPtr,
3139 assert(GuardCheckFn &&
"Guard check function is null");
3150 Entry.IsInReg =
true;
3151 Args.push_back(Entry);
3157 getValue(GuardCheckFn), std::move(Args));
3159 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
3160 DAG.setRoot(Result.second);
3172 Guard =
DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
3178 Guard =
DAG.getPOISON(PtrMemTy);
3221 auto &
DL =
DAG.getDataLayout();
3229 SDValue StackSlotPtr =
DAG.getFrameIndex(FI, PtrTy);
3235 PtrMemTy, dl,
DAG.getEntryNode(), StackSlotPtr,
3250 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
3251 Entry.IsInReg =
true;
3252 Args.push_back(Entry);
3258 getValue(GuardCheckFn), std::move(Args));
3264 Chain = TLI.
makeLibCall(
DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
3287 DAG.getNode(
ISD::SUB, dl, VT, SwitchOp,
DAG.getConstant(
B.First, dl, VT));
3291 bool UsePtrType =
false;
3315 if (!
B.FallthroughUnreachable)
3316 addSuccessorWithProb(SwitchBB,
B.Default,
B.DefaultProb);
3317 addSuccessorWithProb(SwitchBB,
MBB,
B.Prob);
3321 if (!
B.FallthroughUnreachable) {
3330 DAG.getBasicBlock(
B.Default));
3334 if (
MBB != NextBlock(SwitchBB))
3352 if (PopCount == 1) {
3359 }
else if (PopCount == BB.
Range) {
3367 DAG.getConstant(1, dl, VT), ShiftOp);
3371 VT, SwitchVal,
DAG.getConstant(
B.Mask, dl, VT));
3378 addSuccessorWithProb(SwitchBB,
B.TargetBB,
B.ExtraProb);
3380 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3388 Cmp,
DAG.getBasicBlock(
B.TargetBB));
3391 if (NextMBB != NextBlock(SwitchBB))
3392 BrAnd =
DAG.getNode(
ISD::BR, dl, MVT::Other, BrAnd,
3393 DAG.getBasicBlock(NextMBB));
3398void SelectionDAGBuilder::visitInvoke(
const InvokeInst &
I) {
3416 const Value *Callee(
I.getCalledOperand());
3419 visitInlineAsm(
I, EHPadBB);
3424 case Intrinsic::donothing:
3426 case Intrinsic::seh_try_begin:
3427 case Intrinsic::seh_scope_begin:
3428 case Intrinsic::seh_try_end:
3429 case Intrinsic::seh_scope_end:
3435 case Intrinsic::experimental_patchpoint_void:
3436 case Intrinsic::experimental_patchpoint:
3437 visitPatchpoint(
I, EHPadBB);
3439 case Intrinsic::experimental_gc_statepoint:
3445 case Intrinsic::wasm_throw: {
3447 std::array<SDValue, 4>
Ops = {
3458 case Intrinsic::wasm_rethrow: {
3459 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3460 std::array<SDValue, 2>
Ops = {
3469 }
else if (
I.hasDeoptState()) {
3490 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
3491 BranchProbability EHPadBBProb =
3497 addSuccessorWithProb(InvokeMBB, Return);
3498 for (
auto &UnwindDest : UnwindDests) {
3499 UnwindDest.first->setIsEHPad();
3500 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3506 DAG.getBasicBlock(Return)));
3515void SelectionDAGBuilder::visitCallBrIntrinsic(
const CallBrInst &
I) {
3516 TargetLowering::IntrinsicInfo
Info;
3517 assert(!
DAG.getTargetLoweringInfo().getTgtMemIntrinsic(
3518 Info,
I,
DAG.getMachineFunction(),
I.getIntrinsicID()) &&
3519 "Intrinsic touches memory");
3521 auto [HasChain, OnlyLoad] = getTargetIntrinsicCallProperties(
I);
3524 getTargetIntrinsicOperands(
I, HasChain, OnlyLoad);
3525 SDVTList VTs = getTargetIntrinsicVTList(
I, HasChain);
3529 getTargetNonMemIntrinsicNode(*
I.getType(), HasChain,
Ops, VTs);
3530 Result = handleTargetIntrinsicRet(
I, HasChain, OnlyLoad, Result);
3535void SelectionDAGBuilder::visitCallBr(
const CallBrInst &
I) {
3536 MachineBasicBlock *CallBrMBB =
FuncInfo.MBB;
3538 if (
I.isInlineAsm()) {
3545 assert(!
I.hasOperandBundles() &&
3546 "Can't have operand bundles for intrinsics");
3547 visitCallBrIntrinsic(
I);
3552 SmallPtrSet<BasicBlock *, 8> Dests;
3553 Dests.
insert(
I.getDefaultDest());
3563 if (
I.isInlineAsm()) {
3564 for (BasicBlock *Dest :
I.getIndirectDests()) {
3566 Target->setIsInlineAsmBrIndirectTarget();
3572 Target->setLabelMustBeEmitted();
3574 if (Dests.
insert(Dest).second)
3583 DAG.getBasicBlock(Return)));
3586void SelectionDAGBuilder::visitResume(
const ResumeInst &RI) {
3587 llvm_unreachable(
"SelectionDAGBuilder shouldn't visit resume instructions!");
3590void SelectionDAGBuilder::visitLandingPad(
const LandingPadInst &LP) {
3592 "Call to landingpad not in landing pad!");
3596 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3612 assert(ValueVTs.
size() == 2 &&
"Only two-valued landingpads are supported");
3617 if (
FuncInfo.ExceptionPointerVirtReg) {
3618 Ops[0] =
DAG.getZExtOrTrunc(
3619 DAG.getCopyFromReg(
DAG.getEntryNode(), dl,
3626 Ops[1] =
DAG.getZExtOrTrunc(
3627 DAG.getCopyFromReg(
DAG.getEntryNode(), dl,
3634 DAG.getVTList(ValueVTs),
Ops);
3642 if (JTB.first.HeaderBB ==
First)
3643 JTB.first.HeaderBB =
Last;
3656 for (
unsigned i = 0, e =
I.getNumSuccessors(); i != e; ++i) {
3658 bool Inserted =
Done.insert(BB).second;
3663 addSuccessorWithProb(IndirectBrMBB, Succ);
3673 if (!
I.shouldLowerToTrap(
DAG.getTarget().Options.TrapUnreachable,
3674 DAG.getTarget().Options.NoTrapAfterNoreturn))
3680void SelectionDAGBuilder::visitUnary(
const User &
I,
unsigned Opcode) {
3683 Flags.copyFMF(*FPOp);
3691void SelectionDAGBuilder::visitBinary(
const User &
I,
unsigned Opcode) {
3694 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3695 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3698 Flags.setExact(ExactOp->isExact());
3700 Flags.setDisjoint(DisjointOp->isDisjoint());
3702 Flags.copyFMF(*FPOp);
3711void SelectionDAGBuilder::visitShift(
const User &
I,
unsigned Opcode) {
3715 EVT ShiftTy =
DAG.getTargetLoweringInfo().getShiftAmountTy(
3720 if (!
I.getType()->isVectorTy() && Op2.
getValueType() != ShiftTy) {
3722 "Unexpected shift type");
3732 if (
const OverflowingBinaryOperator *OFBinOp =
3734 nuw = OFBinOp->hasNoUnsignedWrap();
3735 nsw = OFBinOp->hasNoSignedWrap();
3737 if (
const PossiblyExactOperator *ExactOp =
3739 exact = ExactOp->isExact();
3742 Flags.setExact(exact);
3743 Flags.setNoSignedWrap(nsw);
3744 Flags.setNoUnsignedWrap(nuw);
3750void SelectionDAGBuilder::visitSDiv(
const User &
I) {
3761void SelectionDAGBuilder::visitICmp(
const ICmpInst &
I) {
3767 auto &TLI =
DAG.getTargetLoweringInfo();
3780 Flags.setSameSign(
I.hasSameSign());
3781 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
3783 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3788void SelectionDAGBuilder::visitFCmp(
const FCmpInst &
I) {
3795 if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
3799 Flags.copyFMF(*FPMO);
3800 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
3802 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3811 return isa<SelectInst>(V);
3815void SelectionDAGBuilder::visitSelect(
const User &
I) {
3819 unsigned NumValues = ValueVTs.
size();
3820 if (NumValues == 0)
return;
3830 bool IsUnaryAbs =
false;
3831 bool Negate =
false;
3835 Flags.copyFMF(*FPOp);
3837 Flags.setUnpredictable(
3842 EVT VT = ValueVTs[0];
3843 LLVMContext &Ctx = *
DAG.getContext();
3844 auto &TLI =
DAG.getTargetLoweringInfo();
3854 bool UseScalarMinMax = VT.
isVector() &&
3863 switch (SPR.Flavor) {
3869 switch (SPR.NaNBehavior) {
3882 switch (SPR.NaNBehavior) {
3926 for (
unsigned i = 0; i != NumValues; ++i) {
3932 Values[i] =
DAG.getNegative(Values[i], dl, VT);
3935 for (
unsigned i = 0; i != NumValues; ++i) {
3939 Values[i] =
DAG.getNode(
3946 DAG.getVTList(ValueVTs), Values));
3949void SelectionDAGBuilder::visitTrunc(
const User &
I) {
3952 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3956 Flags.setNoSignedWrap(Trunc->hasNoSignedWrap());
3957 Flags.setNoUnsignedWrap(Trunc->hasNoUnsignedWrap());
3963void SelectionDAGBuilder::visitZExt(
const User &
I) {
3967 auto &TLI =
DAG.getTargetLoweringInfo();
3972 Flags.setNonNeg(PNI->hasNonNeg());
3977 if (
Flags.hasNonNeg() &&
3986void SelectionDAGBuilder::visitSExt(
const User &
I) {
3990 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3995void SelectionDAGBuilder::visitFPTrunc(
const User &
I) {
4001 Flags.copyFMF(*FPOp);
4002 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4005 DAG.getTargetConstant(
4010void SelectionDAGBuilder::visitFPExt(
const User &
I) {
4013 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4017 Flags.copyFMF(*FPOp);
4021void SelectionDAGBuilder::visitFPToUI(
const User &
I) {
4024 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4029void SelectionDAGBuilder::visitFPToSI(
const User &
I) {
4032 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4037void SelectionDAGBuilder::visitUIToFP(
const User &
I) {
4040 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4044 Flags.setNonNeg(PNI->hasNonNeg());
4049void SelectionDAGBuilder::visitSIToFP(
const User &
I) {
4052 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4057void SelectionDAGBuilder::visitPtrToAddr(
const User &
I) {
4060 const auto &TLI =
DAG.getTargetLoweringInfo();
4068void SelectionDAGBuilder::visitPtrToInt(
const User &
I) {
4072 auto &TLI =
DAG.getTargetLoweringInfo();
4073 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4082void SelectionDAGBuilder::visitIntToPtr(
const User &
I) {
4086 auto &TLI =
DAG.getTargetLoweringInfo();
4094void SelectionDAGBuilder::visitBitCast(
const User &
I) {
4097 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4102 if (DestVT !=
N.getValueType())
4110 setValue(&
I,
DAG.getConstant(
C->getValue(), dl, DestVT,
false,
4116void SelectionDAGBuilder::visitAddrSpaceCast(
const User &
I) {
4117 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4118 const Value *SV =
I.getOperand(0);
4123 unsigned DestAS =
I.getType()->getPointerAddressSpace();
4125 if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
4131void SelectionDAGBuilder::visitInsertElement(
const User &
I) {
4132 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4139 InVec, InVal, InIdx));
4142void SelectionDAGBuilder::visitExtractElement(
const User &
I) {
4143 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4152void SelectionDAGBuilder::visitShuffleVector(
const User &
I) {
4157 Mask = SVI->getShuffleMask();
4161 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4165 if (
all_of(Mask, [](
int Elem) {
return Elem == 0; }) &&
4170 DAG.getVectorIdxConstant(0,
DL));
4181 unsigned MaskNumElts =
Mask.size();
4183 if (SrcNumElts == MaskNumElts) {
4189 if (SrcNumElts < MaskNumElts) {
4193 if (MaskNumElts % SrcNumElts == 0) {
4197 unsigned NumConcat = MaskNumElts / SrcNumElts;
4198 bool IsConcat =
true;
4199 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
4200 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4206 if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
4207 (ConcatSrcs[i / SrcNumElts] >= 0 &&
4208 ConcatSrcs[i / SrcNumElts] != (
int)(Idx / SrcNumElts))) {
4213 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
4220 for (
auto Src : ConcatSrcs) {
4233 unsigned PaddedMaskNumElts =
alignTo(MaskNumElts, SrcNumElts);
4234 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
4250 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
4251 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4253 if (Idx >= (
int)SrcNumElts)
4254 Idx -= SrcNumElts - PaddedMaskNumElts;
4262 if (MaskNumElts != PaddedMaskNumElts)
4264 DAG.getVectorIdxConstant(0,
DL));
4270 assert(SrcNumElts > MaskNumElts);
4274 int StartIdx[2] = {-1, -1};
4275 bool CanExtract =
true;
4276 for (
int Idx : Mask) {
4281 if (Idx >= (
int)SrcNumElts) {
4289 int NewStartIdx =
alignDown(Idx, MaskNumElts);
4290 if (NewStartIdx + MaskNumElts > SrcNumElts ||
4291 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
4295 StartIdx[Input] = NewStartIdx;
4298 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
4304 for (
unsigned Input = 0; Input < 2; ++Input) {
4305 SDValue &Src = Input == 0 ? Src1 : Src2;
4306 if (StartIdx[Input] < 0)
4307 Src =
DAG.getUNDEF(VT);
4310 DAG.getVectorIdxConstant(StartIdx[Input],
DL));
4315 SmallVector<int, 8> MappedOps(Mask);
4316 for (
int &Idx : MappedOps) {
4317 if (Idx >= (
int)SrcNumElts)
4318 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4323 setValue(&
I,
DAG.getVectorShuffle(VT,
DL, Src1, Src2, MappedOps));
4332 for (
int Idx : Mask) {
4336 Res =
DAG.getUNDEF(EltVT);
4338 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
4339 if (Idx >= (
int)SrcNumElts) Idx -= SrcNumElts;
4342 DAG.getVectorIdxConstant(Idx,
DL));
4352 ArrayRef<unsigned> Indices =
I.getIndices();
4353 const Value *Op0 =
I.getOperand(0);
4355 Type *AggTy =
I.getType();
4362 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4368 unsigned NumAggValues = AggValueVTs.
size();
4369 unsigned NumValValues = ValValueVTs.
size();
4373 if (!NumAggValues) {
4381 for (; i != LinearIndex; ++i)
4382 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4387 for (; i != LinearIndex + NumValValues; ++i)
4388 Values[i] = FromUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4392 for (; i != NumAggValues; ++i)
4393 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4397 DAG.getVTList(AggValueVTs), Values));
4401 ArrayRef<unsigned> Indices =
I.getIndices();
4402 const Value *Op0 =
I.getOperand(0);
4404 Type *ValTy =
I.getType();
4409 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4413 unsigned NumValValues = ValValueVTs.
size();
4416 if (!NumValValues) {
4425 for (
unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4426 Values[i - LinearIndex] =
4432 DAG.getVTList(ValValueVTs), Values));
4435void SelectionDAGBuilder::visitGetElementPtr(
const User &
I) {
4436 Value *Op0 =
I.getOperand(0);
4442 auto &TLI =
DAG.getTargetLoweringInfo();
4447 bool IsVectorGEP =
I.getType()->isVectorTy();
4448 ElementCount VectorElementCount =
4454 const Value *Idx = GTI.getOperand();
4455 if (StructType *StTy = GTI.getStructTypeOrNull()) {
4460 DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(
Field);
4470 N =
DAG.getMemBasePlusOffset(
4471 N,
DAG.getConstant(
Offset, dl,
N.getValueType()), dl, Flags);
4477 unsigned IdxSize =
DAG.getDataLayout().getIndexSizeInBits(AS);
4479 TypeSize ElementSize =
4480 GTI.getSequentialElementStride(
DAG.getDataLayout());
4485 bool ElementScalable = ElementSize.
isScalable();
4491 C =
C->getSplatValue();
4494 if (CI && CI->isZero())
4496 if (CI && !ElementScalable) {
4497 APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4500 if (
N.getValueType().isVector())
4501 OffsVal =
DAG.getConstant(
4504 OffsVal =
DAG.getConstant(Offs, dl, IdxTy);
4511 Flags.setNoUnsignedWrap(
true);
4514 OffsVal =
DAG.getSExtOrTrunc(OffsVal, dl,
N.getValueType());
4516 N =
DAG.getMemBasePlusOffset(
N, OffsVal, dl, Flags);
4524 if (
N.getValueType().isVector()) {
4526 VectorElementCount);
4527 IdxN =
DAG.getSplat(VT, dl, IdxN);
4531 N =
DAG.getSplat(VT, dl,
N);
4537 IdxN =
DAG.getSExtOrTrunc(IdxN, dl,
N.getValueType());
4539 SDNodeFlags ScaleFlags;
4548 if (ElementScalable) {
4549 EVT VScaleTy =
N.getValueType().getScalarType();
4552 DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4553 if (
N.getValueType().isVector())
4554 VScale =
DAG.getSplatVector(
N.getValueType(), dl, VScale);
4555 IdxN =
DAG.getNode(
ISD::MUL, dl,
N.getValueType(), IdxN, VScale,
4560 if (ElementMul != 1) {
4561 if (ElementMul.isPowerOf2()) {
4562 unsigned Amt = ElementMul.logBase2();
4565 DAG.getShiftAmountConstant(Amt,
N.getValueType(), dl),
4568 SDValue Scale =
DAG.getConstant(ElementMul.getZExtValue(), dl,
4570 IdxN =
DAG.getNode(
ISD::MUL, dl,
N.getValueType(), IdxN, Scale,
4580 SDNodeFlags AddFlags;
4584 N =
DAG.getMemBasePlusOffset(
N, IdxN, dl, AddFlags);
4588 if (IsVectorGEP && !
N.getValueType().isVector()) {
4590 N =
DAG.getSplat(VT, dl,
N);
4601 N =
DAG.getPtrExtendInReg(
N, dl, PtrMemTy);
4606void SelectionDAGBuilder::visitAlloca(
const AllocaInst &
I) {
4613 Type *Ty =
I.getAllocatedType();
4614 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4615 auto &
DL =
DAG.getDataLayout();
4616 TypeSize TySize =
DL.getTypeAllocSize(Ty);
4617 MaybeAlign Alignment = std::max(
DL.getPrefTypeAlign(Ty),
I.getAlign());
4623 AllocSize =
DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4625 AllocSize =
DAG.getNode(
4627 DAG.getZExtOrTrunc(
DAG.getTypeSize(dl, MVT::i64, TySize), dl, IntPtr));
4632 Align StackAlign =
DAG.getSubtarget().getFrameLowering()->getStackAlign();
4633 if (*Alignment <= StackAlign)
4634 Alignment = std::nullopt;
4636 const uint64_t StackAlignMask = StackAlign.
value() - 1U;
4641 DAG.getConstant(StackAlignMask, dl, IntPtr),
4646 DAG.getSignedConstant(~StackAlignMask, dl, IntPtr));
4650 DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4660 return I.getMetadata(LLVMContext::MD_range);
4665 if (std::optional<ConstantRange> CR = CB->getRange())
4669 return std::nullopt;
4674 return CB->getRetNoFPClass();
4678void SelectionDAGBuilder::visitLoad(
const LoadInst &
I) {
4680 return visitAtomicLoad(
I);
4682 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4683 const Value *SV =
I.getOperand(0);
4688 if (Arg->hasSwiftErrorAttr())
4689 return visitLoadFromSwiftError(
I);
4693 if (Alloca->isSwiftError())
4694 return visitLoadFromSwiftError(
I);
4700 Type *Ty =
I.getType();
4704 unsigned NumValues = ValueVTs.
size();
4708 Align Alignment =
I.getAlign();
4709 AAMDNodes AAInfo =
I.getAAMetadata();
4711 bool isVolatile =
I.isVolatile();
4716 bool ConstantMemory =
false;
4723 BatchAA->pointsToConstantMemory(MemoryLocation(
4728 Root =
DAG.getEntryNode();
4729 ConstantMemory =
true;
4733 Root =
DAG.getRoot();
4744 unsigned ChainI = 0;
4745 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4761 MachinePointerInfo PtrInfo =
4763 ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue())
4764 : MachinePointerInfo();
4766 SDValue A =
DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4767 SDValue L =
DAG.getLoad(MemVTs[i], dl, Root,
A, PtrInfo, Alignment,
4768 MMOFlags, AAInfo, Ranges);
4769 Chains[ChainI] =
L.getValue(1);
4771 if (MemVTs[i] != ValueVTs[i])
4772 L =
DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]);
4777 if (!ConstantMemory) {
4787 DAG.getVTList(ValueVTs), Values));
4790void SelectionDAGBuilder::visitStoreToSwiftError(
const StoreInst &
I) {
4791 assert(
DAG.getTargetLoweringInfo().supportSwiftError() &&
4792 "call visitStoreToSwiftError when backend supports swifterror");
4795 SmallVector<uint64_t, 4>
Offsets;
4796 const Value *SrcV =
I.getOperand(0);
4798 SrcV->
getType(), ValueVTs,
nullptr, &Offsets, 0);
4799 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4800 "expect a single EVT for swifterror");
4809 SDValue(Src.getNode(), Src.getResNo()));
4810 DAG.setRoot(CopyNode);
4813void SelectionDAGBuilder::visitLoadFromSwiftError(
const LoadInst &
I) {
4814 assert(
DAG.getTargetLoweringInfo().supportSwiftError() &&
4815 "call visitLoadFromSwiftError when backend supports swifterror");
4818 !
I.hasMetadata(LLVMContext::MD_nontemporal) &&
4819 !
I.hasMetadata(LLVMContext::MD_invariant_load) &&
4820 "Support volatile, non temporal, invariant for load_from_swift_error");
4822 const Value *SV =
I.getOperand(0);
4823 Type *Ty =
I.getType();
4826 !
BatchAA->pointsToConstantMemory(MemoryLocation(
4828 I.getAAMetadata()))) &&
4829 "load_from_swift_error should not be constant memory");
4832 SmallVector<uint64_t, 4>
Offsets;
4834 ValueVTs,
nullptr, &Offsets, 0);
4835 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4836 "expect a single EVT for swifterror");
4846void SelectionDAGBuilder::visitStore(
const StoreInst &
I) {
4848 return visitAtomicStore(
I);
4850 const Value *SrcV =
I.getOperand(0);
4851 const Value *PtrV =
I.getOperand(1);
4853 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4858 if (Arg->hasSwiftErrorAttr())
4859 return visitStoreToSwiftError(
I);
4863 if (Alloca->isSwiftError())
4864 return visitStoreToSwiftError(
I);
4871 SrcV->
getType(), ValueVTs, &MemVTs, &Offsets);
4872 unsigned NumValues = ValueVTs.
size();
4885 Align Alignment =
I.getAlign();
4886 AAMDNodes AAInfo =
I.getAAMetadata();
4890 unsigned ChainI = 0;
4891 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4901 MachinePointerInfo PtrInfo =
4903 ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue())
4904 : MachinePointerInfo();
4908 if (MemVTs[i] != ValueVTs[i])
4909 Val =
DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4911 DAG.getStore(Root, dl, Val,
Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4912 Chains[ChainI] = St;
4918 DAG.setRoot(StoreNode);
4921void SelectionDAGBuilder::visitMaskedStore(
const CallInst &
I,
4922 bool IsCompressing) {
4925 Value *Src0Operand =
I.getArgOperand(0);
4926 Value *PtrOperand =
I.getArgOperand(1);
4927 Value *MaskOperand =
I.getArgOperand(2);
4928 Align Alignment =
I.getParamAlign(1).valueOrOne();
4938 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
4941 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
4942 MachinePointerInfo(PtrOperand), MMOFlags,
4945 const auto &TLI =
DAG.getTargetLoweringInfo();
4948 !IsCompressing &&
TTI->hasConditionalLoadStoreForType(
4949 I.getArgOperand(0)->getType(),
true)
4955 DAG.setRoot(StoreNode);
4985 C =
C->getSplatValue();
4999 if (!
GEP ||
GEP->getParent() != CurBB)
5002 if (
GEP->getNumOperands() != 2)
5005 const Value *BasePtr =
GEP->getPointerOperand();
5006 const Value *IndexVal =
GEP->getOperand(
GEP->getNumOperands() - 1);
5012 TypeSize ScaleVal =
DL.getTypeAllocSize(
GEP->getResultElementType());
5017 if (ScaleVal != 1 &&
5029void SelectionDAGBuilder::visitMaskedScatter(
const CallInst &
I) {
5033 const Value *Ptr =
I.getArgOperand(1);
5037 Align Alignment =
I.getParamAlign(1).valueOrOne();
5038 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5047 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5057 EVT IdxVT =
Index.getValueType();
5065 SDValue Scatter =
DAG.getMaskedScatter(
DAG.getVTList(MVT::Other), VT, sdl,
5067 DAG.setRoot(Scatter);
5071void SelectionDAGBuilder::visitMaskedLoad(
const CallInst &
I,
bool IsExpanding) {
5074 Value *PtrOperand =
I.getArgOperand(0);
5075 Value *MaskOperand =
I.getArgOperand(1);
5076 Value *Src0Operand =
I.getArgOperand(2);
5077 Align Alignment =
I.getParamAlign(0).valueOrOne();
5085 AAMDNodes AAInfo =
I.getAAMetadata();
5092 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
5095 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
5097 if (
I.hasMetadata(LLVMContext::MD_invariant_load))
5100 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5101 MachinePointerInfo(PtrOperand), MMOFlags,
5104 const auto &TLI =
DAG.getTargetLoweringInfo();
5111 TTI->hasConditionalLoadStoreForType(Src0Operand->
getType(),
5116 DAG.getMaskedLoad(VT, sdl, InChain, Ptr,
Offset, Mask, Src0, VT, MMO,
5123void SelectionDAGBuilder::visitMaskedGather(
const CallInst &
I) {
5127 const Value *Ptr =
I.getArgOperand(0);
5131 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5133 Align Alignment =
I.getParamAlign(0).valueOrOne();
5144 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5156 EVT IdxVT =
Index.getValueType();
5165 DAG.getMaskedGather(
DAG.getVTList(VT, MVT::Other), VT, sdl,
Ops, MMO,
5181 SDVTList VTs =
DAG.getVTList(MemVT, MVT::i1, MVT::Other);
5183 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5186 MachineFunction &MF =
DAG.getMachineFunction();
5188 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5189 DAG.getEVTAlign(MemVT), AAMDNodes(),
nullptr, SSID, SuccessOrdering,
5193 dl, MemVT, VTs, InChain,
5201 DAG.setRoot(OutChain);
5204void SelectionDAGBuilder::visitAtomicRMW(
const AtomicRMWInst &
I) {
5207 switch (
I.getOperation()) {
5249 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5252 MachineFunction &MF =
DAG.getMachineFunction();
5254 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5255 DAG.getEVTAlign(MemVT), AAMDNodes(),
nullptr, SSID, Ordering);
5258 DAG.getAtomic(NT, dl, MemVT, InChain,
5265 DAG.setRoot(OutChain);
5268void SelectionDAGBuilder::visitFence(
const FenceInst &
I) {
5270 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5273 Ops[1] =
DAG.getTargetConstant((
unsigned)
I.getOrdering(), dl,
5275 Ops[2] =
DAG.getTargetConstant(
I.getSyncScopeID(), dl,
5282void SelectionDAGBuilder::visitAtomicLoad(
const LoadInst &
I) {
5289 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5300 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5301 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5302 I.getAlign(), AAMDNodes(), Ranges, SSID, Order);
5312 L =
DAG.getPtrExtOrTrunc(L, dl, VT);
5315 DAG.setRoot(OutChain);
5318void SelectionDAGBuilder::visitAtomicStore(
const StoreInst &
I) {
5326 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5336 MachineFunction &MF =
DAG.getMachineFunction();
5338 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5339 I.getAlign(), AAMDNodes(),
nullptr, SSID, Ordering);
5343 Val =
DAG.getPtrExtOrTrunc(Val, dl, MemVT);
5350 DAG.setRoot(OutChain);
5358std::pair<bool, bool>
5359SelectionDAGBuilder::getTargetIntrinsicCallProperties(
const CallBase &
I) {
5361 bool HasChain = !
F->doesNotAccessMemory();
5363 HasChain &&
F->onlyReadsMemory() &&
F->willReturn() &&
F->doesNotThrow();
5365 return {HasChain, OnlyLoad};
5369 const CallBase &
I,
bool HasChain,
bool OnlyLoad,
5371 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5378 Ops.push_back(
DAG.getRoot());
5391 for (
unsigned i = 0, e =
I.arg_size(); i != e; ++i) {
5392 const Value *Arg =
I.getArgOperand(i);
5393 if (!
I.paramHasAttr(i, Attribute::ImmArg)) {
5401 assert(CI->getBitWidth() <= 64 &&
5402 "large intrinsic immediates not handled");
5403 Ops.push_back(
DAG.getTargetConstant(*CI, SDLoc(), VT));
5410 if (std::optional<OperandBundleUse> Bundle =
5412 auto *Sym = Bundle->Inputs[0].get();
5415 Ops.push_back(SDSym);
5418 if (std::optional<OperandBundleUse> Bundle =
5420 Value *Token = Bundle->Inputs[0].get();
5422 assert(
Ops.back().getValueType() != MVT::Glue &&
5423 "Did not expect another glue node here.");
5426 Ops.push_back(ConvControlToken);
5434 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5442 return DAG.getVTList(ValueVTs);
5446SDValue SelectionDAGBuilder::getTargetNonMemIntrinsicNode(
5469 if (
I.getType()->isVoidTy())
5484void SelectionDAGBuilder::visitTargetIntrinsic(
const CallInst &
I,
5486 auto [HasChain, OnlyLoad] = getTargetIntrinsicCallProperties(
I);
5489 TargetLowering::IntrinsicInfo
Info;
5490 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5491 bool IsTgtMemIntrinsic =
5495 I, HasChain, OnlyLoad, IsTgtMemIntrinsic ? &
Info :
nullptr);
5496 SDVTList VTs = getTargetIntrinsicVTList(
I, HasChain);
5501 Flags.copyFMF(*FPMO);
5502 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
5509 if (IsTgtMemIntrinsic) {
5514 MachinePointerInfo MPI;
5516 MPI = MachinePointerInfo(
Info.ptrVal,
Info.offset);
5517 else if (
Info.fallbackAddressSpace)
5518 MPI = MachinePointerInfo(*
Info.fallbackAddressSpace);
5519 EVT MemVT =
Info.memVT;
5521 if (
Size.hasValue() && !
Size.getValue())
5523 Align Alignment =
Info.align.value_or(
DAG.getEVTAlign(MemVT));
5524 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5525 MPI,
Info.flags,
Size, Alignment,
I.getAAMetadata(),
nullptr,
5530 Result = getTargetNonMemIntrinsicNode(*
I.getType(), HasChain,
Ops, VTs);
5533 Result = handleTargetIntrinsicRet(
I, HasChain, OnlyLoad, Result);
5590 SDValue TwoToFractionalPartOfX;
5667 if (
Op.getValueType() == MVT::f32 &&
5691 if (
Op.getValueType() == MVT::f32 &&
5790 if (
Op.getValueType() == MVT::f32 &&
5874 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5887 if (
Op.getValueType() == MVT::f32 &&
5964 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5975 if (
Op.getValueType() == MVT::f32 &&
5988 bool IsExp10 =
false;
5989 if (
LHS.getValueType() == MVT::f32 &&
RHS.getValueType() == MVT::f32 &&
5993 IsExp10 = LHSC->isExactlyValue(Ten);
6020 unsigned Val = RHSC->getSExtValue();
6049 CurSquare, CurSquare);
6054 if (RHSC->getSExtValue() < 0)
6068 EVT VT =
LHS.getValueType();
6091 if ((ScaleInt > 0 || (Saturating &&
Signed)) &&
6095 Opcode, VT, ScaleInt);
6130 switch (
N.getOpcode()) {
6134 Op.getValueType().getSizeInBits());
6159bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
6166 MachineFunction &MF =
DAG.getMachineFunction();
6167 const TargetInstrInfo *
TII =
DAG.getSubtarget().getInstrInfo();
6171 auto MakeVRegDbgValue = [&](
Register Reg, DIExpression *FragExpr,
6176 auto &Inst =
TII->get(TargetOpcode::DBG_INSTR_REF);
6183 auto *NewDIExpr = FragExpr;
6190 return BuildMI(MF,
DL, Inst,
false, MOs, Variable, NewDIExpr);
6193 auto &Inst =
TII->get(TargetOpcode::DBG_VALUE);
6194 return BuildMI(MF,
DL, Inst, Indirect,
Reg, Variable, FragExpr);
6198 if (Kind == FuncArgumentDbgValueKind::Value) {
6203 if (!IsInEntryBlock)
6219 bool VariableIsFunctionInputArg =
Variable->isParameter() &&
6220 !
DL->getInlinedAt();
6222 if (!IsInPrologue && !VariableIsFunctionInputArg)
6256 if (VariableIsFunctionInputArg) {
6258 if (ArgNo >=
FuncInfo.DescribedArgs.size())
6259 FuncInfo.DescribedArgs.resize(ArgNo + 1,
false);
6260 else if (!IsInPrologue &&
FuncInfo.DescribedArgs.test(ArgNo))
6261 return !NodeMap[
V].getNode();
6266 bool IsIndirect =
false;
6267 std::optional<MachineOperand>
Op;
6269 int FI =
FuncInfo.getArgumentFrameIndex(Arg);
6270 if (FI != std::numeric_limits<int>::max())
6274 if (!
Op &&
N.getNode()) {
6277 if (ArgRegsAndSizes.
size() == 1)
6278 Reg = ArgRegsAndSizes.
front().first;
6281 MachineRegisterInfo &RegInfo = MF.
getRegInfo();
6288 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6292 if (!
Op &&
N.getNode()) {
6296 if (FrameIndexSDNode *FINode =
6306 for (
const auto &RegAndSize : SplitRegs) {
6310 int RegFragmentSizeInBits = RegAndSize.second;
6312 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
6315 if (
Offset >= ExprFragmentSizeInBits)
6319 if (
Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
6320 RegFragmentSizeInBits = ExprFragmentSizeInBits -
Offset;
6325 Expr,
Offset, RegFragmentSizeInBits);
6326 Offset += RegAndSize.second;
6329 if (!FragmentExpr) {
6330 SDDbgValue *SDV =
DAG.getConstantDbgValue(
6332 DAG.AddDbgValue(SDV,
false);
6335 MachineInstr *NewMI =
6336 MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
6337 Kind != FuncArgumentDbgValueKind::Value);
6338 FuncInfo.ArgDbgValues.push_back(NewMI);
6345 if (VMI !=
FuncInfo.ValueMap.end()) {
6346 const auto &TLI =
DAG.getTargetLoweringInfo();
6347 RegsForValue RFV(
V->getContext(), TLI,
DAG.getDataLayout(), VMI->second,
6348 V->getType(), std::nullopt);
6349 if (RFV.occupiesMultipleRegs()) {
6350 splitMultiRegDbgValue(RFV.getRegsAndSizes());
6355 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6356 }
else if (ArgRegsAndSizes.
size() > 1) {
6359 splitMultiRegDbgValue(ArgRegsAndSizes);
6368 "Expected inlined-at fields to agree");
6369 MachineInstr *NewMI =
nullptr;
6372 NewMI = MakeVRegDbgValue(
Op->getReg(), Expr, IsIndirect);
6374 NewMI =
BuildMI(MF,
DL,
TII->get(TargetOpcode::DBG_VALUE),
true, *
Op,
6378 FuncInfo.ArgDbgValues.push_back(NewMI);
6387 unsigned DbgSDNodeOrder) {
6399 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
6400 false, dl, DbgSDNodeOrder);
6402 return DAG.getDbgValue(Variable, Expr,
N.getNode(),
N.getResNo(),
6403 false, dl, DbgSDNodeOrder);
6408 case Intrinsic::smul_fix:
6410 case Intrinsic::umul_fix:
6412 case Intrinsic::smul_fix_sat:
6414 case Intrinsic::umul_fix_sat:
6416 case Intrinsic::sdiv_fix:
6418 case Intrinsic::udiv_fix:
6420 case Intrinsic::sdiv_fix_sat:
6422 case Intrinsic::udiv_fix_sat:
6435 "expected call_preallocated_setup Value");
6436 for (
const auto *U : PreallocatedSetup->
users()) {
6438 const Function *Fn = UseCall->getCalledFunction();
6439 if (!Fn || Fn->
getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6449bool SelectionDAGBuilder::visitEntryValueDbgValue(
6459 auto ArgIt =
FuncInfo.ValueMap.find(Arg);
6460 if (ArgIt ==
FuncInfo.ValueMap.end()) {
6462 dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6463 "couldn't find an associated register for the Argument\n");
6466 Register ArgVReg = ArgIt->getSecond();
6468 for (
auto [PhysReg, VirtReg] :
FuncInfo.RegInfo->liveins())
6469 if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6470 SDDbgValue *SDV =
DAG.getVRegDbgValue(
6471 Variable, Expr, PhysReg,
false , DbgLoc, SDNodeOrder);
6472 DAG.AddDbgValue(SDV,
false );
6475 LLVM_DEBUG(
dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6476 "couldn't find a physical register\n");
6481void SelectionDAGBuilder::visitConvergenceControl(
const CallInst &
I,
6484 switch (Intrinsic) {
6485 case Intrinsic::experimental_convergence_anchor:
6488 case Intrinsic::experimental_convergence_entry:
6491 case Intrinsic::experimental_convergence_loop: {
6493 auto *Token = Bundle->Inputs[0].get();
6501void SelectionDAGBuilder::visitVectorHistogram(
const CallInst &
I,
6502 unsigned IntrinsicID) {
6505 assert(IntrinsicID == Intrinsic::experimental_vector_histogram_add &&
6506 "Tried to lower unsupported histogram type");
6512 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6513 DataLayout TargetDL =
DAG.getDataLayout();
6515 Align Alignment =
DAG.getEVTAlign(VT);
6528 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
6529 MachinePointerInfo(AS),
6540 EVT IdxVT =
Index.getValueType();
6547 SDValue ID =
DAG.getTargetConstant(IntrinsicID, sdl, MVT::i32);
6550 SDValue Histogram =
DAG.getMaskedHistogram(
DAG.getVTList(MVT::Other), VT, sdl,
6554 DAG.setRoot(Histogram);
6557void SelectionDAGBuilder::visitVectorExtractLastActive(
const CallInst &
I,
6559 assert(Intrinsic == Intrinsic::experimental_vector_extract_last_active &&
6560 "Tried lowering invalid vector extract last");
6562 const DataLayout &Layout =
DAG.getDataLayout();
6566 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6576 EVT BoolVT =
Mask.getValueType().getScalarType();
6578 Result =
DAG.getSelect(sdl, ResVT, AnyActive, Result, PassThru);
6585void SelectionDAGBuilder::visitIntrinsicCall(
const CallInst &
I,
6587 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6594 Flags.copyFMF(*FPOp);
6596 switch (Intrinsic) {
6599 visitTargetIntrinsic(
I, Intrinsic);
6601 case Intrinsic::vscale: {
6606 case Intrinsic::vastart: visitVAStart(
I);
return;
6607 case Intrinsic::vaend: visitVAEnd(
I);
return;
6608 case Intrinsic::vacopy: visitVACopy(
I);
return;
6609 case Intrinsic::returnaddress:
6614 case Intrinsic::addressofreturnaddress:
6619 case Intrinsic::sponentry:
6624 case Intrinsic::frameaddress:
6629 case Intrinsic::read_volatile_register:
6630 case Intrinsic::read_register: {
6631 Value *
Reg =
I.getArgOperand(0);
6637 DAG.getVTList(VT, MVT::Other), Chain,
RegName);
6642 case Intrinsic::write_register: {
6643 Value *
Reg =
I.getArgOperand(0);
6644 Value *RegValue =
I.getArgOperand(1);
6652 case Intrinsic::memcpy:
6653 case Intrinsic::memcpy_inline: {
6659 "memcpy_inline needs constant size");
6661 Align DstAlign = MCI.getDestAlign().valueOrOne();
6662 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6663 Align Alignment = std::min(DstAlign, SrcAlign);
6664 bool isVol = MCI.isVolatile();
6668 SDValue MC =
DAG.getMemcpy(Root, sdl, Dst, Src,
Size, Alignment, isVol,
6669 MCI.isForceInlined(), &
I, std::nullopt,
6670 MachinePointerInfo(
I.getArgOperand(0)),
6671 MachinePointerInfo(
I.getArgOperand(1)),
6673 updateDAGForMaybeTailCall(MC);
6676 case Intrinsic::memset:
6677 case Intrinsic::memset_inline: {
6683 "memset_inline needs constant size");
6685 Align DstAlign = MSII.getDestAlign().valueOrOne();
6686 bool isVol = MSII.isVolatile();
6689 Root, sdl, Dst, Value,
Size, DstAlign, isVol, MSII.isForceInlined(),
6690 &
I, MachinePointerInfo(
I.getArgOperand(0)),
I.getAAMetadata());
6691 updateDAGForMaybeTailCall(MC);
6694 case Intrinsic::memmove: {
6700 Align DstAlign = MMI.getDestAlign().valueOrOne();
6701 Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6702 Align Alignment = std::min(DstAlign, SrcAlign);
6703 bool isVol = MMI.isVolatile();
6707 SDValue MM =
DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol, &
I,
6709 MachinePointerInfo(
I.getArgOperand(0)),
6710 MachinePointerInfo(
I.getArgOperand(1)),
6712 updateDAGForMaybeTailCall(MM);
6715 case Intrinsic::memcpy_element_unordered_atomic: {
6721 Type *LengthTy =
MI.getLength()->getType();
6722 unsigned ElemSz =
MI.getElementSizeInBytes();
6726 isTC, MachinePointerInfo(
MI.getRawDest()),
6727 MachinePointerInfo(
MI.getRawSource()));
6728 updateDAGForMaybeTailCall(MC);
6731 case Intrinsic::memmove_element_unordered_atomic: {
6737 Type *LengthTy =
MI.getLength()->getType();
6738 unsigned ElemSz =
MI.getElementSizeInBytes();
6742 isTC, MachinePointerInfo(
MI.getRawDest()),
6743 MachinePointerInfo(
MI.getRawSource()));
6744 updateDAGForMaybeTailCall(MC);
6747 case Intrinsic::memset_element_unordered_atomic: {
6753 Type *LengthTy =
MI.getLength()->getType();
6754 unsigned ElemSz =
MI.getElementSizeInBytes();
6758 isTC, MachinePointerInfo(
MI.getRawDest()));
6759 updateDAGForMaybeTailCall(MC);
6762 case Intrinsic::call_preallocated_setup: {
6764 SDValue SrcValue =
DAG.getSrcValue(PreallocatedCall);
6771 case Intrinsic::call_preallocated_arg: {
6773 SDValue SrcValue =
DAG.getSrcValue(PreallocatedCall);
6787 case Intrinsic::eh_typeid_for: {
6790 unsigned TypeID =
DAG.getMachineFunction().getTypeIDFor(GV);
6791 Res =
DAG.getConstant(
TypeID, sdl, MVT::i32);
6796 case Intrinsic::eh_return_i32:
6797 case Intrinsic::eh_return_i64:
6798 DAG.getMachineFunction().setCallsEHReturn(
true);
6805 case Intrinsic::eh_unwind_init:
6806 DAG.getMachineFunction().setCallsUnwindInit(
true);
6808 case Intrinsic::eh_dwarf_cfa:
6813 case Intrinsic::eh_sjlj_callsite: {
6815 assert(
FuncInfo.getCurrentCallSite() == 0 &&
"Overlapping call sites!");
6820 case Intrinsic::eh_sjlj_functioncontext: {
6822 MachineFrameInfo &MFI =
DAG.getMachineFunction().getFrameInfo();
6825 int FI =
FuncInfo.StaticAllocaMap[FnCtx];
6829 case Intrinsic::eh_sjlj_setjmp: {
6834 DAG.getVTList(MVT::i32, MVT::Other),
Ops);
6836 DAG.setRoot(
Op.getValue(1));
6839 case Intrinsic::eh_sjlj_longjmp:
6843 case Intrinsic::eh_sjlj_setup_dispatch:
6847 case Intrinsic::masked_gather:
6848 visitMaskedGather(
I);
6850 case Intrinsic::masked_load:
6853 case Intrinsic::masked_scatter:
6854 visitMaskedScatter(
I);
6856 case Intrinsic::masked_store:
6857 visitMaskedStore(
I);
6859 case Intrinsic::masked_expandload:
6860 visitMaskedLoad(
I,
true );
6862 case Intrinsic::masked_compressstore:
6863 visitMaskedStore(
I,
true );
6865 case Intrinsic::powi:
6869 case Intrinsic::log:
6872 case Intrinsic::log2:
6876 case Intrinsic::log10:
6880 case Intrinsic::exp:
6883 case Intrinsic::exp2:
6887 case Intrinsic::pow:
6891 case Intrinsic::sqrt:
6892 case Intrinsic::fabs:
6893 case Intrinsic::sin:
6894 case Intrinsic::cos:
6895 case Intrinsic::tan:
6896 case Intrinsic::asin:
6897 case Intrinsic::acos:
6898 case Intrinsic::atan:
6899 case Intrinsic::sinh:
6900 case Intrinsic::cosh:
6901 case Intrinsic::tanh:
6902 case Intrinsic::exp10:
6903 case Intrinsic::floor:
6904 case Intrinsic::ceil:
6905 case Intrinsic::trunc:
6906 case Intrinsic::rint:
6907 case Intrinsic::nearbyint:
6908 case Intrinsic::round:
6909 case Intrinsic::roundeven:
6910 case Intrinsic::canonicalize: {
6913 switch (Intrinsic) {
6915 case Intrinsic::sqrt: Opcode =
ISD::FSQRT;
break;
6916 case Intrinsic::fabs: Opcode =
ISD::FABS;
break;
6917 case Intrinsic::sin: Opcode =
ISD::FSIN;
break;
6918 case Intrinsic::cos: Opcode =
ISD::FCOS;
break;
6919 case Intrinsic::tan: Opcode =
ISD::FTAN;
break;
6920 case Intrinsic::asin: Opcode =
ISD::FASIN;
break;
6921 case Intrinsic::acos: Opcode =
ISD::FACOS;
break;
6922 case Intrinsic::atan: Opcode =
ISD::FATAN;
break;
6923 case Intrinsic::sinh: Opcode =
ISD::FSINH;
break;
6924 case Intrinsic::cosh: Opcode =
ISD::FCOSH;
break;
6925 case Intrinsic::tanh: Opcode =
ISD::FTANH;
break;
6926 case Intrinsic::exp10: Opcode =
ISD::FEXP10;
break;
6927 case Intrinsic::floor: Opcode =
ISD::FFLOOR;
break;
6928 case Intrinsic::ceil: Opcode =
ISD::FCEIL;
break;
6929 case Intrinsic::trunc: Opcode =
ISD::FTRUNC;
break;
6930 case Intrinsic::rint: Opcode =
ISD::FRINT;
break;
6932 case Intrinsic::round: Opcode =
ISD::FROUND;
break;
6939 getValue(
I.getArgOperand(0)).getValueType(),
6943 case Intrinsic::atan2:
6945 getValue(
I.getArgOperand(0)).getValueType(),
6949 case Intrinsic::lround:
6950 case Intrinsic::llround:
6951 case Intrinsic::lrint:
6952 case Intrinsic::llrint: {
6955 switch (Intrinsic) {
6957 case Intrinsic::lround: Opcode =
ISD::LROUND;
break;
6959 case Intrinsic::lrint: Opcode =
ISD::LRINT;
break;
6960 case Intrinsic::llrint: Opcode =
ISD::LLRINT;
break;
6969 case Intrinsic::minnum:
6971 getValue(
I.getArgOperand(0)).getValueType(),
6975 case Intrinsic::maxnum:
6977 getValue(
I.getArgOperand(0)).getValueType(),
6981 case Intrinsic::minimum:
6983 getValue(
I.getArgOperand(0)).getValueType(),
6987 case Intrinsic::maximum:
6989 getValue(
I.getArgOperand(0)).getValueType(),
6993 case Intrinsic::minimumnum:
6995 getValue(
I.getArgOperand(0)).getValueType(),
6999 case Intrinsic::maximumnum:
7001 getValue(
I.getArgOperand(0)).getValueType(),
7005 case Intrinsic::copysign:
7007 getValue(
I.getArgOperand(0)).getValueType(),
7011 case Intrinsic::ldexp:
7013 getValue(
I.getArgOperand(0)).getValueType(),
7017 case Intrinsic::modf:
7018 case Intrinsic::sincos:
7019 case Intrinsic::sincospi:
7020 case Intrinsic::frexp: {
7022 switch (Intrinsic) {
7025 case Intrinsic::sincos:
7028 case Intrinsic::sincospi:
7031 case Intrinsic::modf:
7034 case Intrinsic::frexp:
7040 SDVTList VTs =
DAG.getVTList(ValueVTs);
7042 &
I,
DAG.getNode(Opcode, sdl, VTs,
getValue(
I.getArgOperand(0)), Flags));
7045 case Intrinsic::arithmetic_fence: {
7047 getValue(
I.getArgOperand(0)).getValueType(),
7051 case Intrinsic::fma:
7057#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
7058 case Intrinsic::INTRINSIC:
7059#include "llvm/IR/ConstrainedOps.def"
7062#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
7063#include "llvm/IR/VPIntrinsics.def"
7066 case Intrinsic::fptrunc_round: {
7070 std::optional<RoundingMode> RoundMode =
7078 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
7083 DAG.getTargetConstant((
int)*RoundMode, sdl, MVT::i32));
7088 case Intrinsic::fmuladd: {
7093 getValue(
I.getArgOperand(0)).getValueType(),
7100 getValue(
I.getArgOperand(0)).getValueType(),
7116 case Intrinsic::convert_to_fp16:
7120 DAG.getTargetConstant(0, sdl,
7123 case Intrinsic::convert_from_fp16:
7129 case Intrinsic::fptosi_sat: {
7136 case Intrinsic::fptoui_sat: {
7143 case Intrinsic::set_rounding:
7149 case Intrinsic::is_fpclass: {
7150 const DataLayout DLayout =
DAG.getDataLayout();
7152 EVT ArgVT = TLI.
getValueType(DLayout,
I.getArgOperand(0)->getType());
7155 MachineFunction &MF =
DAG.getMachineFunction();
7159 Flags.setNoFPExcept(
7160 !
F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
7176 case Intrinsic::get_fpenv: {
7177 const DataLayout DLayout =
DAG.getDataLayout();
7179 Align TempAlign =
DAG.getEVTAlign(EnvVT);
7194 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
7197 Chain =
DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7198 Res =
DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
7204 case Intrinsic::set_fpenv: {
7205 const DataLayout DLayout =
DAG.getDataLayout();
7208 Align TempAlign =
DAG.getEVTAlign(EnvVT);
7221 Chain =
DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
7223 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
7226 Chain =
DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7231 case Intrinsic::reset_fpenv:
7234 case Intrinsic::get_fpmode:
7243 case Intrinsic::set_fpmode:
7248 case Intrinsic::reset_fpmode: {
7253 case Intrinsic::pcmarker: {
7258 case Intrinsic::readcyclecounter: {
7261 DAG.getVTList(MVT::i64, MVT::Other),
Op);
7266 case Intrinsic::readsteadycounter: {
7269 DAG.getVTList(MVT::i64, MVT::Other),
Op);
7274 case Intrinsic::bitreverse:
7276 getValue(
I.getArgOperand(0)).getValueType(),
7279 case Intrinsic::bswap:
7281 getValue(
I.getArgOperand(0)).getValueType(),
7284 case Intrinsic::cttz: {
7292 case Intrinsic::ctlz: {
7300 case Intrinsic::ctpop: {
7306 case Intrinsic::fshl:
7307 case Intrinsic::fshr: {
7308 bool IsFSHL =
Intrinsic == Intrinsic::fshl;
7312 EVT VT =
X.getValueType();
7323 case Intrinsic::sadd_sat: {
7329 case Intrinsic::uadd_sat: {
7335 case Intrinsic::ssub_sat: {
7341 case Intrinsic::usub_sat: {
7347 case Intrinsic::sshl_sat:
7348 case Intrinsic::ushl_sat: {
7352 EVT ShiftTy =
DAG.getTargetLoweringInfo().getShiftAmountTy(
7357 if (!
I.getType()->isVectorTy() && Op2.
getValueType() != ShiftTy) {
7360 "Unexpected shift type");
7369 case Intrinsic::smul_fix:
7370 case Intrinsic::umul_fix:
7371 case Intrinsic::smul_fix_sat:
7372 case Intrinsic::umul_fix_sat: {
7380 case Intrinsic::sdiv_fix:
7381 case Intrinsic::udiv_fix:
7382 case Intrinsic::sdiv_fix_sat:
7383 case Intrinsic::udiv_fix_sat: {
7388 Op1, Op2, Op3,
DAG, TLI));
7391 case Intrinsic::smax: {
7397 case Intrinsic::smin: {
7403 case Intrinsic::umax: {
7409 case Intrinsic::umin: {
7415 case Intrinsic::abs: {
7421 case Intrinsic::scmp: {
7428 case Intrinsic::ucmp: {
7435 case Intrinsic::stacksave: {
7443 case Intrinsic::stackrestore:
7447 case Intrinsic::get_dynamic_area_offset: {
7456 case Intrinsic::stackguard: {
7457 MachineFunction &MF =
DAG.getMachineFunction();
7463 Res =
DAG.getPtrExtOrTrunc(Res, sdl, PtrTy);
7467 LLVMContext &Ctx = *
DAG.getContext();
7468 Ctx.
diagnose(DiagnosticInfoGeneric(
"unable to lower stackguard"));
7475 MachinePointerInfo(
Global, 0), Align,
7484 case Intrinsic::stackprotector: {
7486 MachineFunction &MF =
DAG.getMachineFunction();
7506 Chain, sdl, Src, FIN,
7513 case Intrinsic::objectsize:
7516 case Intrinsic::is_constant:
7519 case Intrinsic::annotation:
7520 case Intrinsic::ptr_annotation:
7521 case Intrinsic::launder_invariant_group:
7522 case Intrinsic::strip_invariant_group:
7527 case Intrinsic::type_test:
7528 case Intrinsic::public_type_test:
7532 case Intrinsic::assume:
7533 case Intrinsic::experimental_noalias_scope_decl:
7534 case Intrinsic::var_annotation:
7535 case Intrinsic::sideeffect:
7540 case Intrinsic::codeview_annotation: {
7542 MachineFunction &MF =
DAG.getMachineFunction();
7551 case Intrinsic::init_trampoline: {
7559 Ops[4] =
DAG.getSrcValue(
I.getArgOperand(0));
7567 case Intrinsic::adjust_trampoline:
7572 case Intrinsic::gcroot: {
7573 assert(
DAG.getMachineFunction().getFunction().hasGC() &&
7574 "only valid in functions with gc specified, enforced by Verifier");
7576 const Value *Alloca =
I.getArgOperand(0)->stripPointerCasts();
7583 case Intrinsic::gcread:
7584 case Intrinsic::gcwrite:
7586 case Intrinsic::get_rounding:
7592 case Intrinsic::expect:
7593 case Intrinsic::expect_with_probability:
7599 case Intrinsic::ubsantrap:
7600 case Intrinsic::debugtrap:
7601 case Intrinsic::trap: {
7602 StringRef TrapFuncName =
7603 I.getAttributes().getFnAttr(
"trap-func-name").getValueAsString();
7604 if (TrapFuncName.
empty()) {
7605 switch (Intrinsic) {
7606 case Intrinsic::trap:
7609 case Intrinsic::debugtrap:
7612 case Intrinsic::ubsantrap:
7615 DAG.getTargetConstant(
7621 DAG.addNoMergeSiteInfo(
DAG.getRoot().getNode(),
7622 I.hasFnAttr(Attribute::NoMerge));
7626 if (Intrinsic == Intrinsic::ubsantrap) {
7627 Value *Arg =
I.getArgOperand(0);
7631 TargetLowering::CallLoweringInfo CLI(
DAG);
7632 CLI.setDebugLoc(sdl).setChain(
getRoot()).setLibCallee(
7634 DAG.getExternalSymbol(TrapFuncName.
data(),
7637 CLI.NoMerge =
I.hasFnAttr(Attribute::NoMerge);
7643 case Intrinsic::allow_runtime_check:
7644 case Intrinsic::allow_ubsan_check:
7648 case Intrinsic::uadd_with_overflow:
7649 case Intrinsic::sadd_with_overflow:
7650 case Intrinsic::usub_with_overflow:
7651 case Intrinsic::ssub_with_overflow:
7652 case Intrinsic::umul_with_overflow:
7653 case Intrinsic::smul_with_overflow: {
7655 switch (Intrinsic) {
7657 case Intrinsic::uadd_with_overflow:
Op =
ISD::UADDO;
break;
7658 case Intrinsic::sadd_with_overflow:
Op =
ISD::SADDO;
break;
7659 case Intrinsic::usub_with_overflow:
Op =
ISD::USUBO;
break;
7660 case Intrinsic::ssub_with_overflow:
Op =
ISD::SSUBO;
break;
7661 case Intrinsic::umul_with_overflow:
Op =
ISD::UMULO;
break;
7662 case Intrinsic::smul_with_overflow:
Op =
ISD::SMULO;
break;
7670 SDVTList VTs =
DAG.getVTList(ResultVT, OverflowVT);
7674 case Intrinsic::prefetch: {
7689 std::nullopt, Flags);
7695 DAG.setRoot(Result);
7698 case Intrinsic::lifetime_start:
7699 case Intrinsic::lifetime_end: {
7700 bool IsStart = (
Intrinsic == Intrinsic::lifetime_start);
7706 if (!LifetimeObject)
7711 auto SI =
FuncInfo.StaticAllocaMap.find(LifetimeObject);
7712 if (SI ==
FuncInfo.StaticAllocaMap.end())
7716 Res =
DAG.getLifetimeNode(IsStart, sdl,
getRoot(), FrameIndex);
7720 case Intrinsic::pseudoprobe: {
7728 case Intrinsic::invariant_start:
7733 case Intrinsic::invariant_end:
7736 case Intrinsic::clear_cache: {
7741 {InputChain, StartVal, EndVal});
7746 case Intrinsic::donothing:
7747 case Intrinsic::seh_try_begin:
7748 case Intrinsic::seh_scope_begin:
7749 case Intrinsic::seh_try_end:
7750 case Intrinsic::seh_scope_end:
7753 case Intrinsic::experimental_stackmap:
7756 case Intrinsic::experimental_patchpoint_void:
7757 case Intrinsic::experimental_patchpoint:
7760 case Intrinsic::experimental_gc_statepoint:
7763 case Intrinsic::experimental_gc_result:
7766 case Intrinsic::experimental_gc_relocate:
7769 case Intrinsic::instrprof_cover:
7771 case Intrinsic::instrprof_increment:
7773 case Intrinsic::instrprof_timestamp:
7775 case Intrinsic::instrprof_value_profile:
7777 case Intrinsic::instrprof_mcdc_parameters:
7779 case Intrinsic::instrprof_mcdc_tvbitmap_update:
7781 case Intrinsic::localescape: {
7782 MachineFunction &MF =
DAG.getMachineFunction();
7783 const TargetInstrInfo *
TII =
DAG.getSubtarget().getInstrInfo();
7787 for (
unsigned Idx = 0,
E =
I.arg_size(); Idx <
E; ++Idx) {
7793 "can only escape static allocas");
7798 TII->get(TargetOpcode::LOCAL_ESCAPE))
7806 case Intrinsic::localrecover: {
7808 MachineFunction &MF =
DAG.getMachineFunction();
7814 unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7818 Value *
FP =
I.getArgOperand(1);
7824 SDValue OffsetSym =
DAG.getMCSymbol(FrameAllocSym, PtrVT);
7829 SDValue Add =
DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
7835 case Intrinsic::fake_use: {
7836 Value *
V =
I.getArgOperand(0);
7841 auto FakeUseValue = [&]() ->
SDValue {
7855 if (!FakeUseValue || FakeUseValue.isUndef())
7858 Ops[1] = FakeUseValue;
7867 case Intrinsic::reloc_none: {
7872 DAG.getTargetExternalSymbol(
7878 case Intrinsic::eh_exceptionpointer:
7879 case Intrinsic::eh_exceptioncode: {
7885 SDValue N =
DAG.getCopyFromReg(
DAG.getEntryNode(), sdl, VReg, PtrVT);
7886 if (Intrinsic == Intrinsic::eh_exceptioncode)
7887 N =
DAG.getZExtOrTrunc(
N, sdl, MVT::i32);
7891 case Intrinsic::xray_customevent: {
7894 const auto &Triple =
DAG.getTarget().getTargetTriple();
7903 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
7905 Ops.push_back(LogEntryVal);
7906 Ops.push_back(StrSizeVal);
7907 Ops.push_back(Chain);
7913 MachineSDNode *MN =
DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
7916 DAG.setRoot(patchableNode);
7920 case Intrinsic::xray_typedevent: {
7923 const auto &Triple =
DAG.getTarget().getTargetTriple();
7935 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
7937 Ops.push_back(LogTypeId);
7938 Ops.push_back(LogEntryVal);
7939 Ops.push_back(StrSizeVal);
7940 Ops.push_back(Chain);
7946 MachineSDNode *MN =
DAG.getMachineNode(
7947 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys,
Ops);
7949 DAG.setRoot(patchableNode);
7953 case Intrinsic::experimental_deoptimize:
7956 case Intrinsic::stepvector:
7959 case Intrinsic::vector_reduce_fadd:
7960 case Intrinsic::vector_reduce_fmul:
7961 case Intrinsic::vector_reduce_add:
7962 case Intrinsic::vector_reduce_mul:
7963 case Intrinsic::vector_reduce_and:
7964 case Intrinsic::vector_reduce_or:
7965 case Intrinsic::vector_reduce_xor:
7966 case Intrinsic::vector_reduce_smax:
7967 case Intrinsic::vector_reduce_smin:
7968 case Intrinsic::vector_reduce_umax:
7969 case Intrinsic::vector_reduce_umin:
7970 case Intrinsic::vector_reduce_fmax:
7971 case Intrinsic::vector_reduce_fmin:
7972 case Intrinsic::vector_reduce_fmaximum:
7973 case Intrinsic::vector_reduce_fminimum:
7974 visitVectorReduce(
I, Intrinsic);
7977 case Intrinsic::icall_branch_funnel: {
7983 I.getArgOperand(1),
Offset,
DAG.getDataLayout()));
7986 "llvm.icall.branch.funnel operand must be a GlobalValue");
7987 Ops.push_back(
DAG.getTargetGlobalAddress(
Base, sdl, MVT::i64, 0));
7989 struct BranchFunnelTarget {
7995 for (
unsigned Op = 1,
N =
I.arg_size();
Op !=
N;
Op += 2) {
7998 if (ElemBase !=
Base)
8000 "to the same GlobalValue");
8006 "llvm.icall.branch.funnel operand must be a GlobalValue");
8012 [](
const BranchFunnelTarget &
T1,
const BranchFunnelTarget &T2) {
8013 return T1.Offset < T2.Offset;
8016 for (
auto &
T : Targets) {
8017 Ops.push_back(
DAG.getTargetConstant(
T.Offset, sdl, MVT::i32));
8018 Ops.push_back(
T.Target);
8021 Ops.push_back(
DAG.getRoot());
8022 SDValue N(
DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl,
8031 case Intrinsic::wasm_landingpad_index:
8037 case Intrinsic::aarch64_settag:
8038 case Intrinsic::aarch64_settag_zero: {
8039 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
8040 bool ZeroMemory =
Intrinsic == Intrinsic::aarch64_settag_zero;
8043 getValue(
I.getArgOperand(1)), MachinePointerInfo(
I.getArgOperand(0)),
8049 case Intrinsic::amdgcn_cs_chain: {
8054 Type *RetTy =
I.getType();
8064 for (
unsigned Idx : {2, 3, 1}) {
8065 TargetLowering::ArgListEntry Arg(
getValue(
I.getOperand(Idx)),
8067 Arg.setAttributes(&
I, Idx);
8068 Args.push_back(Arg);
8071 assert(Args[0].IsInReg &&
"SGPR args should be marked inreg");
8072 assert(!Args[1].IsInReg &&
"VGPR args should not be marked inreg");
8073 Args[2].IsInReg =
true;
8076 for (
unsigned Idx = 4; Idx <
I.arg_size(); ++Idx) {
8077 TargetLowering::ArgListEntry Arg(
getValue(
I.getOperand(Idx)),
8079 Arg.setAttributes(&
I, Idx);
8080 Args.push_back(Arg);
8083 TargetLowering::CallLoweringInfo CLI(
DAG);
8086 .setCallee(CC, RetTy, Callee, std::move(Args))
8089 .setConvergent(
I.isConvergent());
8091 std::pair<SDValue, SDValue>
Result =
8095 "Should've lowered as tail call");
8100 case Intrinsic::amdgcn_call_whole_wave: {
8102 bool isTailCall =
I.isTailCall();
8105 for (
unsigned Idx = 1; Idx <
I.arg_size(); ++Idx) {
8106 TargetLowering::ArgListEntry Arg(
getValue(
I.getArgOperand(Idx)),
8107 I.getArgOperand(Idx)->getType());
8108 Arg.setAttributes(&
I, Idx);
8115 Args.push_back(Arg);
8120 auto *Token = Bundle->Inputs[0].get();
8121 ConvControlToken =
getValue(Token);
8124 TargetLowering::CallLoweringInfo CLI(
DAG);
8128 getValue(
I.getArgOperand(0)), std::move(Args))
8132 .setConvergent(
I.isConvergent())
8133 .setConvergenceControlToken(ConvControlToken);
8136 std::pair<SDValue, SDValue>
Result =
8139 if (
Result.first.getNode())
8143 case Intrinsic::ptrmask: {
8159 auto HighOnes =
DAG.getNode(
8160 ISD::SHL, sdl, PtrVT,
DAG.getAllOnesConstant(sdl, PtrVT),
8161 DAG.getShiftAmountConstant(
Mask.getValueType().getFixedSizeInBits(),
8164 DAG.getZExtOrTrunc(Mask, sdl, PtrVT), HighOnes);
8165 }
else if (
Mask.getValueType() != PtrVT)
8166 Mask =
DAG.getPtrExtOrTrunc(Mask, sdl, PtrVT);
8172 case Intrinsic::threadlocal_address: {
8176 case Intrinsic::get_active_lane_mask: {
8180 EVT ElementVT =
Index.getValueType();
8191 SDValue VectorIndex =
DAG.getSplat(VecTy, sdl, Index);
8192 SDValue VectorTripCount =
DAG.getSplat(VecTy, sdl, TripCount);
8193 SDValue VectorStep =
DAG.getStepVector(sdl, VecTy);
8196 SDValue SetCC =
DAG.getSetCC(sdl, CCVT, VectorInduction,
8201 case Intrinsic::experimental_get_vector_length: {
8203 "Expected positive VF");
8208 EVT CountVT =
Count.getValueType();
8211 visitTargetIntrinsic(
I, Intrinsic);
8220 if (CountVT.
bitsLT(VT)) {
8225 SDValue MaxEVL =
DAG.getElementCount(sdl, CountVT,
8235 case Intrinsic::vector_partial_reduce_add: {
8243 case Intrinsic::vector_partial_reduce_fadd: {
8251 case Intrinsic::experimental_cttz_elts: {
8254 EVT OpVT =
Op.getValueType();
8257 visitTargetIntrinsic(
I, Intrinsic);
8273 ConstantRange VScaleRange(1,
true);
8302 case Intrinsic::vector_insert: {
8310 if (
Index.getValueType() != VectorIdxTy)
8311 Index =
DAG.getVectorIdxConstant(
Index->getAsZExtVal(), sdl);
8318 case Intrinsic::vector_extract: {
8326 if (
Index.getValueType() != VectorIdxTy)
8327 Index =
DAG.getVectorIdxConstant(
Index->getAsZExtVal(), sdl);
8333 case Intrinsic::experimental_vector_match: {
8339 EVT ResVT =
Mask.getValueType();
8345 visitTargetIntrinsic(
I, Intrinsic);
8349 SDValue Ret =
DAG.getConstant(0, sdl, ResVT);
8351 for (
unsigned i = 0; i < SearchSize; ++i) {
8354 DAG.getVectorIdxConstant(i, sdl));
8357 Ret =
DAG.getNode(
ISD::OR, sdl, ResVT, Ret, Cmp);
8363 case Intrinsic::vector_reverse:
8364 visitVectorReverse(
I);
8366 case Intrinsic::vector_splice:
8367 visitVectorSplice(
I);
8369 case Intrinsic::callbr_landingpad:
8370 visitCallBrLandingPad(
I);
8372 case Intrinsic::vector_interleave2:
8373 visitVectorInterleave(
I, 2);
8375 case Intrinsic::vector_interleave3:
8376 visitVectorInterleave(
I, 3);
8378 case Intrinsic::vector_interleave4:
8379 visitVectorInterleave(
I, 4);
8381 case Intrinsic::vector_interleave5:
8382 visitVectorInterleave(
I, 5);
8384 case Intrinsic::vector_interleave6:
8385 visitVectorInterleave(
I, 6);
8387 case Intrinsic::vector_interleave7:
8388 visitVectorInterleave(
I, 7);
8390 case Intrinsic::vector_interleave8:
8391 visitVectorInterleave(
I, 8);
8393 case Intrinsic::vector_deinterleave2:
8394 visitVectorDeinterleave(
I, 2);
8396 case Intrinsic::vector_deinterleave3:
8397 visitVectorDeinterleave(
I, 3);
8399 case Intrinsic::vector_deinterleave4:
8400 visitVectorDeinterleave(
I, 4);
8402 case Intrinsic::vector_deinterleave5:
8403 visitVectorDeinterleave(
I, 5);
8405 case Intrinsic::vector_deinterleave6:
8406 visitVectorDeinterleave(
I, 6);
8408 case Intrinsic::vector_deinterleave7:
8409 visitVectorDeinterleave(
I, 7);
8411 case Intrinsic::vector_deinterleave8:
8412 visitVectorDeinterleave(
I, 8);
8414 case Intrinsic::experimental_vector_compress:
8416 getValue(
I.getArgOperand(0)).getValueType(),
8421 case Intrinsic::experimental_convergence_anchor:
8422 case Intrinsic::experimental_convergence_entry:
8423 case Intrinsic::experimental_convergence_loop:
8424 visitConvergenceControl(
I, Intrinsic);
8426 case Intrinsic::experimental_vector_histogram_add: {
8427 visitVectorHistogram(
I, Intrinsic);
8430 case Intrinsic::experimental_vector_extract_last_active: {
8431 visitVectorExtractLastActive(
I, Intrinsic);
8434 case Intrinsic::loop_dependence_war_mask:
8439 DAG.getConstant(0, sdl, MVT::i64)));
8441 case Intrinsic::loop_dependence_raw_mask:
8446 DAG.getConstant(0, sdl, MVT::i64)));
8451void SelectionDAGBuilder::pushFPOpOutChain(
SDValue Result,
8467 PendingConstrainedFP.push_back(OutChain);
8470 PendingConstrainedFPStrict.push_back(OutChain);
8475void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
8489 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8491 SDVTList VTs =
DAG.getVTList(VT, MVT::Other);
8495 Flags.setNoFPExcept(
true);
8498 Flags.copyFMF(*FPOp);
8503#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
8504 case Intrinsic::INTRINSIC: \
8505 Opcode = ISD::STRICT_##DAGN; \
8507#include "llvm/IR/ConstrainedOps.def"
8508 case Intrinsic::experimental_constrained_fmuladd: {
8515 pushFPOpOutChain(
Mul, EB);
8538 if (TM.Options.NoNaNsFPMath)
8546 pushFPOpOutChain(Result, EB);
8553 std::optional<unsigned> ResOPC;
8555 case Intrinsic::vp_ctlz: {
8557 ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
8560 case Intrinsic::vp_cttz: {
8562 ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
8565 case Intrinsic::vp_cttz_elts: {
8567 ResOPC = IsZeroPoison ? ISD::VP_CTTZ_ELTS_ZERO_UNDEF : ISD::VP_CTTZ_ELTS;
8570#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \
8571 case Intrinsic::VPID: \
8572 ResOPC = ISD::VPSD; \
8574#include "llvm/IR/VPIntrinsics.def"
8579 "Inconsistency: no SDNode available for this VPIntrinsic!");
8581 if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
8582 *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
8584 return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
8585 : ISD::VP_REDUCE_FMUL;
8591void SelectionDAGBuilder::visitVPLoad(
8603 Alignment =
DAG.getEVTAlign(VT);
8606 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8607 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8610 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8611 MachinePointerInfo(PtrOperand), MMOFlags,
8613 LD =
DAG.getLoadVP(VT,
DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8620void SelectionDAGBuilder::visitVPLoadFF(
8623 assert(OpValues.
size() == 3 &&
"Unexpected number of operands");
8633 Alignment =
DAG.getEVTAlign(VT);
8636 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8637 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8640 LD =
DAG.getLoadFFVP(VT,
DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8645 setValue(&VPIntrin,
DAG.getMergeValues({LD.getValue(0), Trunc},
DL));
8648void SelectionDAGBuilder::visitVPGather(
8652 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8664 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8666 *Alignment, AAInfo, Ranges);
8676 EVT IdxVT =
Index.getValueType();
8682 LD =
DAG.getGatherVP(
8683 DAG.getVTList(VT, MVT::Other), VT,
DL,
8684 {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
8690void SelectionDAGBuilder::visitVPStore(
8694 EVT VT = OpValues[0].getValueType();
8699 Alignment =
DAG.getEVTAlign(VT);
8702 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8705 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8706 MachinePointerInfo(PtrOperand), MMOFlags,
8715void SelectionDAGBuilder::visitVPScatter(
8718 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8720 EVT VT = OpValues[0].getValueType();
8730 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8732 *Alignment, AAInfo);
8742 EVT IdxVT =
Index.getValueType();
8748 ST =
DAG.getScatterVP(
DAG.getVTList(MVT::Other), VT,
DL,
8749 {getMemoryRoot(), OpValues[0], Base, Index, Scale,
8750 OpValues[2], OpValues[3]},
8756void SelectionDAGBuilder::visitVPStridedLoad(
8768 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8770 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8773 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8775 *Alignment, AAInfo, Ranges);
8777 SDValue LD =
DAG.getStridedLoadVP(VT,
DL, InChain, OpValues[0], OpValues[1],
8778 OpValues[2], OpValues[3], MMO,
8786void SelectionDAGBuilder::visitVPStridedStore(
8790 EVT VT = OpValues[0].getValueType();
8796 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8799 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8801 *Alignment, AAInfo);
8805 DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8813void SelectionDAGBuilder::visitVPCmp(
const VPCmpIntrinsic &VPIntrin) {
8814 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8825 if (TM.Options.NoNaNsFPMath)
8838 "Unexpected target EVL type");
8841 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
8844 DAG.getSetCCVP(
DL, DestVT, Op1, Op2, Condition, MaskOp, EVL));
8847void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8855 return visitVPCmp(*CmpI);
8858 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8860 SDVTList VTs =
DAG.getVTList(ValueVTs);
8866 "Unexpected target EVL type");
8870 for (
unsigned I = 0;
I < VPIntrin.
arg_size(); ++
I) {
8872 if (
I == EVLParamPos)
8879 SDNodeFlags SDFlags;
8887 visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8889 case ISD::VP_LOAD_FF:
8890 visitVPLoadFF(VPIntrin, ValueVTs[0], ValueVTs[1], OpValues);
8892 case ISD::VP_GATHER:
8893 visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8895 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8896 visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8899 visitVPStore(VPIntrin, OpValues);
8901 case ISD::VP_SCATTER:
8902 visitVPScatter(VPIntrin, OpValues);
8904 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8905 visitVPStridedStore(VPIntrin, OpValues);
8907 case ISD::VP_FMULADD: {
8908 assert(OpValues.
size() == 5 &&
"Unexpected number of operands");
8909 SDNodeFlags SDFlags;
8914 setValue(&VPIntrin,
DAG.getNode(ISD::VP_FMA,
DL, VTs, OpValues, SDFlags));
8917 ISD::VP_FMUL,
DL, VTs,
8918 {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8920 DAG.getNode(ISD::VP_FADD,
DL, VTs,
8921 {
Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8926 case ISD::VP_IS_FPCLASS: {
8927 const DataLayout DLayout =
DAG.getDataLayout();
8929 auto Constant = OpValues[1]->getAsZExtVal();
8932 {OpValues[0],
Check, OpValues[2], OpValues[3]});
8936 case ISD::VP_INTTOPTR: {
8947 case ISD::VP_PTRTOINT: {
8949 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
8962 case ISD::VP_CTLZ_ZERO_UNDEF:
8964 case ISD::VP_CTTZ_ZERO_UNDEF:
8965 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
8966 case ISD::VP_CTTZ_ELTS: {
8968 DAG.getNode(Opcode,
DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8978 MachineFunction &MF =
DAG.getMachineFunction();
8986 unsigned CallSiteIndex =
FuncInfo.getCurrentCallSite();
8987 if (CallSiteIndex) {
9001 assert(BeginLabel &&
"BeginLabel should've been set");
9003 MachineFunction &MF =
DAG.getMachineFunction();
9015 assert(
II &&
"II should've been set");
9026std::pair<SDValue, SDValue>
9040 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
9043 "Non-null chain expected with non-tail call!");
9044 assert((Result.second.getNode() || !Result.first.getNode()) &&
9045 "Null value expected with tail call!");
9047 if (!Result.second.getNode()) {
9054 PendingExports.clear();
9056 DAG.setRoot(Result.second);
9074 if (!isMustTailCall &&
9075 Caller->getFnAttribute(
"disable-tail-calls").getValueAsBool())
9081 if (
DAG.getTargetLoweringInfo().supportSwiftError() &&
9082 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
9091 bool isTailCall,
bool isMustTailCall,
9094 auto &
DL =
DAG.getDataLayout();
9101 const Value *SwiftErrorVal =
nullptr;
9108 const Value *V = *
I;
9111 if (V->getType()->isEmptyTy())
9116 Entry.setAttributes(&CB,
I - CB.
arg_begin());
9128 Args.push_back(Entry);
9139 Value *V = Bundle->Inputs[0];
9141 Entry.IsCFGuardTarget =
true;
9142 Args.push_back(Entry);
9155 "Target doesn't support calls with kcfi operand bundles.");
9163 auto *Token = Bundle->Inputs[0].get();
9164 ConvControlToken =
getValue(Token);
9175 .
setCallee(RetTy, FTy, Callee, std::move(Args), CB)
9188 "This target doesn't support calls with ptrauth operand bundles.");
9192 std::pair<SDValue, SDValue> Result =
lowerInvokable(CLI, EHPadBB);
9194 if (Result.first.getNode()) {
9209 DAG.setRoot(CopyNode);
9225 LoadTy, Builder.DAG.getDataLayout()))
9226 return Builder.getValue(LoadCst);
9232 bool ConstantMemory =
false;
9235 if (Builder.BatchAA && Builder.BatchAA->pointsToConstantMemory(PtrVal)) {
9236 Root = Builder.DAG.getEntryNode();
9237 ConstantMemory =
true;
9240 Root = Builder.DAG.getRoot();
9245 Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr,
9248 if (!ConstantMemory)
9249 Builder.PendingLoads.push_back(LoadVal.
getValue(1));
9255void SelectionDAGBuilder::processIntegerCallValue(
const Instruction &
I,
9258 EVT VT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9269bool SelectionDAGBuilder::visitMemCmpBCmpCall(
const CallInst &
I) {
9270 const Value *
LHS =
I.getArgOperand(0), *
RHS =
I.getArgOperand(1);
9271 const Value *
Size =
I.getArgOperand(2);
9274 EVT CallVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9280 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9284 if (Res.first.getNode()) {
9285 processIntegerCallValue(
I, Res.first,
true);
9299 auto hasFastLoadsAndCompare = [&](
unsigned NumBits) {
9300 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
9322 switch (NumBitsToCompare) {
9334 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
9347 LoadL =
DAG.getBitcast(CmpVT, LoadL);
9348 LoadR =
DAG.getBitcast(CmpVT, LoadR);
9352 processIntegerCallValue(
I, Cmp,
false);
9361bool SelectionDAGBuilder::visitMemChrCall(
const CallInst &
I) {
9362 const Value *Src =
I.getArgOperand(0);
9363 const Value *
Char =
I.getArgOperand(1);
9364 const Value *
Length =
I.getArgOperand(2);
9366 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9367 std::pair<SDValue, SDValue> Res =
9370 MachinePointerInfo(Src));
9371 if (Res.first.getNode()) {
9385bool SelectionDAGBuilder::visitMemPCpyCall(
const CallInst &
I) {
9390 Align DstAlign =
DAG.InferPtrAlign(Dst).valueOrOne();
9391 Align SrcAlign =
DAG.InferPtrAlign(Src).valueOrOne();
9393 Align Alignment = std::min(DstAlign, SrcAlign);
9402 Root, sdl, Dst, Src,
Size, Alignment,
false,
false,
nullptr,
9403 std::nullopt, MachinePointerInfo(
I.getArgOperand(0)),
9404 MachinePointerInfo(
I.getArgOperand(1)),
I.getAAMetadata());
9406 "** memcpy should not be lowered as TailCall in mempcpy context **");
9410 Size =
DAG.getSExtOrTrunc(
Size, sdl, Dst.getValueType());
9423bool SelectionDAGBuilder::visitStrCpyCall(
const CallInst &
I,
bool isStpcpy) {
9424 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9426 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9427 std::pair<SDValue, SDValue> Res =
9430 MachinePointerInfo(Arg0),
9431 MachinePointerInfo(Arg1), isStpcpy);
9432 if (Res.first.getNode()) {
9434 DAG.setRoot(Res.second);
9446bool SelectionDAGBuilder::visitStrCmpCall(
const CallInst &
I) {
9447 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9449 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9450 std::pair<SDValue, SDValue> Res =
9453 MachinePointerInfo(Arg0),
9454 MachinePointerInfo(Arg1));
9455 if (Res.first.getNode()) {
9456 processIntegerCallValue(
I, Res.first,
true);
9469bool SelectionDAGBuilder::visitStrLenCall(
const CallInst &
I) {
9470 const Value *Arg0 =
I.getArgOperand(0);
9472 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9475 if (Res.first.getNode()) {
9476 processIntegerCallValue(
I, Res.first,
false);
9489bool SelectionDAGBuilder::visitStrNLenCall(
const CallInst &
I) {
9490 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9492 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9493 std::pair<SDValue, SDValue> Res =
9496 MachinePointerInfo(Arg0));
9497 if (Res.first.getNode()) {
9498 processIntegerCallValue(
I, Res.first,
false);
9511bool SelectionDAGBuilder::visitUnaryFloatCall(
const CallInst &
I,
9516 if (!
I.onlyReadsMemory() ||
I.isStrictFP())
9533bool SelectionDAGBuilder::visitBinaryFloatCall(
const CallInst &
I,
9538 if (!
I.onlyReadsMemory() ||
I.isStrictFP())
9551void SelectionDAGBuilder::visitCall(
const CallInst &
I) {
9553 if (
I.isInlineAsm()) {
9560 if (Function *
F =
I.getCalledFunction()) {
9561 if (
F->isDeclaration()) {
9563 if (
unsigned IID =
F->getIntrinsicID()) {
9564 visitIntrinsicCall(
I, IID);
9575 if (!
I.isNoBuiltin() && !
F->hasLocalLinkage() &&
F->hasName() &&
9576 LibInfo->getLibFunc(*
F, Func) &&
LibInfo->hasOptimizedCodeGen(Func)) {
9580 if (visitMemCmpBCmpCall(
I))
9583 case LibFunc_copysign:
9584 case LibFunc_copysignf:
9585 case LibFunc_copysignl:
9588 if (
I.onlyReadsMemory()) {
9640 case LibFunc_atan2f:
9641 case LibFunc_atan2l:
9666 case LibFunc_sqrt_finite:
9667 case LibFunc_sqrtf_finite:
9668 case LibFunc_sqrtl_finite:
9685 case LibFunc_exp10f:
9686 case LibFunc_exp10l:
9691 case LibFunc_ldexpf:
9692 case LibFunc_ldexpl:
9696 case LibFunc_memcmp:
9697 if (visitMemCmpBCmpCall(
I))
9700 case LibFunc_mempcpy:
9701 if (visitMemPCpyCall(
I))
9704 case LibFunc_memchr:
9705 if (visitMemChrCall(
I))
9708 case LibFunc_strcpy:
9709 if (visitStrCpyCall(
I,
false))
9712 case LibFunc_stpcpy:
9713 if (visitStrCpyCall(
I,
true))
9716 case LibFunc_strcmp:
9717 if (visitStrCmpCall(
I))
9720 case LibFunc_strlen:
9721 if (visitStrLenCall(
I))
9724 case LibFunc_strnlen:
9725 if (visitStrNLenCall(
I))
9749 if (
I.hasDeoptState())
9766 const Value *Discriminator = PAB->Inputs[1];
9768 assert(
Key->getType()->isIntegerTy(32) &&
"Invalid ptrauth key");
9769 assert(Discriminator->getType()->isIntegerTy(64) &&
9770 "Invalid ptrauth discriminator");
9775 if (CalleeCPA->isKnownCompatibleWith(
Key, Discriminator,
9776 DAG.getDataLayout()))
9816 for (
const auto &Code : Codes)
9831 SDISelAsmOperandInfo &MatchingOpInfo,
9833 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
9839 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9841 OpInfo.ConstraintVT);
9842 std::pair<unsigned, const TargetRegisterClass *> InputRC =
9844 MatchingOpInfo.ConstraintVT);
9845 const bool OutOpIsIntOrFP =
9846 OpInfo.ConstraintVT.isInteger() || OpInfo.ConstraintVT.isFloatingPoint();
9847 const bool InOpIsIntOrFP = MatchingOpInfo.ConstraintVT.isInteger() ||
9848 MatchingOpInfo.ConstraintVT.isFloatingPoint();
9849 if ((OutOpIsIntOrFP != InOpIsIntOrFP) || (MatchRC.second != InputRC.second)) {
9852 " with a matching output constraint of"
9853 " incompatible type!");
9855 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9862 SDISelAsmOperandInfo &OpInfo,
9875 const Value *OpVal = OpInfo.CallOperandVal;
9893 DL.getPrefTypeAlign(Ty),
false,
9896 Chain = DAG.
getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9899 OpInfo.CallOperand = StackSlot;
9912static std::optional<unsigned>
9914 SDISelAsmOperandInfo &OpInfo,
9915 SDISelAsmOperandInfo &RefOpInfo) {
9926 return std::nullopt;
9930 unsigned AssignedReg;
9933 &
TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
9936 return std::nullopt;
9941 const MVT RegVT = *
TRI.legalclasstypes_begin(*RC);
9943 if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
9952 !
TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
9957 if (RegVT.
getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
9962 OpInfo.CallOperand =
9964 OpInfo.ConstraintVT = RegVT;
9968 }
else if (RegVT.
isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
9971 OpInfo.CallOperand =
9973 OpInfo.ConstraintVT = VT;
9980 if (OpInfo.isMatchingInputConstraint())
9981 return std::nullopt;
9983 EVT ValueVT = OpInfo.ConstraintVT;
9984 if (OpInfo.ConstraintVT == MVT::Other)
9988 unsigned NumRegs = 1;
9989 if (OpInfo.ConstraintVT != MVT::Other)
10004 I = std::find(
I, RC->
end(), AssignedReg);
10005 if (
I == RC->
end()) {
10008 return {AssignedReg};
10012 for (; NumRegs; --NumRegs, ++
I) {
10013 assert(
I != RC->
end() &&
"Ran out of registers to allocate!");
10018 OpInfo.AssignedRegs =
RegsForValue(Regs, RegVT, ValueVT);
10019 return std::nullopt;
10024 const std::vector<SDValue> &AsmNodeOperands) {
10027 for (; OperandNo; --OperandNo) {
10029 unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
10032 (
F.isRegDefKind() ||
F.isRegDefEarlyClobberKind() ||
F.isMemKind()) &&
10033 "Skipped past definitions?");
10034 CurOp +=
F.getNumOperandRegisters() + 1;
10042 unsigned Flags = 0;
10045 explicit ExtraFlags(
const CallBase &
Call) {
10047 if (
IA->hasSideEffects())
10049 if (
IA->isAlignStack())
10056 void update(
const TargetLowering::AsmOperandInfo &OpInfo) {
10072 unsigned get()
const {
return Flags; }
10095void SelectionDAGBuilder::visitInlineAsm(
const CallBase &
Call,
10102 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10104 DAG.getDataLayout(),
DAG.getSubtarget().getRegisterInfo(),
Call);
10108 bool HasSideEffect =
IA->hasSideEffects();
10109 ExtraFlags ExtraInfo(
Call);
10111 for (
auto &
T : TargetConstraints) {
10112 ConstraintOperands.
push_back(SDISelAsmOperandInfo(
T));
10113 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.
back();
10115 if (OpInfo.CallOperandVal)
10116 OpInfo.CallOperand =
getValue(OpInfo.CallOperandVal);
10118 if (!HasSideEffect)
10119 HasSideEffect = OpInfo.hasMemory(TLI);
10131 return emitInlineAsmError(
Call,
"constraint '" + Twine(
T.ConstraintCode) +
10132 "' expects an integer constant "
10135 ExtraInfo.update(
T);
10143 if (EmitEHLabels) {
10144 assert(EHPadBB &&
"InvokeInst must have an EHPadBB");
10148 if (IsCallBr || EmitEHLabels) {
10156 if (EmitEHLabels) {
10157 Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
10162 IA->collectAsmStrs(AsmStrs);
10165 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10173 if (OpInfo.hasMatchingInput()) {
10174 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
10205 if (OpInfo.isIndirect &&
isFunction(OpInfo.CallOperand) &&
10208 OpInfo.isIndirect =
false;
10215 !OpInfo.isIndirect) {
10216 assert((OpInfo.isMultipleAlternative ||
10218 "Can only indirectify direct input operands!");
10224 OpInfo.CallOperandVal =
nullptr;
10227 OpInfo.isIndirect =
true;
10233 std::vector<SDValue> AsmNodeOperands;
10234 AsmNodeOperands.push_back(
SDValue());
10235 AsmNodeOperands.push_back(
DAG.getTargetExternalSymbol(
10242 AsmNodeOperands.push_back(
DAG.getMDNode(SrcLoc));
10246 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10251 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10253 SDISelAsmOperandInfo &RefOpInfo =
10254 OpInfo.isMatchingInputConstraint()
10255 ? ConstraintOperands[OpInfo.getMatchedOperand()]
10257 const auto RegError =
10260 const MachineFunction &MF =
DAG.getMachineFunction();
10262 const char *
RegName =
TRI.getName(*RegError);
10263 emitInlineAsmError(
Call,
"register '" + Twine(
RegName) +
10264 "' allocated for constraint '" +
10265 Twine(OpInfo.ConstraintCode) +
10266 "' does not match required type");
10270 auto DetectWriteToReservedRegister = [&]() {
10271 const MachineFunction &MF =
DAG.getMachineFunction();
10276 emitInlineAsmError(
Call,
"write to reserved register '" +
10285 !OpInfo.isMatchingInputConstraint())) &&
10286 "Only address as input operand is allowed.");
10288 switch (OpInfo.Type) {
10294 "Failed to convert memory constraint code to constraint id.");
10298 OpFlags.setMemConstraint(ConstraintID);
10299 AsmNodeOperands.push_back(
DAG.getTargetConstant(OpFlags,
getCurSDLoc(),
10301 AsmNodeOperands.push_back(OpInfo.CallOperand);
10306 if (OpInfo.AssignedRegs.
Regs.empty()) {
10307 emitInlineAsmError(
10308 Call,
"couldn't allocate output register for constraint '" +
10309 Twine(OpInfo.ConstraintCode) +
"'");
10313 if (DetectWriteToReservedRegister())
10327 SDValue InOperandVal = OpInfo.CallOperand;
10329 if (OpInfo.isMatchingInputConstraint()) {
10334 InlineAsm::Flag
Flag(AsmNodeOperands[CurOp]->getAsZExtVal());
10335 if (
Flag.isRegDefKind() ||
Flag.isRegDefEarlyClobberKind()) {
10336 if (OpInfo.isIndirect) {
10338 emitInlineAsmError(
Call,
"inline asm not supported yet: "
10339 "don't know how to handle tied "
10340 "indirect register inputs");
10345 MachineFunction &MF =
DAG.getMachineFunction();
10350 MVT RegVT =
R->getSimpleValueType(0);
10351 const TargetRegisterClass *RC =
10354 :
TRI.getMinimalPhysRegClass(TiedReg);
10355 for (
unsigned i = 0, e =
Flag.getNumOperandRegisters(); i != e; ++i)
10358 RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.
getValueType());
10362 MatchedRegs.getCopyToRegs(InOperandVal,
DAG, dl, Chain, &Glue, &
Call);
10364 OpInfo.getMatchedOperand(), dl,
DAG,
10369 assert(
Flag.isMemKind() &&
"Unknown matching constraint!");
10370 assert(
Flag.getNumOperandRegisters() == 1 &&
10371 "Unexpected number of operands");
10374 Flag.clearMemConstraint();
10375 Flag.setMatchingOp(OpInfo.getMatchedOperand());
10376 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10378 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
10389 std::vector<SDValue>
Ops;
10395 emitInlineAsmError(
Call,
"value out of range for constraint '" +
10396 Twine(OpInfo.ConstraintCode) +
"'");
10400 emitInlineAsmError(
Call,
10401 "invalid operand for inline asm constraint '" +
10402 Twine(OpInfo.ConstraintCode) +
"'");
10408 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10415 assert((OpInfo.isIndirect ||
10417 "Operand must be indirect to be a mem!");
10420 "Memory operands expect pointer values");
10425 "Failed to convert memory constraint code to constraint id.");
10429 ResOpType.setMemConstraint(ConstraintID);
10430 AsmNodeOperands.push_back(
DAG.getTargetConstant(ResOpType,
10433 AsmNodeOperands.push_back(InOperandVal);
10441 "Failed to convert memory constraint code to constraint id.");
10445 SDValue AsmOp = InOperandVal;
10449 AsmOp =
DAG.getTargetGlobalAddress(GA->getGlobal(),
getCurSDLoc(),
10455 ResOpType.setMemConstraint(ConstraintID);
10457 AsmNodeOperands.push_back(
10460 AsmNodeOperands.push_back(AsmOp);
10466 emitInlineAsmError(
Call,
"unknown asm constraint '" +
10467 Twine(OpInfo.ConstraintCode) +
"'");
10472 if (OpInfo.isIndirect) {
10473 emitInlineAsmError(
10474 Call,
"Don't know how to handle indirect register inputs yet "
10475 "for constraint '" +
10476 Twine(OpInfo.ConstraintCode) +
"'");
10481 if (OpInfo.AssignedRegs.
Regs.empty()) {
10482 emitInlineAsmError(
Call,
10483 "couldn't allocate input reg for constraint '" +
10484 Twine(OpInfo.ConstraintCode) +
"'");
10488 if (DetectWriteToReservedRegister())
10497 0, dl,
DAG, AsmNodeOperands);
10503 if (!OpInfo.AssignedRegs.
Regs.empty())
10513 if (Glue.
getNode()) AsmNodeOperands.push_back(Glue);
10517 DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
10529 ResultTypes = StructResult->elements();
10530 else if (!CallResultType->
isVoidTy())
10531 ResultTypes =
ArrayRef(CallResultType);
10533 auto CurResultType = ResultTypes.
begin();
10534 auto handleRegAssign = [&](
SDValue V) {
10535 assert(CurResultType != ResultTypes.
end() &&
"Unexpected value");
10536 assert((*CurResultType)->isSized() &&
"Unexpected unsized type");
10537 EVT ResultVT = TLI.
getValueType(
DAG.getDataLayout(), *CurResultType);
10549 if (ResultVT !=
V.getValueType() &&
10552 else if (ResultVT !=
V.getValueType() && ResultVT.
isInteger() &&
10553 V.getValueType().isInteger()) {
10559 assert(ResultVT ==
V.getValueType() &&
"Asm result value mismatch!");
10565 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10569 if (OpInfo.AssignedRegs.
Regs.empty())
10572 switch (OpInfo.ConstraintType) {
10576 Chain, &Glue, &
Call);
10588 assert(
false &&
"Unexpected unknown constraint");
10592 if (OpInfo.isIndirect) {
10593 const Value *Ptr = OpInfo.CallOperandVal;
10594 assert(Ptr &&
"Expected value CallOperandVal for indirect asm operand");
10596 MachinePointerInfo(Ptr));
10603 handleRegAssign(V);
10605 handleRegAssign(Val);
10611 if (!ResultValues.
empty()) {
10612 assert(CurResultType == ResultTypes.
end() &&
10613 "Mismatch in number of ResultTypes");
10615 "Mismatch in number of output operands in asm result");
10618 DAG.getVTList(ResultVTs), ResultValues);
10623 if (!OutChains.
empty())
10626 if (EmitEHLabels) {
10631 if (ResultValues.
empty() || HasSideEffect || !OutChains.
empty() || IsCallBr ||
10633 DAG.setRoot(Chain);
10636void SelectionDAGBuilder::emitInlineAsmError(
const CallBase &
Call,
10637 const Twine &Message) {
10638 LLVMContext &Ctx = *
DAG.getContext();
10642 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10646 if (ValueVTs.
empty())
10650 for (
const EVT &VT : ValueVTs)
10651 Ops.push_back(
DAG.getUNDEF(VT));
10656void SelectionDAGBuilder::visitVAStart(
const CallInst &
I) {
10660 DAG.getSrcValue(
I.getArgOperand(0))));
10663void SelectionDAGBuilder::visitVAArg(
const VAArgInst &
I) {
10664 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10665 const DataLayout &
DL =
DAG.getDataLayout();
10669 DL.getABITypeAlign(
I.getType()).value());
10670 DAG.setRoot(
V.getValue(1));
10672 if (
I.getType()->isPointerTy())
10673 V =
DAG.getPtrExtOrTrunc(
10678void SelectionDAGBuilder::visitVAEnd(
const CallInst &
I) {
10682 DAG.getSrcValue(
I.getArgOperand(0))));
10685void SelectionDAGBuilder::visitVACopy(
const CallInst &
I) {
10690 DAG.getSrcValue(
I.getArgOperand(0)),
10691 DAG.getSrcValue(
I.getArgOperand(1))));
10697 std::optional<ConstantRange> CR =
getRange(
I);
10699 if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped())
10702 APInt Lo = CR->getUnsignedMin();
10703 if (!
Lo.isMinValue())
10706 APInt Hi = CR->getUnsignedMax();
10707 unsigned Bits = std::max(
Hi.getActiveBits(),
10715 DAG.getValueType(SmallVT));
10716 unsigned NumVals =
Op.getNode()->getNumValues();
10722 Ops.push_back(ZExt);
10723 for (
unsigned I = 1;
I != NumVals; ++
I)
10724 Ops.push_back(
Op.getValue(
I));
10726 return DAG.getMergeValues(
Ops,
SL);
10736 SDValue TestConst =
DAG.getTargetConstant(Classes,
SDLoc(), MVT::i32);
10744 for (
unsigned I = 0, E =
Ops.size();
I != E; ++
I) {
10747 MergeOp, TestConst);
10750 return DAG.getMergeValues(
Ops,
SL);
10761 unsigned ArgIdx,
unsigned NumArgs,
SDValue Callee,
Type *ReturnTy,
10764 Args.reserve(NumArgs);
10768 for (
unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
10769 ArgI != ArgE; ++ArgI) {
10770 const Value *V =
Call->getOperand(ArgI);
10772 assert(!V->getType()->isEmptyTy() &&
"Empty type passed to intrinsic.");
10775 Entry.setAttributes(
Call, ArgI);
10776 Args.push_back(Entry);
10781 .
setCallee(
Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
10810 for (
unsigned I = StartIdx;
I <
Call.arg_size();
I++) {
10819 Ops.push_back(Builder.getValue(
Call.getArgOperand(
I)));
10825void SelectionDAGBuilder::visitStackmap(
const CallInst &CI) {
10851 Ops.push_back(Chain);
10852 Ops.push_back(InGlue);
10859 assert(
ID.getValueType() == MVT::i64);
10861 DAG.getTargetConstant(
ID->getAsZExtVal(),
DL,
ID.getValueType());
10862 Ops.push_back(IDConst);
10868 Ops.push_back(ShadConst);
10874 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
10878 Chain =
DAG.getCALLSEQ_END(Chain, 0, 0, InGlue,
DL);
10883 DAG.setRoot(Chain);
10886 FuncInfo.MF->getFrameInfo().setHasStackMap();
10890void SelectionDAGBuilder::visitPatchpoint(
const CallBase &CB,
10907 Callee =
DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
10910 Callee =
DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
10911 SDLoc(SymbolicCallee),
10912 SymbolicCallee->getValueType(0));
10922 "Not enough arguments provided to the patchpoint intrinsic");
10925 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
10929 TargetLowering::CallLoweringInfo CLI(
DAG);
10934 SDNode *CallEnd =
Result.second.getNode();
10943 "Expected a callseq node.");
10945 bool HasGlue =
Call->getGluedNode();
10970 Ops.push_back(Callee);
10976 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
10977 Ops.push_back(
DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
10980 Ops.push_back(
DAG.getTargetConstant((
unsigned)CC, dl, MVT::i32));
10985 for (
unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i !=
e; ++i)
10996 if (IsAnyRegCC && HasDef) {
10998 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
11001 assert(ValueVTs.
size() == 1 &&
"Expected only one return value type.");
11006 NodeTys =
DAG.getVTList(ValueVTs);
11008 NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
11025 if (IsAnyRegCC && HasDef) {
11028 DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
11034 FuncInfo.MF->getFrameInfo().setHasPatchPoint();
11037void SelectionDAGBuilder::visitVectorReduce(
const CallInst &
I,
11039 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
11042 if (
I.arg_size() > 1)
11047 SDNodeFlags SDFlags;
11051 switch (Intrinsic) {
11052 case Intrinsic::vector_reduce_fadd:
11060 case Intrinsic::vector_reduce_fmul:
11068 case Intrinsic::vector_reduce_add:
11071 case Intrinsic::vector_reduce_mul:
11074 case Intrinsic::vector_reduce_and:
11077 case Intrinsic::vector_reduce_or:
11080 case Intrinsic::vector_reduce_xor:
11083 case Intrinsic::vector_reduce_smax:
11086 case Intrinsic::vector_reduce_smin:
11089 case Intrinsic::vector_reduce_umax:
11092 case Intrinsic::vector_reduce_umin:
11095 case Intrinsic::vector_reduce_fmax:
11098 case Intrinsic::vector_reduce_fmin:
11101 case Intrinsic::vector_reduce_fmaximum:
11104 case Intrinsic::vector_reduce_fminimum:
11118 Attrs.push_back(Attribute::SExt);
11120 Attrs.push_back(Attribute::ZExt);
11122 Attrs.push_back(Attribute::InReg);
11124 return AttributeList::get(CLI.
RetTy->
getContext(), AttributeList::ReturnIndex,
11132std::pair<SDValue, SDValue>
11146 "Only supported for non-aggregate returns");
11149 for (
Type *Ty : RetOrigTys)
11158 RetOrigTys.
swap(OldRetOrigTys);
11159 RetVTs.
swap(OldRetVTs);
11160 Offsets.swap(OldOffsets);
11162 for (
size_t i = 0, e = OldRetVTs.
size(); i != e; ++i) {
11163 EVT RetVT = OldRetVTs[i];
11167 unsigned RegisterVTByteSZ = RegisterVT.
getSizeInBits() / 8;
11168 RetOrigTys.
append(NumRegs, OldRetOrigTys[i]);
11169 RetVTs.
append(NumRegs, RegisterVT);
11170 for (
unsigned j = 0; j != NumRegs; ++j)
11183 int DemoteStackIdx = -100;
11196 ArgListEntry Entry(DemoteStackSlot, StackSlotPtrType);
11197 Entry.IsSRet =
true;
11198 Entry.Alignment = Alignment;
11210 for (
unsigned I = 0, E = RetVTs.
size();
I != E; ++
I) {
11212 if (NeedsRegBlock) {
11213 Flags.setInConsecutiveRegs();
11214 if (
I == RetVTs.
size() - 1)
11215 Flags.setInConsecutiveRegsLast();
11217 EVT VT = RetVTs[
I];
11221 for (
unsigned i = 0; i != NumRegs; ++i) {
11235 CLI.
Ins.push_back(Ret);
11244 if (Arg.IsSwiftError) {
11250 CLI.
Ins.push_back(Ret);
11258 for (
unsigned i = 0, e = Args.size(); i != e; ++i) {
11262 Type *FinalType = Args[i].Ty;
11263 if (Args[i].IsByVal)
11264 FinalType = Args[i].IndirectType;
11267 for (
unsigned Value = 0, NumValues = OrigArgTys.
size();
Value != NumValues;
11270 Type *ArgTy = OrigArgTy;
11271 if (Args[i].Ty != Args[i].OrigTy) {
11272 assert(
Value == 0 &&
"Only supported for non-aggregate arguments");
11273 ArgTy = Args[i].Ty;
11278 Args[i].Node.getResNo() +
Value);
11285 Flags.setOrigAlign(OriginalAlignment);
11290 Flags.setPointer();
11293 if (Args[i].IsZExt)
11295 if (Args[i].IsSExt)
11297 if (Args[i].IsNoExt)
11299 if (Args[i].IsInReg) {
11306 Flags.setHvaStart();
11312 if (Args[i].IsSRet)
11314 if (Args[i].IsSwiftSelf)
11315 Flags.setSwiftSelf();
11316 if (Args[i].IsSwiftAsync)
11317 Flags.setSwiftAsync();
11318 if (Args[i].IsSwiftError)
11319 Flags.setSwiftError();
11320 if (Args[i].IsCFGuardTarget)
11321 Flags.setCFGuardTarget();
11322 if (Args[i].IsByVal)
11324 if (Args[i].IsByRef)
11326 if (Args[i].IsPreallocated) {
11327 Flags.setPreallocated();
11335 if (Args[i].IsInAlloca) {
11336 Flags.setInAlloca();
11345 if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
11346 unsigned FrameSize =
DL.getTypeAllocSize(Args[i].IndirectType);
11347 Flags.setByValSize(FrameSize);
11350 if (
auto MA = Args[i].Alignment)
11354 }
else if (
auto MA = Args[i].Alignment) {
11357 MemAlign = OriginalAlignment;
11359 Flags.setMemAlign(MemAlign);
11360 if (Args[i].IsNest)
11363 Flags.setInConsecutiveRegs();
11366 unsigned NumParts =
11371 if (Args[i].IsSExt)
11373 else if (Args[i].IsZExt)
11378 if (Args[i].IsReturned && !
Op.getValueType().isVector() &&
11383 Args[i].Ty->getPointerAddressSpace())) &&
11384 RetVTs.
size() == NumValues &&
"unexpected use of 'returned'");
11397 CLI.
RetZExt == Args[i].IsZExt))
11398 Flags.setReturned();
11404 for (
unsigned j = 0; j != NumParts; ++j) {
11410 j * Parts[j].
getValueType().getStoreSize().getKnownMinValue());
11411 if (NumParts > 1 && j == 0)
11415 if (j == NumParts - 1)
11419 CLI.
Outs.push_back(MyFlags);
11420 CLI.
OutVals.push_back(Parts[j]);
11423 if (NeedsRegBlock &&
Value == NumValues - 1)
11424 CLI.
Outs[CLI.
Outs.size() - 1].Flags.setInConsecutiveRegsLast();
11436 "LowerCall didn't return a valid chain!");
11438 "LowerCall emitted a return value for a tail call!");
11440 "LowerCall didn't emit the correct number of values!");
11452 for (
unsigned i = 0, e = CLI.
Ins.size(); i != e; ++i) {
11453 assert(InVals[i].
getNode() &&
"LowerCall emitted a null value!");
11454 assert(
EVT(CLI.
Ins[i].VT) == InVals[i].getValueType() &&
11455 "LowerCall emitted a value with the wrong type!");
11465 unsigned NumValues = RetVTs.
size();
11466 ReturnValues.
resize(NumValues);
11473 for (
unsigned i = 0; i < NumValues; ++i) {
11480 DemoteStackIdx, Offsets[i]),
11482 ReturnValues[i] = L;
11483 Chains[i] = L.getValue(1);
11490 std::optional<ISD::NodeType> AssertOp;
11495 unsigned CurReg = 0;
11496 for (
EVT VT : RetVTs) {
11502 CLI.
DAG, CLI.
DL, &InVals[CurReg], NumRegs, RegisterVT, VT,
nullptr,
11510 if (ReturnValues.
empty())
11516 return std::make_pair(Res, CLI.
Chain);
11533 if (
N->getNumValues() == 1) {
11541 "Lowering returned the wrong number of results!");
11544 for (
unsigned I = 0, E =
N->getNumValues();
I != E; ++
I)
11558 "Copy from a reg to the same reg!");
11559 assert(!Reg.isPhysical() &&
"Is a physreg");
11565 RegsForValue RFV(V->getContext(), TLI,
DAG.getDataLayout(), Reg, V->getType(),
11570 auto PreferredExtendIt =
FuncInfo.PreferredExtendType.find(V);
11571 if (PreferredExtendIt !=
FuncInfo.PreferredExtendType.end())
11572 ExtendType = PreferredExtendIt->second;
11575 PendingExports.push_back(Chain);
11587 return A->use_empty();
11589 const BasicBlock &Entry =
A->getParent()->front();
11590 for (
const User *U :
A->users())
11599 std::pair<const AllocaInst *, const StoreInst *>>;
11611 enum StaticAllocaInfo {
Unknown, Clobbered, Elidable };
11613 unsigned NumArgs = FuncInfo->
Fn->
arg_size();
11614 StaticAllocas.
reserve(NumArgs * 2);
11616 auto GetInfoIfStaticAlloca = [&](
const Value *V) -> StaticAllocaInfo * {
11619 V = V->stripPointerCasts();
11621 if (!AI || !AI->isStaticAlloca() || !FuncInfo->
StaticAllocaMap.count(AI))
11624 return &Iter.first->second;
11641 if (
I.isDebugOrPseudoInst())
11645 for (
const Use &U :
I.operands()) {
11646 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(U))
11647 *
Info = StaticAllocaInfo::Clobbered;
11653 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(
SI->getValueOperand()))
11654 *
Info = StaticAllocaInfo::Clobbered;
11657 const Value *Dst =
SI->getPointerOperand()->stripPointerCasts();
11658 StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(Dst);
11664 if (*
Info != StaticAllocaInfo::Unknown)
11672 const Value *Val =
SI->getValueOperand()->stripPointerCasts();
11674 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
11678 !
DL.typeSizeEqualsStoreSize(Arg->
getType()) ||
11679 ArgCopyElisionCandidates.count(Arg)) {
11680 *
Info = StaticAllocaInfo::Clobbered;
11684 LLVM_DEBUG(
dbgs() <<
"Found argument copy elision candidate: " << *AI
11688 *
Info = StaticAllocaInfo::Elidable;
11689 ArgCopyElisionCandidates.insert({Arg, {AI,
SI}});
11694 if (ArgCopyElisionCandidates.size() == NumArgs)
11718 auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
11719 assert(ArgCopyIter != ArgCopyElisionCandidates.end());
11720 const AllocaInst *AI = ArgCopyIter->second.first;
11721 int FixedIndex = FINode->getIndex();
11723 int OldIndex = AllocaIndex;
11727 dbgs() <<
" argument copy elision failed due to bad fixed stack "
11733 LLVM_DEBUG(
dbgs() <<
" argument copy elision failed: alignment of alloca "
11734 "greater than stack argument alignment ("
11735 <<
DebugStr(RequiredAlignment) <<
" vs "
11743 dbgs() <<
"Eliding argument copy from " << Arg <<
" to " << *AI <<
'\n'
11744 <<
" Replacing frame index " << OldIndex <<
" with " << FixedIndex
11750 AllocaIndex = FixedIndex;
11751 ArgCopyElisionFrameIndexMap.
insert({OldIndex, FixedIndex});
11752 for (
SDValue ArgVal : ArgVals)
11756 const StoreInst *
SI = ArgCopyIter->second.second;
11769void SelectionDAGISel::LowerArguments(
const Function &
F) {
11770 SelectionDAG &DAG =
SDB->DAG;
11771 SDLoc dl =
SDB->getCurSDLoc();
11776 if (
F.hasFnAttribute(Attribute::Naked))
11781 MVT ValueVT =
TLI->getPointerTy(
DL,
DL.getAllocaAddrSpace());
11783 ISD::ArgFlagsTy
Flags;
11785 MVT RegisterVT =
TLI->getRegisterType(*DAG.
getContext(), ValueVT);
11786 ISD::InputArg RetArg(Flags, RegisterVT, ValueVT,
F.getReturnType(),
true,
11796 ArgCopyElisionCandidates);
11799 for (
const Argument &Arg :
F.args()) {
11800 unsigned ArgNo = Arg.getArgNo();
11803 bool isArgValueUsed = !Arg.
use_empty();
11805 if (Arg.hasAttribute(Attribute::ByVal))
11806 FinalType = Arg.getParamByValType();
11807 bool NeedsRegBlock =
TLI->functionArgumentNeedsConsecutiveRegisters(
11808 FinalType,
F.getCallingConv(),
F.isVarArg(),
DL);
11809 for (
unsigned Value = 0, NumValues =
Types.size();
Value != NumValues;
11812 EVT VT =
TLI->getValueType(
DL, ArgTy);
11813 ISD::ArgFlagsTy
Flags;
11816 Flags.setPointer();
11819 if (Arg.hasAttribute(Attribute::ZExt))
11821 if (Arg.hasAttribute(Attribute::SExt))
11823 if (Arg.hasAttribute(Attribute::InReg)) {
11830 Flags.setHvaStart();
11836 if (Arg.hasAttribute(Attribute::StructRet))
11838 if (Arg.hasAttribute(Attribute::SwiftSelf))
11839 Flags.setSwiftSelf();
11840 if (Arg.hasAttribute(Attribute::SwiftAsync))
11841 Flags.setSwiftAsync();
11842 if (Arg.hasAttribute(Attribute::SwiftError))
11843 Flags.setSwiftError();
11844 if (Arg.hasAttribute(Attribute::ByVal))
11846 if (Arg.hasAttribute(Attribute::ByRef))
11848 if (Arg.hasAttribute(Attribute::InAlloca)) {
11849 Flags.setInAlloca();
11857 if (Arg.hasAttribute(Attribute::Preallocated)) {
11858 Flags.setPreallocated();
11870 const Align OriginalAlignment(
11871 TLI->getABIAlignmentForCallingConv(ArgTy,
DL));
11872 Flags.setOrigAlign(OriginalAlignment);
11875 Type *ArgMemTy =
nullptr;
11876 if (
Flags.isByVal() ||
Flags.isInAlloca() ||
Flags.isPreallocated() ||
11879 ArgMemTy = Arg.getPointeeInMemoryValueType();
11881 uint64_t MemSize =
DL.getTypeAllocSize(ArgMemTy);
11886 if (
auto ParamAlign = Arg.getParamStackAlign())
11887 MemAlign = *ParamAlign;
11888 else if ((ParamAlign = Arg.getParamAlign()))
11889 MemAlign = *ParamAlign;
11891 MemAlign =
TLI->getByValTypeAlignment(ArgMemTy,
DL);
11892 if (
Flags.isByRef())
11893 Flags.setByRefSize(MemSize);
11895 Flags.setByValSize(MemSize);
11896 }
else if (
auto ParamAlign = Arg.getParamStackAlign()) {
11897 MemAlign = *ParamAlign;
11899 MemAlign = OriginalAlignment;
11901 Flags.setMemAlign(MemAlign);
11903 if (Arg.hasAttribute(Attribute::Nest))
11906 Flags.setInConsecutiveRegs();
11907 if (ArgCopyElisionCandidates.count(&Arg))
11908 Flags.setCopyElisionCandidate();
11909 if (Arg.hasAttribute(Attribute::Returned))
11910 Flags.setReturned();
11912 MVT RegisterVT =
TLI->getRegisterTypeForCallingConv(
11913 *
CurDAG->getContext(),
F.getCallingConv(), VT);
11914 unsigned NumRegs =
TLI->getNumRegistersForCallingConv(
11915 *
CurDAG->getContext(),
F.getCallingConv(), VT);
11916 for (
unsigned i = 0; i != NumRegs; ++i) {
11920 ISD::InputArg MyFlags(
11921 Flags, RegisterVT, VT, ArgTy, isArgValueUsed, ArgNo,
11923 if (NumRegs > 1 && i == 0)
11924 MyFlags.Flags.setSplit();
11927 MyFlags.Flags.setOrigAlign(
Align(1));
11928 if (i == NumRegs - 1)
11929 MyFlags.Flags.setSplitEnd();
11933 if (NeedsRegBlock &&
Value == NumValues - 1)
11934 Ins[Ins.
size() - 1].Flags.setInConsecutiveRegsLast();
11940 SDValue NewRoot =
TLI->LowerFormalArguments(
11941 DAG.
getRoot(),
F.getCallingConv(),
F.isVarArg(), Ins, dl, DAG, InVals);
11945 "LowerFormalArguments didn't return a valid chain!");
11947 "LowerFormalArguments didn't emit the correct number of values!");
11949 for (
unsigned i = 0, e = Ins.
size(); i != e; ++i) {
11951 "LowerFormalArguments emitted a null value!");
11953 "LowerFormalArguments emitted a value with the wrong type!");
11965 MVT VT =
TLI->getPointerTy(
DL,
DL.getAllocaAddrSpace());
11966 MVT RegVT =
TLI->getRegisterType(*
CurDAG->getContext(), VT);
11967 std::optional<ISD::NodeType> AssertOp;
11970 F.getCallingConv(), AssertOp);
11972 MachineFunction&
MF =
SDB->DAG.getMachineFunction();
11973 MachineRegisterInfo&
RegInfo =
MF.getRegInfo();
11975 RegInfo.createVirtualRegister(
TLI->getRegClassFor(RegVT));
11976 FuncInfo->DemoteRegister = SRetReg;
11978 SDB->DAG.getCopyToReg(NewRoot,
SDB->getCurSDLoc(), SRetReg, ArgValue);
11986 DenseMap<int, int> ArgCopyElisionFrameIndexMap;
11987 for (
const Argument &Arg :
F.args()) {
11991 unsigned NumValues = ValueVTs.
size();
11992 if (NumValues == 0)
11999 if (Ins[i].
Flags.isCopyElisionCandidate()) {
12000 unsigned NumParts = 0;
12001 for (EVT VT : ValueVTs)
12002 NumParts +=
TLI->getNumRegistersForCallingConv(*
CurDAG->getContext(),
12003 F.getCallingConv(), VT);
12007 ArrayRef(&InVals[i], NumParts), ArgHasUses);
12012 bool isSwiftErrorArg =
12013 TLI->supportSwiftError() &&
12014 Arg.hasAttribute(Attribute::SwiftError);
12015 if (!ArgHasUses && !isSwiftErrorArg) {
12016 SDB->setUnusedArgValue(&Arg, InVals[i]);
12019 if (FrameIndexSDNode *FI =
12021 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12024 for (
unsigned Val = 0; Val != NumValues; ++Val) {
12025 EVT VT = ValueVTs[Val];
12026 MVT PartVT =
TLI->getRegisterTypeForCallingConv(*
CurDAG->getContext(),
12027 F.getCallingConv(), VT);
12028 unsigned NumParts =
TLI->getNumRegistersForCallingConv(
12029 *
CurDAG->getContext(),
F.getCallingConv(), VT);
12034 if (ArgHasUses || isSwiftErrorArg) {
12035 std::optional<ISD::NodeType> AssertOp;
12036 if (Arg.hasAttribute(Attribute::SExt))
12038 else if (Arg.hasAttribute(Attribute::ZExt))
12043 NewRoot,
F.getCallingConv(), AssertOp);
12046 if (NoFPClass !=
fcNone) {
12048 static_cast<uint64_t
>(NoFPClass), dl, MVT::i32);
12050 OutVal, SDNoFPClass);
12059 if (ArgValues.
empty())
12063 if (FrameIndexSDNode *FI =
12065 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12068 SDB->getCurSDLoc());
12070 SDB->setValue(&Arg, Res);
12080 if (LoadSDNode *LNode =
12082 if (FrameIndexSDNode *FI =
12084 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12112 FuncInfo->InitializeRegForValue(&Arg);
12113 SDB->CopyToExportRegsIfNeeded(&Arg);
12117 if (!Chains.
empty()) {
12124 assert(i == InVals.
size() &&
"Argument register count mismatch!");
12128 if (!ArgCopyElisionFrameIndexMap.
empty()) {
12129 for (MachineFunction::VariableDbgInfo &VI :
12130 MF->getInStackSlotVariableDbgInfo()) {
12131 auto I = ArgCopyElisionFrameIndexMap.
find(
VI.getStackSlot());
12132 if (
I != ArgCopyElisionFrameIndexMap.
end())
12133 VI.updateStackSlot(
I->second);
12148SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(
const BasicBlock *LLVMBB) {
12149 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12151 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
12157 MachineBasicBlock *SuccMBB =
FuncInfo.getMBB(SuccBB);
12161 if (!SuccsHandled.
insert(SuccMBB).second)
12169 for (
const PHINode &PN : SuccBB->phis()) {
12171 if (PN.use_empty())
12175 if (PN.getType()->isEmptyTy())
12179 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
12184 RegOut =
FuncInfo.CreateRegs(&PN);
12202 "Didn't codegen value into a register!??");
12212 for (EVT VT : ValueVTs) {
12214 for (
unsigned i = 0; i != NumRegisters; ++i)
12216 Reg += NumRegisters;
12236void SelectionDAGBuilder::updateDAGForMaybeTailCall(
SDValue MaybeTC) {
12238 if (MaybeTC.
getNode() !=
nullptr)
12239 DAG.setRoot(MaybeTC);
12244void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W,
Value *
Cond,
12247 MachineFunction *CurMF =
FuncInfo.MF;
12248 MachineBasicBlock *NextMBB =
nullptr;
12253 unsigned Size =
W.LastCluster -
W.FirstCluster + 1;
12255 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
12257 if (
Size == 2 &&
W.MBB == SwitchMBB) {
12265 CaseCluster &
Small = *
W.FirstCluster;
12266 CaseCluster &
Big = *
W.LastCluster;
12270 const APInt &SmallValue =
Small.Low->getValue();
12271 const APInt &BigValue =
Big.Low->getValue();
12274 APInt CommonBit = BigValue ^ SmallValue;
12281 DAG.getConstant(CommonBit,
DL, VT));
12283 DL, MVT::i1,
Or,
DAG.getConstant(BigValue | SmallValue,
DL, VT),
12289 addSuccessorWithProb(SwitchMBB,
Small.MBB,
Small.Prob +
Big.Prob);
12291 addSuccessorWithProb(
12292 SwitchMBB, DefaultMBB,
12296 addSuccessorWithProb(SwitchMBB, DefaultMBB);
12304 DAG.getBasicBlock(DefaultMBB));
12306 DAG.setRoot(BrCond);
12318 [](
const CaseCluster &a,
const CaseCluster &b) {
12319 return a.Prob != b.Prob ?
12321 a.Low->getValue().slt(b.Low->getValue());
12328 if (
I->Prob >
W.LastCluster->Prob)
12330 if (
I->Kind ==
CC_Range &&
I->MBB == NextMBB) {
12338 BranchProbability DefaultProb =
W.DefaultProb;
12339 BranchProbability UnhandledProbs = DefaultProb;
12341 UnhandledProbs +=
I->Prob;
12343 MachineBasicBlock *CurMBB =
W.MBB;
12345 bool FallthroughUnreachable =
false;
12346 MachineBasicBlock *Fallthrough;
12347 if (
I ==
W.LastCluster) {
12349 Fallthrough = DefaultMBB;
12354 CurMF->
insert(BBI, Fallthrough);
12358 UnhandledProbs -=
I->Prob;
12363 JumpTableHeader *JTH = &
SL->JTCases[
I->JTCasesIndex].first;
12364 SwitchCG::JumpTable *
JT = &
SL->JTCases[
I->JTCasesIndex].second;
12367 MachineBasicBlock *JumpMBB =
JT->MBB;
12368 CurMF->
insert(BBI, JumpMBB);
12370 auto JumpProb =
I->Prob;
12371 auto FallthroughProb = UnhandledProbs;
12379 if (*SI == DefaultMBB) {
12380 JumpProb += DefaultProb / 2;
12381 FallthroughProb -= DefaultProb / 2;
12399 if (FallthroughUnreachable) {
12406 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
12407 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
12413 JT->Default = Fallthrough;
12416 if (CurMBB == SwitchMBB) {
12424 BitTestBlock *BTB = &
SL->BitTestCases[
I->BTCasesIndex];
12427 for (BitTestCase &BTC : BTB->
Cases)
12439 BTB->
Prob += DefaultProb / 2;
12443 if (FallthroughUnreachable)
12447 if (CurMBB == SwitchMBB) {
12454 const Value *
RHS, *
LHS, *MHS;
12456 if (
I->Low ==
I->High) {
12471 if (FallthroughUnreachable)
12475 CaseBlock CB(CC,
LHS,
RHS, MHS,
I->MBB, Fallthrough, CurMBB,
12478 if (CurMBB == SwitchMBB)
12481 SL->SwitchCases.push_back(CB);
12486 CurMBB = Fallthrough;
12490void SelectionDAGBuilder::splitWorkItem(
SwitchWorkList &WorkList,
12491 const SwitchWorkListItem &W,
12494 assert(
W.FirstCluster->Low->getValue().slt(
W.LastCluster->Low->getValue()) &&
12495 "Clusters not sorted?");
12496 assert(
W.LastCluster -
W.FirstCluster + 1 >= 2 &&
"Too small to split!");
12498 auto [LastLeft, FirstRight, LeftProb, RightProb] =
12499 SL->computeSplitWorkItemInfo(W);
12504 assert(PivotCluster >
W.FirstCluster);
12505 assert(PivotCluster <=
W.LastCluster);
12510 const ConstantInt *Pivot = PivotCluster->Low;
12519 MachineBasicBlock *LeftMBB;
12520 if (FirstLeft == LastLeft && FirstLeft->Kind ==
CC_Range &&
12521 FirstLeft->Low ==
W.GE &&
12522 (FirstLeft->High->getValue() + 1LL) == Pivot->
getValue()) {
12523 LeftMBB = FirstLeft->MBB;
12525 LeftMBB =
FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
12526 FuncInfo.MF->insert(BBI, LeftMBB);
12528 {LeftMBB, FirstLeft, LastLeft,
W.GE, Pivot,
W.DefaultProb / 2});
12536 MachineBasicBlock *RightMBB;
12537 if (FirstRight == LastRight && FirstRight->Kind ==
CC_Range &&
12538 W.LT && (FirstRight->High->getValue() + 1ULL) ==
W.LT->getValue()) {
12539 RightMBB = FirstRight->MBB;
12541 RightMBB =
FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
12542 FuncInfo.MF->insert(BBI, RightMBB);
12544 {RightMBB, FirstRight, LastRight, Pivot,
W.LT,
W.DefaultProb / 2});
12550 CaseBlock CB(
ISD::SETLT,
Cond, Pivot,
nullptr, LeftMBB, RightMBB,
W.MBB,
12553 if (
W.MBB == SwitchMBB)
12556 SL->SwitchCases.push_back(CB);
12581 MachineBasicBlock *SwitchMBB =
FuncInfo.MBB;
12589 unsigned PeeledCaseIndex = 0;
12590 bool SwitchPeeled =
false;
12591 for (
unsigned Index = 0;
Index < Clusters.size(); ++
Index) {
12592 CaseCluster &CC = Clusters[
Index];
12593 if (CC.
Prob < TopCaseProb)
12595 TopCaseProb = CC.
Prob;
12596 PeeledCaseIndex =
Index;
12597 SwitchPeeled =
true;
12602 LLVM_DEBUG(
dbgs() <<
"Peeled one top case in switch stmt, prob: "
12603 << TopCaseProb <<
"\n");
12608 MachineBasicBlock *PeeledSwitchMBB =
12610 FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
12613 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
12614 SwitchWorkListItem
W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
12615 nullptr,
nullptr, TopCaseProb.
getCompl()};
12616 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
12618 Clusters.erase(PeeledCaseIt);
12619 for (CaseCluster &CC : Clusters) {
12621 dbgs() <<
"Scale the probablity for one cluster, before scaling: "
12622 << CC.
Prob <<
"\n");
12626 PeeledCaseProb = TopCaseProb;
12627 return PeeledSwitchMBB;
12630void SelectionDAGBuilder::visitSwitch(
const SwitchInst &
SI) {
12632 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
12634 Clusters.reserve(
SI.getNumCases());
12635 for (
auto I :
SI.cases()) {
12636 MachineBasicBlock *Succ =
FuncInfo.getMBB(
I.getCaseSuccessor());
12637 const ConstantInt *CaseVal =
I.getCaseValue();
12638 BranchProbability Prob =
12640 : BranchProbability(1,
SI.getNumCases() + 1);
12644 MachineBasicBlock *DefaultMBB =
FuncInfo.getMBB(
SI.getDefaultDest());
12653 MachineBasicBlock *PeeledSwitchMBB =
12654 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
12657 MachineBasicBlock *SwitchMBB =
FuncInfo.MBB;
12658 if (Clusters.empty()) {
12659 assert(PeeledSwitchMBB == SwitchMBB);
12661 if (DefaultMBB != NextBlock(SwitchMBB)) {
12668 SL->findJumpTables(Clusters, &SI,
getCurSDLoc(), DefaultMBB,
DAG.getPSI(),
12670 SL->findBitTestClusters(Clusters, &SI);
12673 dbgs() <<
"Case clusters: ";
12674 for (
const CaseCluster &
C : Clusters) {
12680 C.Low->getValue().print(
dbgs(),
true);
12681 if (
C.Low !=
C.High) {
12683 C.High->getValue().print(
dbgs(),
true);
12690 assert(!Clusters.empty());
12694 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
12698 DefaultMBB ==
FuncInfo.getMBB(
SI.getDefaultDest()))
12701 {PeeledSwitchMBB,
First,
Last,
nullptr,
nullptr, DefaultProb});
12703 while (!WorkList.
empty()) {
12705 unsigned NumClusters =
W.LastCluster -
W.FirstCluster + 1;
12710 splitWorkItem(WorkList, W,
SI.getCondition(), SwitchMBB);
12714 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, DefaultMBB);
12718void SelectionDAGBuilder::visitStepVector(
const CallInst &
I) {
12719 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12725void SelectionDAGBuilder::visitVectorReverse(
const CallInst &
I) {
12726 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12731 assert(VT ==
V.getValueType() &&
"Malformed vector.reverse!");
12740 SmallVector<int, 8>
Mask;
12742 for (
unsigned i = 0; i != NumElts; ++i)
12743 Mask.push_back(NumElts - 1 - i);
12748void SelectionDAGBuilder::visitVectorDeinterleave(
const CallInst &
I,
12757 EVT OutVT = ValueVTs[0];
12761 for (
unsigned i = 0; i != Factor; ++i) {
12762 assert(ValueVTs[i] == OutVT &&
"Expected VTs to be the same");
12764 DAG.getVectorIdxConstant(OutNumElts * i,
DL));
12770 SDValue Even =
DAG.getVectorShuffle(OutVT,
DL, SubVecs[0], SubVecs[1],
12772 SDValue Odd =
DAG.getVectorShuffle(OutVT,
DL, SubVecs[0], SubVecs[1],
12780 DAG.getVTList(ValueVTs), SubVecs);
12784void SelectionDAGBuilder::visitVectorInterleave(
const CallInst &
I,
12787 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12792 for (
unsigned i = 0; i < Factor; ++i) {
12795 "Expected VTs to be the same");
12813 for (
unsigned i = 0; i < Factor; ++i)
12820void SelectionDAGBuilder::visitFreeze(
const FreezeInst &
I) {
12824 unsigned NumValues = ValueVTs.
size();
12825 if (NumValues == 0)
return;
12830 for (
unsigned i = 0; i != NumValues; ++i)
12835 DAG.getVTList(ValueVTs), Values));
12838void SelectionDAGBuilder::visitVectorSplice(
const CallInst &
I) {
12839 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12851 DAG.getSignedConstant(
12858 uint64_t Idx = (NumElts +
Imm) % NumElts;
12861 SmallVector<int, 8>
Mask;
12862 for (
unsigned i = 0; i < NumElts; ++i)
12863 Mask.push_back(Idx + i);
12891 assert(
MI->getOpcode() == TargetOpcode::COPY &&
12892 "start of copy chain MUST be COPY");
12893 Reg =
MI->getOperand(1).getReg();
12896 assert(
Reg.isVirtual() &&
"expected COPY of virtual register");
12897 MI =
MRI.def_begin(
Reg)->getParent();
12900 if (
MI->getOpcode() == TargetOpcode::COPY) {
12901 assert(
Reg.isVirtual() &&
"expected COPY of virtual register");
12902 Reg =
MI->getOperand(1).getReg();
12903 assert(
Reg.isPhysical() &&
"expected COPY of physical register");
12906 assert(
MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
12907 "end of copy chain MUST be INLINEASM_BR");
12917void SelectionDAGBuilder::visitCallBrLandingPad(
const CallInst &
I) {
12923 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12924 const TargetRegisterInfo *
TRI =
DAG.getSubtarget().getRegisterInfo();
12925 MachineRegisterInfo &
MRI =
DAG.getMachineFunction().getRegInfo();
12933 for (
auto &
T : TargetConstraints) {
12934 SDISelAsmOperandInfo OpInfo(
T);
12942 switch (OpInfo.ConstraintType) {
12953 FuncInfo.MBB->addLiveIn(OriginalDef);
12961 ResultVTs.
push_back(OpInfo.ConstraintVT);
12970 ResultVTs.
push_back(OpInfo.ConstraintVT);
12978 DAG.getVTList(ResultVTs), ResultValues);
unsigned const MachineRegisterInfo * MRI
static unsigned getIntrinsicID(const SDNode *N)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
static Value * getCondition(Instruction *I)
const HexagonInstrInfo * TII
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
static void getRegistersForValue(MachineFunction &MF, MachineIRBuilder &MIRBuilder, GISelAsmOperandInfo &OpInfo, GISelAsmOperandInfo &RefOpInfo)
Assign virtual/physical registers for the specified register operand.
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Machine Check Debug Module
static bool isUndef(const MachineInstr &MI)
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static const Function * getCalledFunction(const Value *V)
This file provides utility analysis objects describing memory locations.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
MachineInstr unsigned OpIdx
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
OptimizedStructLayoutField Field
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static Type * getValueType(Value *V)
Returns the type of the given value/instruction V.
static bool hasOnlySelectUsers(const Value *Cond)
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL, SDValue &Chain)
Create a LOAD_STACK_GUARD node, and let it carry the target specific global variable if there exists ...
static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index, SDValue &Scale, SelectionDAGBuilder *SDB, const BasicBlock *CurBB, uint64_t ElemSize)
static void failForInvalidBundles(const CallBase &I, StringRef Name, ArrayRef< uint32_t > AllowedBundles)
static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx, const SDLoc &DL, SmallVectorImpl< SDValue > &Ops, SelectionDAGBuilder &Builder)
Add a stack map intrinsic call's live variable operands to a stackmap or patchpoint target node's ope...
static const unsigned MaxParallelChains
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
visitPow - Lower a pow intrinsic.
static const CallBase * FindPreallocatedCall(const Value *PreallocatedSetup)
Given a @llvm.call.preallocated.setup, return the corresponding preallocated call.
static cl::opt< unsigned > SwitchPeelThreshold("switch-peel-threshold", cl::Hidden, cl::init(66), cl::desc("Set the case probability threshold for peeling the case from a " "switch statement. A value greater than 100 will void this " "optimization"))
static cl::opt< bool > InsertAssertAlign("insert-assert-align", cl::init(true), cl::desc("Insert the experimental `assertalign` node."), cl::ReallyHidden)
static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin)
static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG, DILocalVariable *Variable, DebugLoc DL, unsigned Order, SmallVectorImpl< Value * > &Values, DIExpression *Expression)
static unsigned findMatchingInlineAsmOperand(unsigned OperandNo, const std::vector< SDValue > &AsmNodeOperands)
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo, SDISelAsmOperandInfo &MatchingOpInfo, SelectionDAG &DAG)
Make sure that the output operand OpInfo and its corresponding input operand MatchingOpInfo have comp...
static void findUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
When an invoke or a cleanupret unwinds to the next EH pad, there are many places it could ultimately ...
static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic)
static BranchProbability scaleCaseProbality(BranchProbability CaseProb, BranchProbability PeeledCaseProb)
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp2 - Lower an exp2 intrinsic.
static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue Scale, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt, const SDLoc &dl)
getF32Constant - Get 32-bit floating point constant.
static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val, const SDLoc &DL, EVT PartVT)
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog10 - Lower a log10 intrinsic.
DenseMap< const Argument *, std::pair< const AllocaInst *, const StoreInst * > > ArgCopyElisionMapTy
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv)
getCopyToPartsVector - Create a series of nodes that contain the specified value split into legal par...
static void getUnderlyingArgRegs(SmallVectorImpl< std::pair< Register, TypeSize > > &Regs, const SDValue &N)
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv=std::nullopt, ISD::NodeType ExtendKind=ISD::ANY_EXTEND)
getCopyToParts - Create a series of nodes that contain the specified value split into legal parts.
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, SelectionDAGBuilder &Builder)
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog2 - Lower a log2 intrinsic.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location, SDISelAsmOperandInfo &OpInfo, SelectionDAG &DAG)
Get a direct memory input to behave well as an indirect operand.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel)
isOnlyUsedInEntryBlock - If the specified argument is only used in the entry block,...
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V, const Twine &ErrMsg)
static bool collectInstructionDeps(SmallMapVector< const Instruction *, bool, 8 > *Deps, const Value *V, SmallMapVector< const Instruction *, bool, 8 > *Necessary=nullptr, unsigned Depth=0)
static void findArgumentCopyElisionCandidates(const DataLayout &DL, FunctionLoweringInfo *FuncInfo, ArgCopyElisionMapTy &ArgCopyElisionCandidates)
Scan the entry block of the function in FuncInfo for arguments that look like copies into a local all...
static bool isFunction(SDValue Op)
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI, const SDLoc &dl)
GetExponent - Get the exponent:
static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg)
static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, SelectionDAG &DAG)
ExpandPowI - Expand a llvm.powi intrinsic.
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog - Lower a log intrinsic.
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC=std::nullopt, std::optional< ISD::NodeType > AssertOp=std::nullopt)
getCopyFromParts - Create a value that contains the specified legal parts combined into the value the...
static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl, SelectionDAG &DAG)
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl)
GetSignificand - Get the significand and build it into a floating-point number with exponent of 1:
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp - Lower an exp intrinsic.
static const MDNode * getRangeMetadata(const Instruction &I)
static cl::opt< unsigned, true > LimitFPPrecision("limit-float-precision", cl::desc("Generate low-precision inline sequences " "for some float libcalls"), cl::location(LimitFloatPrecision), cl::Hidden, cl::init(0))
static void tryToElideArgumentCopy(FunctionLoweringInfo &FuncInfo, SmallVectorImpl< SDValue > &Chains, DenseMap< int, int > &ArgCopyElisionFrameIndexMap, SmallPtrSetImpl< const Instruction * > &ElidedArgCopyInstrs, ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg, ArrayRef< SDValue > ArgVals, bool &ArgHasUses)
Try to elide argument copies from memory into a local alloca.
static unsigned LimitFloatPrecision
LimitFloatPrecision - Generate low-precision inline sequences for some float libcalls (6,...
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC)
getCopyFromPartsVector - Create a value that contains the specified legal parts combined into the val...
static bool InBlock(const Value *V, const BasicBlock *BB)
static FPClassTest getNoFPClass(const Instruction &I)
static LLVM_ATTRIBUTE_ALWAYS_INLINE MVT::SimpleValueType getSimpleVT(const unsigned char *MatcherTable, unsigned &MatcherIndex)
getSimpleVT - Decode a value in MatcherTable, if it's a VBR encoded value, use GetVBR to decode it.
This file defines the SmallPtrSet class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
static const fltSemantics & IEEEsingle()
Class for arbitrary precision integers.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
an instruction to allocate memory on the stack
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
This class represents an incoming formal argument to a Function.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Check if an argument has a given attribute.
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
This class holds the attributes for a particular argument, parameter, function, or return value.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI InstListType::const_iterator getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
This class represents a no-op cast from one type to another.
The address of a basic block.
Conditional or Unconditional Branch instruction.
Analysis providing branch probability information.
LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
LLVM_ABI bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const
Test if an edge is hot relative to other out-edges of the Src.
static uint32_t getDenominator()
static BranchProbability getOne()
static BranchProbability getUnknown()
uint32_t getNumerator() const
LLVM_ABI uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
CallingConv::ID getCallingConv() const
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI bool isTailCall() const
Tests if this call site is marked as a tail call.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
A constant value that is initialized with an expression using other constant values.
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
A signed pointer, in the ptrauth sense.
uint64_t getZExtValue() const
Constant Vector Declarations.
This is an important base class in LLVM.
This is the common base class for constrained floating point intrinsics.
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI unsigned getNonMetadataArgCount() const
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static bool fragmentsOverlap(const FragmentInfo &A, const FragmentInfo &B)
Check if fragments overlap between a pair of FragmentInfos.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
LLVM_ABI uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static LLVM_ABI std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
static LLVM_ABI const DIExpression * convertToUndefExpression(const DIExpression *Expr)
Removes all elements from Expr that do not apply to an undef debug value, which includes every operat...
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static LLVM_ABI DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
Base class for variables.
LLVM_ABI std::optional< uint64_t > getSizeInBits() const
Determines the size of the variable's type.
A parsed version of the target data layout string in and methods for querying it.
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LocationType getType() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
LLVM_ABI iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
LLVM_ABI DILocation * getInlinedAt() const
iterator find(const_arg_type_t< KeyT > Val)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Diagnostic information for inline asm reporting.
static constexpr ElementCount getFixed(ScalarTy MinVal)
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
constexpr bool isScalar() const
Exactly one element.
Lightweight error class with error context and mandatory checking.
Class representing an expression and its matching format.
This instruction compares its operands according to the predicate given to the constructor.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
bool allowReassoc() const
Flag queries.
An instruction for ordering other memory operations.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
BranchProbabilityInfo * BPI
MachineBasicBlock * getMBB(const BasicBlock *BB) const
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block.
const LiveOutInfo * GetLiveOutRegInfo(Register Reg)
GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the register is a PHI destinat...
MachineBasicBlock * MBB
MBB - The current block.
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
Type * getReturnType() const
Data structure describing the variable locations in a function.
const BasicBlock & getEntryBlock() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const
check if an attributes is in the list of attributes.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Constant * getPersonalityFn() const
Get the personality function associated with this function.
AttributeList getAttributes() const
Return the attribute list for this Function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Garbage collection metadata for a single function.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
This instruction inserts a struct field of array element value into an aggregate value.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
@ MIN_INT_BITS
Minimum number of bits that can be specified.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
@ OB_clang_arc_attachedcall
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
A helper class to return the specified delimiter string after the first invocation of operator String...
An instruction for reading from memory.
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
LLVM_ABI MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
@ INVALID_SIMPLE_VALUE_TYPE
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool bitsGE(MVT VT) const
Return true if this has no less bits than VT.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
succ_iterator succ_begin()
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void setIsEHContTarget(bool V=true)
Indicates if this is a target of Windows EH Continuation Guard.
void setIsEHFuncletEntry(bool V=true)
Indicates if this is the entry block of an EH funclet.
MachineInstrBundleIterator< MachineInstr > iterator
void setIsEHScopeEntry(bool V=true)
Indicates if this is the entry block of an EH scope, i.e., the block that that used to have a catchpa...
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable)
Marks the immutability of an object.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
void setIsAliasedObjectIndex(int ObjectIdx, bool IsAliased)
Set "maybe pointed to by an LLVM IR value" for an object.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
void RemoveStackObject(int ObjectIdx)
Remove or mark dead a statically sized stack object.
void setFunctionContextIndex(int I)
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site)
Map the begin label for a call site.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD)
Record annotations associated with a particular label.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
bool hasEHFunclets() const
void setHasEHContTarget(bool V)
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineInstr - Allocate a new MachineInstr.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
static MachineOperand CreateFI(int Idx)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MCRegister getLiveInPhysReg(Register VReg) const
getLiveInPhysReg - If VReg is a live-in virtual register, return the corresponding live-in physical r...
An SDNode that represents everything that will be needed to construct a MachineInstr.
bool contains(const KeyT &Key) const
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
static MemoryLocation getAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location after Ptr, while remaining within the underlying objec...
A Module instance is used to store all the information related to an LLVM module.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
Holds the information from a dbg_label node through SDISel.
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(Register VReg)
static SDDbgOperand fromConst(const Value *Const)
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
SelectionDAGBuilder - This is the common target-independent lowering implementation that is parameter...
SDValue getValue(const Value *V)
getValue - Return an SDValue for the given Value.
DenseMap< const Constant *, Register > ConstantsOut
void addDanglingDebugInfo(SmallVectorImpl< Value * > &Values, DILocalVariable *Var, DIExpression *Expr, bool IsVariadic, DebugLoc DL, unsigned Order)
Register a dbg_value which relies on a Value which we have not yet seen.
void visitDbgInfo(const Instruction &I)
void clearDanglingDebugInfo()
Clear the dangling debug information map.
void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall, bool IsMustTailCall, const BasicBlock *EHPadBB=nullptr, const TargetLowering::PtrAuthInfo *PAI=nullptr)
void clear()
Clear out the current SelectionDAG and the associated state and prepare this SelectionDAGBuilder obje...
void visitBitTestHeader(SwitchCG::BitTestBlock &B, MachineBasicBlock *SwitchBB)
visitBitTestHeader - This function emits necessary code to produce value suitable for "bit tests"
void LowerStatepoint(const GCStatepointInst &I, const BasicBlock *EHPadBB=nullptr)
std::unique_ptr< SDAGSwitchLowering > SL
SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I, SDValue Op)
bool HasTailCall
This is set to true if a call in the current block has been translated as a tail call.
bool ShouldEmitAsBranches(const std::vector< SwitchCG::CaseBlock > &Cases)
If the set of cases should be emitted as a series of branches, return true.
void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
EmitBranchForMergedCondition - Helper method for FindMergedConditions.
void LowerDeoptimizeCall(const CallInst *CI)
void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee, const BasicBlock *EHPadBB)
SwiftErrorValueTracking & SwiftError
Information about the swifterror values used throughout the function.
SDValue getNonRegisterValue(const Value *V)
getNonRegisterValue - Return an SDValue for the given Value, but don't look in FuncInfo....
const TargetTransformInfo * TTI
DenseMap< MachineBasicBlock *, SmallVector< unsigned, 4 > > LPadToCallSiteMap
Map a landing pad to the call site indexes.
SDValue lowerNoFPClassToAssertNoFPClass(SelectionDAG &DAG, const Instruction &I, SDValue Op)
void handleDebugDeclare(Value *Address, DILocalVariable *Variable, DIExpression *Expression, DebugLoc DL)
bool shouldKeepJumpConditionsTogether(const FunctionLoweringInfo &FuncInfo, const BranchInst &I, Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs, TargetLoweringBase::CondMergingParams Params) const
StatepointLoweringState StatepointLowering
State used while lowering a statepoint sequence (gc_statepoint, gc_relocate, and gc_result).
void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB, BranchProbability BranchProbToNext, Register Reg, SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB)
visitBitTestCase - this function produces one "bit test"
bool canTailCall(const CallBase &CB) const
void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI, const CallBase *Call, unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy, AttributeSet RetAttrs, bool IsPatchPoint)
Populate a CallLowerinInfo (into CLI) based on the properties of the call being lowered.
void CopyValueToVirtualRegister(const Value *V, Register Reg, ISD::NodeType ExtendType=ISD::ANY_EXTEND)
void salvageUnresolvedDbgValue(const Value *V, DanglingDebugInfo &DDI)
For the given dangling debuginfo record, perform last-ditch efforts to resolve the debuginfo to somet...
SmallVector< SDValue, 8 > PendingLoads
Loads are not emitted to the program immediately.
GCFunctionInfo * GFI
Garbage collection metadata for the function.
void init(GCFunctionInfo *gfi, BatchAAResults *BatchAA, AssumptionCache *AC, const TargetLibraryInfo *li, const TargetTransformInfo &TTI)
SDValue getRoot()
Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict) items.
void ExportFromCurrentBlock(const Value *V)
ExportFromCurrentBlock - If this condition isn't known to be exported from the current basic block,...
DebugLoc getCurDebugLoc() const
void resolveOrClearDbgInfo()
Evict any dangling debug information, attempting to salvage it first.
std::pair< SDValue, SDValue > lowerInvokable(TargetLowering::CallLoweringInfo &CLI, const BasicBlock *EHPadBB=nullptr)
SDValue getMemoryRoot()
Return the current virtual root of the Selection DAG, flushing any PendingLoad items.
void resolveDanglingDebugInfo(const Value *V, SDValue Val)
If we saw an earlier dbg_value referring to V, generate the debug data structures now that we've seen...
SDLoc getCurSDLoc() const
void visit(const Instruction &I)
void dropDanglingDebugInfo(const DILocalVariable *Variable, const DIExpression *Expr)
If we have dangling debug info that describes Variable, or an overlapping part of variable considerin...
SDValue getCopyFromRegs(const Value *V, Type *Ty)
If there was virtual register allocated for the value V emit CopyFromReg of the specified type Ty.
void CopyToExportRegsIfNeeded(const Value *V)
CopyToExportRegsIfNeeded - If the given value has virtual registers created for it,...
void handleKillDebugValue(DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order)
Create a record for a kill location debug intrinsic.
void visitJumpTable(SwitchCG::JumpTable &JT)
visitJumpTable - Emit JumpTable node in the current MBB
SDValue getFPOperationRoot(fp::ExceptionBehavior EB)
Return the current virtual root of the Selection DAG, flushing PendingConstrainedFP or PendingConstra...
void visitJumpTableHeader(SwitchCG::JumpTable &JT, SwitchCG::JumpTableHeader &JTH, MachineBasicBlock *SwitchBB)
visitJumpTableHeader - This function emits necessary code to produce index in the JumpTable from swit...
void LowerCallSiteWithPtrAuthBundle(const CallBase &CB, const BasicBlock *EHPadBB)
static const unsigned LowestSDNodeOrder
Lowest valid SDNodeOrder.
void LowerDeoptimizingReturn()
FunctionLoweringInfo & FuncInfo
Information about the function as a whole.
void setValue(const Value *V, SDValue NewN)
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, Instruction::BinaryOps Opc, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
const TargetLibraryInfo * LibInfo
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB)
void visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineBasicBlock *ParentBB)
Codegen a new tail for a stack protector check ParentMBB which has had its tail spliced into a stack ...
bool handleDebugValue(ArrayRef< const Value * > Values, DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order, bool IsVariadic)
For a given list of Values, attempt to create and record a SDDbgValue in the SelectionDAG.
SDValue getControlRoot()
Similar to getRoot, but instead of flushing all the PendingLoad items, flush all the PendingExports (...
void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last)
When an MBB was split during scheduling, update the references that need to refer to the last resulti...
SDValue getValueImpl(const Value *V)
getValueImpl - Helper function for getValue and getNonRegisterValue.
void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB)
visitSwitchCase - Emits the necessary code to represent a single node in the binary search tree resul...
void visitSPDescriptorFailure(StackProtectorDescriptor &SPD)
Codegen the failure basic block for a stack protector check.
std::unique_ptr< FunctionLoweringInfo > FuncInfo
SmallPtrSet< const Instruction *, 4 > ElidedArgCopyInstrs
const TargetLowering * TLI
MachineRegisterInfo * RegInfo
std::unique_ptr< SwiftErrorValueTracking > SwiftError
virtual void emitFunctionEntryCode()
std::unique_ptr< SelectionDAGBuilder > SDB
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, SDValue MaxLength, MachinePointerInfo SrcPtrInfo) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, const CallInst *CI) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dest, SDValue Src, MachinePointerInfo DestPtrInfo, MachinePointerInfo SrcPtrInfo, bool isStpcpy) const
Emit target-specific code that performs a strcpy or stpcpy, in cases where that is faster than a libc...
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Src, SDValue Char, SDValue Length, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memchr, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, const CallInst *CI) const
Emit target-specific code that performs a memcmp/bcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a strcmp, in cases where that is faster than a libcall.
virtual SDValue EmitTargetCodeForSetTag(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Addr, SDValue Size, MachinePointerInfo DstPtrInfo, bool ZeroData) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
const TargetSubtargetInfo & getSubtarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI bool shouldOptForSize() const
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
LLVM_ABI void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
LLVM_ABI SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
LLVM_ABI SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void swap(SmallVectorImpl &RHS)
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
MachineBasicBlock * getSuccessMBB()
MachineBasicBlock * getFailureMBB()
MachineBasicBlock * getParentMBB()
bool shouldEmitFunctionBasedCheckStackProtector() const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Provides information about what library functions are available for the current target.
virtual Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Returns the desired alignment for ByVal or InAlloca aggregate function arguments in the caller parame...
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual bool isLegalScaleForGatherScatter(uint64_t Scale, uint64_t ElemSize) const
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vecto...
virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const
Returns true if the index type for a masked gather/scatter requires extending.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const
Certain targets have context sensitive alignment requirements, where one type has the alignment requi...
MachineMemOperand::Flags getVPIntrinsicMemOperandFlags(const VPIntrinsic &VPIntrin) const
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallBase &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ ...
virtual MVT getFenceOperandTy(const DataLayout &DL) const
Return the type for operands of fence.
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF, bool IsScalable) const
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual MVT hasFastEqualityCompare(unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the give...
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
virtual bool shouldExpandCttzElements(EVT VT) const
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
virtual bool signExtendConstant(const ConstantInt *C) const
Return true if this constant should be sign extended when promoting to a larger type.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
bool supportsUnalignedAtomics() const
Whether the target supports unaligned atomic operations.
std::vector< ArgListEntry > ArgListTy
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
virtual bool useStackGuardMixCookie() const
If this function returns true, stack protection checks should mix the stack guard value before checki...
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool supportKCFIBundles() const
Return true if the target supports kcfi operand bundles.
virtual bool supportPtrAuthBundles() const
Return true if the target supports ptrauth operand bundles.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
virtual SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue &NewLoad, SDValue Ptr, SDValue PassThru, SDValue Mask) const
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const
Target-specific splitting of values into parts that fit a register storing a legal type.
virtual SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const
Target-specific combining of register parts into its original value.
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual SDValue emitStackGuardMixCookie(SelectionDAG &DAG, SDValue Val, const SDLoc &DL, bool FailureBB) const
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to u...
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
virtual SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue Ptr, SDValue Val, SDValue Mask) const
virtual bool useLoadStackGuardNode(const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::LibcallImpl LibcallImpl, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
virtual bool isInlineAsmTargetBranch(const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use eithe...
virtual MVT getJumpTableRegTy(const DataLayout &DL) const
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &, const Type *RetTy) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
unsigned getID() const
Return the register class ID number.
const MCPhysReg * iterator
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
bool isVoidTy() const
Return true if this is 'void'.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
LLVM_ABI CmpInst::Predicate getPredicate() const
This is the common base class for vector predication intrinsics.
static LLVM_ABI std::optional< unsigned > getVectorLengthParamPos(Intrinsic::ID IntrinsicID)
LLVM_ABI MaybeAlign getPointerAlignment() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Base class of all SIMD vector types.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
const ParentTy * getParent() const
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char SymbolName[]
Key for Kernel::Metadata::mSymbolName.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ AnyReg
OBSOLETED - Used for dynamic register based calls (e.g. stackmap and patchpoint intrinsics).
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ X86_VectorCall
MSVC calling convention that passes vectors and vector aggregates in SSE registers.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ CONVERGENCECTRL_ANCHOR
The llvm.experimental.convergence.* intrinsics.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ SET_FPENV
Sets the current floating-point environment.
@ LOOP_DEPENDENCE_RAW_MASK
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ RESET_FPENV
Set floating-point environment to default state.
@ ADD
Simple integer binary arithmetic operators.
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ SET_FPMODE
Sets the current dynamic floating-point control modes.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ VECTOR_FIND_LAST_ACTIVE
Finds the index of the last active mask element Operands: Mask.
@ FMODF
FMODF - Decomposes the operand into integral and fractional parts, each having the same type and sign...
@ FATAN2
FATAN2 - atan2, inspired by libm.
@ FSINCOSPI
FSINCOSPI - Compute both the sine and cosine times pi more accurately than FSINCOS(pi*x),...
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ EH_SJLJ_SETUP_DISPATCH
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here.
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ RESET_FPMODE
Sets default dynamic floating-point control modes.
@ FMULADD
FMULADD - Performs a * b + c, with, or without, intermediate rounding.
@ FPTRUNC_ROUND
FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
@ FAKE_USE
FAKE_USE represents a use of the operand but does not do anything.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ INIT_TRAMPOLINE
INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
@ ANNOTATION_LABEL
ANNOTATION_LABEL - Represents a mid basic block label used by annotations.
@ SET_ROUNDING
Set rounding mode.
@ CONVERGENCECTRL_GLUE
This does not correspond to any convergence control intrinsic.
@ SIGN_EXTEND
Conversion operators.
@ PREALLOCATED_SETUP
PREALLOCATED_SETUP - This has 2 operands: an input chain and a SRCVALUE with the preallocated call Va...
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readfixedcounter intrinsic.
@ ADDROFRETURNADDR
ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
@ BR
Control flow instructions. These all have token chains.
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ SSUBO
Same for subtraction.
@ PREALLOCATED_ARG
PREALLOCATED_ARG - This has 3 operands: an input chain, a SRCVALUE with the preallocated call Value,...
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor to...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ GET_ACTIVE_LANE_MASK
GET_ACTIVE_LANE_MASK - this corresponds to the llvm.get.active.lane.mask intrinsic.
@ BasicBlock
Various leaf nodes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ ARITH_FENCE
ARITH_FENCE - This corresponds to an arithmetic fence intrinsic.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ CLEANUPRET
CLEANUPRET - Represents a return from a cleanup block funclet.
@ GET_FPMODE
Reads the current dynamic floating-point control modes.
@ GET_FPENV
Gets the current floating-point environment.
@ SHL
Shift and rotation operations.
@ AssertNoFPClass
AssertNoFPClass - These nodes record if a register contains a float value that is known to be not som...
@ PtrAuthGlobalAddress
A ptrauth constant.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a sc...
@ LOCAL_RECOVER
LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum maximum on two values, following IEEE-754 definition...
@ UBSANTRAP
UBSANTRAP - Trap with an immediate describing the kind of sanitizer failure.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ PATCHPOINT
The llvm.experimental.patchpoint.
@ SMULO
Same for multiplication.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ PCMARKER
PCMARKER - This corresponds to the pcmarker intrinsic.
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ RELOC_NONE
Issue a no-op relocation against a given symbol at the current location.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ STACKMAP
The llvm.experimental.stackmap intrinsic.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
@ SPONENTRY
SPONENTRY - Represents the llvm.sponentry intrinsic.
@ CLEAR_CACHE
llvm.clear_cache intrinsic Operands: Input Chain, Start Address, End Address Outputs: Output Chain
@ INLINEASM
INLINEASM - Represents an inline asm block.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ BRCOND
BRCOND - Conditional branch.
@ CATCHRET
CATCHRET - Represents a return from a catch block funclet.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor ...
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ ADJUST_TRAMPOLINE
ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
@ LOOP_DEPENDENCE_WAR_MASK
The llvm.loop.dependence.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
IntrinsicID_match m_VScale()
Matches a call to llvm.vscale().
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
std::pair< JumpTableHeader, JumpTable > JumpTableBlock
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
std::vector< CaseCluster > CaseClusterVector
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
SmallVector< SwitchWorkListItem, 4 > SwitchWorkList
CaseClusterVector::iterator CaseClusterIt
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebMayTrap
This corresponds to "fpexcept.maytrap".
@ ebIgnore
This corresponds to "fpexcept.ignore".
NodeAddr< FuncNode * > Func
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
FunctionAddr VTableAddr Value
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs=nullptr, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
LLVM_ABI void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
static ConstantRange getRange(Value *Op, SCCPSolver &Solver, const SmallPtrSetImpl< Value * > &InsertedValues)
Helper for getting ranges from Solver.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
auto cast_or_null(const Y &Val)
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
gep_type_iterator gep_type_end(const User *GEP)
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
void ComputeValueTypes(const DataLayout &DL, Type *Ty, SmallVectorImpl< Type * > &Types, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
Given an LLVM IR type, compute non-aggregate subtypes.
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
detail::zippy< detail::zip_first, T, U, Args... > zip_first(T &&t, U &&u, Args &&...args)
zip iterator that, for the sake of efficiency, assumes the first iteratee to be the shortest.
void sort(IteratorTy Start, IteratorTy End)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
generic_gep_type_iterator<> gep_type_iterator
FunctionAddr VTableAddr Count
auto succ_size(const MachineBasicBlock *BB)
bool hasSingleElement(ContainerTy &&C)
Returns true if the given container only contains a single element.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Global
Append to llvm.global_dtors.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
FunctionAddr VTableAddr uintptr_t uintptr_t Data
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
LLVM_ABI llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ And
Bitwise or logical AND of integers.
@ Sub
Subtraction of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, can return either (or it has been determined that no operands can be NaN).
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
DWARFExpression::Operation Op
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if w...
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
gep_type_iterator gep_type_begin(const User *GEP)
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
LLVM_ABI Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
@ Default
The result values are uniform if and only if all operands are uniform.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
uint64_t getScalarStoreSize() const
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
EVT changeVectorElementType(LLVMContext &Context, EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
bool isRISCVVectorTuple() const
Return true if this is a RISC-V vector tuple type where the runtime length is machine dependent.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
EVT changeElementType(LLVMContext &Context, EVT EltVT) const
Return a VT for a type whose attributes match ourselves with the exception of the element type that i...
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
void setPointerAddrSpace(unsigned AS)
void setOrigAlign(Align A)
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
A lightweight accessor for an operand bundle meant to be passed around by value.
This struct represents the registers (physical or virtual) that a particular set of values is assigne...
SmallVector< std::pair< Register, TypeSize >, 4 > getRegsAndSizes() const
Return a list of registers and their sizes.
SmallVector< unsigned, 4 > RegCount
This list holds the number of registers for each value.
bool isABIMangled() const
SmallVector< EVT, 4 > ValueVTs
The value types of the values, which may not be legal, and may need be promoted or synthesized from o...
SmallVector< Register, 4 > Regs
This list holds the registers assigned to the values.
void AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching, unsigned MatchingIdx, const SDLoc &dl, SelectionDAG &DAG, std::vector< SDValue > &Ops) const
Add this value to the specified inlineasm node operand list.
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr) const
Emit a series of CopyFromReg nodes that copies from this value and returns the result as a ValueVTs v...
SmallVector< MVT, 4 > RegVTs
The value types of the registers.
void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr, ISD::NodeType PreferredExtendType=ISD::ANY_EXTEND) const
Emit a series of CopyToReg nodes that copies the specified value into the registers specified by this...
std::optional< CallingConv::ID > CallConv
Records if this value needs to be treated in an ABI dependant manner, different to normal type legali...
bool occupiesMultipleRegs() const
Check if the total RegCount is greater than one.
These are IR-level optimization flags that may be propagated to SDNodes.
void copyFMF(const FPMathOperator &FPMO)
Propagate the fast-math-flags from an IR FPMathOperator.
void setUnpredictable(bool b)
bool hasAllowReassociation() const
void setNoUnsignedWrap(bool b)
void setNoSignedWrap(bool b)
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
A MapVector that performs no allocations if smaller than a certain size.
MachineBasicBlock * Default
BranchProbability DefaultProb
MachineBasicBlock * Parent
bool FallthroughUnreachable
MachineBasicBlock * ThisBB
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...
BranchProbability TrueProb
BranchProbability FalseProb
MachineBasicBlock * TrueBB
MachineBasicBlock * FalseBB
SDLoc DL
The debug location of the instruction this CaseBlock was produced from.
static CaseCluster range(const ConstantInt *Low, const ConstantInt *High, MachineBasicBlock *MBB, BranchProbability Prob)
This contains information for each constraint that we are lowering.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setConvergent(bool Value=true)
CallLoweringInfo & setDeactivationSymbol(GlobalValue *Sym)
CallLoweringInfo & setCFIType(const ConstantInt *Type)
SmallVector< ISD::InputArg, 32 > Ins
bool IsPostTypeLegalization
SmallVector< SDValue, 4 > InVals
Type * OrigRetTy
Original unlegalized return type.
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setIsPreallocated(bool Value=true)
CallLoweringInfo & setConvergenceControlToken(SDValue Token)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setPtrAuth(PtrAuthInfo Value)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setDiscardResult(bool Value=true)
This structure contains the information necessary for lowering pointer-authenticating indirect calls.
void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin, MCSymbol *InvokeEnd)