78#include "llvm/IR/IntrinsicsAArch64.h"
79#include "llvm/IR/IntrinsicsAMDGPU.h"
80#include "llvm/IR/IntrinsicsWebAssembly.h"
113#define DEBUG_TYPE "isel"
121 cl::desc(
"Insert the experimental `assertalign` node."),
126 cl::desc(
"Generate low-precision inline sequences "
127 "for some float libcalls"),
133 cl::desc(
"Set the case probability threshold for peeling the case from a "
134 "switch statement. A value greater than 100 will void this "
154 const SDValue *Parts,
unsigned NumParts,
157 std::optional<CallingConv::ID> CC);
166 unsigned NumParts,
MVT PartVT,
EVT ValueVT,
const Value *V,
168 std::optional<CallingConv::ID> CC = std::nullopt,
169 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
173 PartVT, ValueVT, CC))
180 assert(NumParts > 0 &&
"No parts to assemble!");
191 unsigned RoundBits = PartBits * RoundParts;
192 EVT RoundVT = RoundBits == ValueBits ?
198 if (RoundParts > 2) {
202 PartVT, HalfVT, V, InChain);
204 Lo = DAG.
getNode(ISD::BITCAST,
DL, HalfVT, Parts[0]);
205 Hi = DAG.
getNode(ISD::BITCAST,
DL, HalfVT, Parts[1]);
213 if (RoundParts < NumParts) {
215 unsigned OddParts = NumParts - RoundParts;
218 OddVT, V, InChain, CC);
234 assert(ValueVT ==
EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
245 !PartVT.
isVector() &&
"Unexpected split");
257 if (PartEVT == ValueVT)
261 ValueVT.
bitsLT(PartEVT)) {
270 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
274 if (ValueVT.
bitsLT(PartEVT)) {
279 Val = DAG.
getNode(*AssertOp,
DL, PartEVT, Val,
294 llvm::Attribute::StrictFP)) {
296 DAG.
getVTList(ValueVT, MVT::Other), InChain, Val,
303 return DAG.
getNode(ISD::FP_EXTEND,
DL, ValueVT, Val);
308 if (PartEVT == MVT::x86mmx && ValueVT.
isInteger() &&
309 ValueVT.
bitsLT(PartEVT)) {
310 Val = DAG.
getNode(ISD::BITCAST,
DL, MVT::i64, Val);
318 const Twine &ErrMsg) {
321 return Ctx.emitError(ErrMsg);
324 if (CI->isInlineAsm()) {
326 *CI, ErrMsg +
", possible invalid constraint for vector type"));
329 return Ctx.emitError(
I, ErrMsg);
338 const SDValue *Parts,
unsigned NumParts,
341 std::optional<CallingConv::ID> CallConv) {
343 assert(NumParts > 0 &&
"No parts to assemble!");
344 const bool IsABIRegCopy = CallConv.has_value();
353 unsigned NumIntermediates;
358 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT,
359 NumIntermediates, RegisterVT);
363 NumIntermediates, RegisterVT);
366 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
368 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
371 "Part type sizes don't match!");
375 if (NumIntermediates == NumParts) {
378 for (
unsigned i = 0; i != NumParts; ++i)
380 V, InChain, CallConv);
381 }
else if (NumParts > 0) {
384 assert(NumParts % NumIntermediates == 0 &&
385 "Must expand into a divisible number of parts!");
386 unsigned Factor = NumParts / NumIntermediates;
387 for (
unsigned i = 0; i != NumIntermediates; ++i)
389 IntermediateVT, V, InChain, CallConv);
404 DL, BuiltVectorTy,
Ops);
410 if (PartEVT == ValueVT)
416 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
426 "Cannot narrow, it would be a lossy transformation");
432 if (PartEVT == ValueVT)
435 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
439 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
450 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
456 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
457 }
else if (ValueVT.
bitsLT(PartEVT)) {
466 *DAG.
getContext(), V,
"non-trivial scalar-to-vector conversion");
475 Val = DAG.
getNode(ISD::BITCAST,
DL, ValueSVT, Val);
497 std::optional<CallingConv::ID> CallConv);
504 unsigned NumParts,
MVT PartVT,
const Value *V,
505 std::optional<CallingConv::ID> CallConv = std::nullopt,
519 unsigned OrigNumParts = NumParts;
521 "Copying to an illegal type!");
527 EVT PartEVT = PartVT;
528 if (PartEVT == ValueVT) {
529 assert(NumParts == 1 &&
"No-op copy with multiple parts!");
538 assert(NumParts == 1 &&
"Do not know what to promote to!");
539 Val = DAG.
getNode(ISD::FP_EXTEND,
DL, PartVT, Val);
545 Val = DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
549 "Unknown mismatch!");
551 Val = DAG.
getNode(ExtendKind,
DL, ValueVT, Val);
552 if (PartVT == MVT::x86mmx)
553 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
557 assert(NumParts == 1 && PartEVT != ValueVT);
558 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
563 "Unknown mismatch!");
566 if (PartVT == MVT::x86mmx)
567 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
573 "Failed to tile the value with PartVT!");
576 if (PartEVT != ValueVT) {
578 "scalar-to-vector conversion failed");
579 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
587 if (NumParts & (NumParts - 1)) {
590 "Do not know what to expand to!");
592 unsigned RoundBits = RoundParts * PartBits;
593 unsigned OddParts = NumParts - RoundParts;
602 std::reverse(Parts + RoundParts, Parts + NumParts);
604 NumParts = RoundParts;
616 for (
unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
617 for (
unsigned i = 0; i < NumParts; i += StepSize) {
618 unsigned ThisBits = StepSize * PartBits / 2;
621 SDValue &Part1 = Parts[i+StepSize/2];
628 if (ThisBits == PartBits && ThisVT != PartVT) {
629 Part0 = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Part0);
630 Part1 = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Part1);
636 std::reverse(Parts, Parts + OrigNumParts);
658 if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
660 "Cannot widen to illegal type");
663 }
else if (PartEVT != ValueEVT) {
678 Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
689 std::optional<CallingConv::ID> CallConv) {
693 const bool IsABIRegCopy = CallConv.has_value();
696 EVT PartEVT = PartVT;
697 if (PartEVT == ValueVT) {
701 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
736 Val = DAG.
getNode(ISD::FP_EXTEND,
DL, PartVT, Val);
743 "lossy conversion of vector to scalar type");
758 unsigned NumIntermediates;
762 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
767 NumIntermediates, RegisterVT);
770 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
772 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
775 "Mixing scalable and fixed vectors when copying in parts");
777 std::optional<ElementCount> DestEltCnt;
787 if (ValueVT == BuiltVectorTy) {
791 Val = DAG.
getNode(ISD::BITCAST,
DL, BuiltVectorTy, Val);
811 for (
unsigned i = 0; i != NumIntermediates; ++i) {
826 if (NumParts == NumIntermediates) {
829 for (
unsigned i = 0; i != NumParts; ++i)
831 }
else if (NumParts > 0) {
834 assert(NumIntermediates != 0 &&
"division by zero");
835 assert(NumParts % NumIntermediates == 0 &&
836 "Must expand into a divisible number of parts!");
837 unsigned Factor = NumParts / NumIntermediates;
838 for (
unsigned i = 0; i != NumIntermediates; ++i)
846 if (
I.hasOperandBundlesOtherThan(AllowedBundles)) {
850 for (
unsigned i = 0, e =
I.getNumOperandBundles(); i != e; ++i) {
853 OS << LS << U.getTagName();
856 Twine(
"cannot lower ", Name)
862 EVT valuevt, std::optional<CallingConv::ID> CC)
868 std::optional<CallingConv::ID> CC) {
882 for (
unsigned i = 0; i != NumRegs; ++i)
883 Regs.push_back(Reg + i);
884 RegVTs.push_back(RegisterVT);
886 Reg = Reg.id() + NumRegs;
913 for (
unsigned i = 0; i != NumRegs; ++i) {
919 *Glue =
P.getValue(2);
922 Chain =
P.getValue(1);
950 EVT FromVT(MVT::Other);
954 }
else if (NumSignBits > 1) {
962 assert(FromVT != MVT::Other);
968 RegisterVT, ValueVT, V, Chain,
CallConv);
984 unsigned NumRegs =
Regs.size();
998 NumParts, RegisterVT, V,
CallConv, ExtendKind);
1004 for (
unsigned i = 0; i != NumRegs; ++i) {
1016 if (NumRegs == 1 || Glue)
1027 Chain = Chains[NumRegs-1];
1033 unsigned MatchingIdx,
const SDLoc &dl,
1035 std::vector<SDValue> &
Ops)
const {
1040 Flag.setMatchingOp(MatchingIdx);
1041 else if (!
Regs.empty() &&
Regs.front().isVirtual()) {
1049 Flag.setRegClass(RC->
getID());
1060 "No 1:1 mapping from clobbers to regs?");
1063 for (
unsigned I = 0, E =
ValueVTs.size();
I != E; ++
I) {
1068 "If we clobbered the stack pointer, MFI should know about it.");
1077 for (
unsigned i = 0; i != NumRegs; ++i) {
1078 assert(Reg <
Regs.size() &&
"Mismatch in # registers expected");
1090 unsigned RegCount = std::get<0>(CountAndVT);
1091 MVT RegisterVT = std::get<1>(CountAndVT);
1108 SL->init(
DAG.getTargetLoweringInfo(), TM,
DAG.getDataLayout());
1110 *
DAG.getMachineFunction().getFunction().getParent());
1115 UnusedArgNodeMap.clear();
1117 PendingExports.clear();
1118 PendingConstrainedFP.clear();
1119 PendingConstrainedFPStrict.clear();
1127 DanglingDebugInfoMap.clear();
1134 if (Pending.
empty())
1140 unsigned i = 0, e = Pending.
size();
1141 for (; i != e; ++i) {
1143 if (Pending[i].
getNode()->getOperand(0) == Root)
1151 if (Pending.
size() == 1)
1178 if (!PendingConstrainedFPStrict.empty()) {
1179 assert(PendingConstrainedFP.empty());
1180 updateRoot(PendingConstrainedFPStrict);
1193 if (!PendingConstrainedFP.empty()) {
1194 assert(PendingConstrainedFPStrict.empty());
1195 updateRoot(PendingConstrainedFP);
1199 return DAG.getRoot();
1207 PendingConstrainedFP.size() +
1208 PendingConstrainedFPStrict.size());
1210 PendingConstrainedFP.end());
1211 PendingLoads.append(PendingConstrainedFPStrict.begin(),
1212 PendingConstrainedFPStrict.end());
1213 PendingConstrainedFP.clear();
1214 PendingConstrainedFPStrict.clear();
1221 PendingExports.append(PendingConstrainedFPStrict.begin(),
1222 PendingConstrainedFPStrict.end());
1223 PendingConstrainedFPStrict.clear();
1224 return updateRoot(PendingExports);
1231 assert(Variable &&
"Missing variable");
1238 <<
"dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
1254 if (IsParameter && FINode) {
1256 SDV =
DAG.getFrameIndexDbgValue(Variable,
Expression, FINode->getIndex(),
1257 true,
DL, SDNodeOrder);
1262 FuncArgumentDbgValueKind::Declare,
N);
1265 SDV =
DAG.getDbgValue(Variable,
Expression,
N.getNode(),
N.getResNo(),
1266 true,
DL, SDNodeOrder);
1268 DAG.AddDbgValue(SDV, IsParameter);
1273 FuncArgumentDbgValueKind::Declare,
N)) {
1275 <<
" (could not emit func-arg dbg_value)\n");
1286 for (
auto It = FnVarLocs->locs_begin(&
I), End = FnVarLocs->locs_end(&
I);
1288 auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1290 if (It->Values.isKillLocation(It->Expr)) {
1296 It->Values.hasArgList())) {
1299 FnVarLocs->getDILocalVariable(It->VariableID),
1300 It->Expr, Vals.
size() > 1, It->DL, SDNodeOrder);
1313 bool SkipDbgVariableRecords =
DAG.getFunctionVarLocs();
1316 for (
DbgRecord &DR :
I.getDbgRecordRange()) {
1318 assert(DLR->getLabel() &&
"Missing label");
1320 DAG.getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);
1321 DAG.AddDbgLabel(SDV);
1325 if (SkipDbgVariableRecords)
1333 if (
FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
1335 LLVM_DEBUG(
dbgs() <<
"SelectionDAG visiting dbg_declare: " << DVR
1344 if (Values.
empty()) {
1361 SDNodeOrder, IsVariadic)) {
1372 if (
I.isTerminator()) {
1373 HandlePHINodesInSuccessorBlocks(
I.getParent());
1380 bool NodeInserted =
false;
1381 std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1382 MDNode *PCSectionsMD =
I.getMetadata(LLVMContext::MD_pcsections);
1383 MDNode *MMRA =
I.getMetadata(LLVMContext::MD_mmra);
1384 if (PCSectionsMD || MMRA) {
1385 InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1386 DAG, [&](
SDNode *) { NodeInserted =
true; });
1396 if (PCSectionsMD || MMRA) {
1397 auto It = NodeMap.find(&
I);
1398 if (It != NodeMap.end()) {
1400 DAG.addPCSections(It->second.getNode(), PCSectionsMD);
1402 DAG.addMMRAMetadata(It->second.getNode(), MMRA);
1403 }
else if (NodeInserted) {
1406 errs() <<
"warning: loosing !pcsections and/or !mmra metadata ["
1407 <<
I.getModule()->getName() <<
"]\n";
1416void SelectionDAGBuilder::visitPHI(
const PHINode &) {
1426#define HANDLE_INST(NUM, OPCODE, CLASS) \
1427 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1428#include "llvm/IR/Instruction.def"
1440 for (
const Value *V : Values) {
1465 DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr,
DL, Order);
1470 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1471 DIVariable *DanglingVariable = DDI.getVariable();
1473 if (DanglingVariable == Variable && Expr->
fragmentsOverlap(DanglingExpr)) {
1475 << printDDI(
nullptr, DDI) <<
"\n");
1481 for (
auto &DDIMI : DanglingDebugInfoMap) {
1482 DanglingDebugInfoVector &DDIV = DDIMI.second;
1486 for (
auto &DDI : DDIV)
1487 if (isMatchingDbgValue(DDI))
1490 erase_if(DDIV, isMatchingDbgValue);
1498 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1499 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1502 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1503 for (
auto &DDI : DDIV) {
1506 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1509 assert(Variable->isValidLocationForIntrinsic(
DL) &&
1510 "Expected inlined-at fields to agree");
1519 if (!EmitFuncArgumentDbgValue(V, Variable, Expr,
DL,
1520 FuncArgumentDbgValueKind::Value, Val)) {
1522 << printDDI(V, DDI) <<
"\n");
1529 <<
"changing SDNodeOrder from " << DbgSDNodeOrder <<
" to "
1530 << ValSDNodeOrder <<
"\n");
1531 SDV = getDbgValue(Val, Variable, Expr,
DL,
1532 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1533 DAG.AddDbgValue(SDV,
false);
1537 <<
" in EmitFuncArgumentDbgValue\n");
1539 LLVM_DEBUG(
dbgs() <<
"Dropping debug info for " << printDDI(V, DDI)
1543 DAG.getConstantDbgValue(Variable, Expr,
Poison,
DL, DbgSDNodeOrder);
1544 DAG.AddDbgValue(SDV,
false);
1551 DanglingDebugInfo &DDI) {
1556 const Value *OrigV = V;
1560 unsigned SDOrder = DDI.getSDNodeOrder();
1564 bool StackValue =
true;
1589 if (!AdditionalValues.
empty())
1599 dbgs() <<
"Salvaged debug location info for:\n " << *Var <<
"\n"
1600 << *OrigV <<
"\nBy stripping back to:\n " << *V <<
"\n");
1608 assert(OrigV &&
"V shouldn't be null");
1610 auto *SDV =
DAG.getConstantDbgValue(Var, Expr,
Poison,
DL, SDNodeOrder);
1611 DAG.AddDbgValue(SDV,
false);
1613 << printDDI(OrigV, DDI) <<
"\n");
1630 unsigned Order,
bool IsVariadic) {
1635 if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
1640 for (
const Value *V : Values) {
1650 if (CE->getOpcode() == Instruction::IntToPtr) {
1669 N = UnusedArgNodeMap[V];
1674 EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1675 FuncArgumentDbgValueKind::Value,
N))
1702 bool IsParamOfFunc =
1710 auto VMI =
FuncInfo.ValueMap.find(V);
1711 if (VMI !=
FuncInfo.ValueMap.end()) {
1716 V->getType(), std::nullopt);
1722 unsigned BitsToDescribe = 0;
1724 BitsToDescribe = *VarSize;
1726 BitsToDescribe = Fragment->SizeInBits;
1729 if (
Offset >= BitsToDescribe)
1732 unsigned RegisterSize = RegAndSize.second;
1733 unsigned FragmentSize = (
Offset + RegisterSize > BitsToDescribe)
1734 ? BitsToDescribe -
Offset
1737 Expr,
Offset, FragmentSize);
1741 Var, *FragmentExpr, RegAndSize.first,
false, DbgLoc, Order);
1742 DAG.AddDbgValue(SDV,
false);
1758 DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1759 false, DbgLoc, Order, IsVariadic);
1760 DAG.AddDbgValue(SDV,
false);
1766 for (
auto &Pair : DanglingDebugInfoMap)
1767 for (
auto &DDI : Pair.second)
1778 if (It !=
FuncInfo.ValueMap.end()) {
1782 DAG.getDataLayout(), InReg, Ty,
1799 if (
N.getNode())
return N;
1859 return DAG.getSplatBuildVector(
1862 return DAG.getConstant(*CI,
DL, VT);
1871 getValue(CPA->getAddrDiscriminator()),
1872 getValue(CPA->getDiscriminator()));
1888 visit(CE->getOpcode(), *CE);
1890 assert(N1.
getNode() &&
"visit didn't populate the NodeMap!");
1896 for (
const Use &U :
C->operands()) {
1902 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1903 Constants.push_back(
SDValue(Val, i));
1912 for (
uint64_t i = 0, e = CDS->getNumElements(); i != e; ++i) {
1916 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1925 if (
C->getType()->isStructTy() ||
C->getType()->isArrayTy()) {
1927 "Unknown struct or array constant!");
1931 unsigned NumElts = ValueVTs.
size();
1935 for (
unsigned i = 0; i != NumElts; ++i) {
1936 EVT EltVT = ValueVTs[i];
1938 Constants[i] =
DAG.getUNDEF(EltVT);
1949 return DAG.getBlockAddress(BA, VT);
1952 return getValue(Equiv->getGlobalValue());
1957 if (VT == MVT::aarch64svcount) {
1958 assert(
C->isNullValue() &&
"Can only zero this target type!");
1964 assert(
C->isNullValue() &&
"Can only zero this target type!");
1981 for (
unsigned i = 0; i != NumElements; ++i)
2009 return DAG.getFrameIndex(
2017 std::optional<CallingConv::ID> CallConv;
2019 if (CB && !CB->isInlineAsm())
2020 CallConv = CB->getCallingConv();
2023 Inst->getType(), CallConv);
2037void SelectionDAGBuilder::visitCatchPad(
const CatchPadInst &
I) {
2050 if (IsMSVCCXX || IsCoreCLR)
2056 MachineBasicBlock *TargetMBB =
FuncInfo.getMBB(
I.getSuccessor());
2057 FuncInfo.MBB->addSuccessor(TargetMBB);
2064 if (TargetMBB != NextBlock(
FuncInfo.MBB) ||
2073 DAG.getMachineFunction().setHasEHContTarget(
true);
2079 Value *ParentPad =
I.getCatchSwitchParentPad();
2082 SuccessorColor = &
FuncInfo.Fn->getEntryBlock();
2085 assert(SuccessorColor &&
"No parent funclet for catchret!");
2086 MachineBasicBlock *SuccessorColorMBB =
FuncInfo.getMBB(SuccessorColor);
2087 assert(SuccessorColorMBB &&
"No MBB for SuccessorColor!");
2092 DAG.getBasicBlock(SuccessorColorMBB));
2096void SelectionDAGBuilder::visitCleanupPad(
const CleanupPadInst &CPI) {
2102 FuncInfo.MBB->setIsEHFuncletEntry();
2103 FuncInfo.MBB->setIsCleanupFuncletEntry();
2132 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2138 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2139 UnwindDests.back().first->setIsEHScopeEntry();
2142 UnwindDests.back().first->setIsEHFuncletEntry();
2146 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2147 UnwindDests.emplace_back(FuncInfo.
getMBB(CatchPadBB), Prob);
2149 if (IsMSVCCXX || IsCoreCLR)
2150 UnwindDests.back().first->setIsEHFuncletEntry();
2152 UnwindDests.back().first->setIsEHScopeEntry();
2154 NewEHPadBB = CatchSwitch->getUnwindDest();
2160 if (BPI && NewEHPadBB)
2162 EHPadBB = NewEHPadBB;
2169 auto UnwindDest =
I.getUnwindDest();
2170 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
2171 BranchProbability UnwindDestProb =
2176 for (
auto &UnwindDest : UnwindDests) {
2177 UnwindDest.first->setIsEHPad();
2178 addSuccessorWithProb(
FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
2180 FuncInfo.MBB->normalizeSuccProbs();
2183 MachineBasicBlock *CleanupPadMBB =
2184 FuncInfo.getMBB(
I.getCleanupPad()->getParent());
2190void SelectionDAGBuilder::visitCatchSwitch(
const CatchSwitchInst &CSI) {
2194void SelectionDAGBuilder::visitRet(
const ReturnInst &
I) {
2195 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
2196 auto &
DL =
DAG.getDataLayout();
2208 if (
I.getParent()->getTerminatingDeoptimizeCall()) {
2225 SmallVector<uint64_t, 4>
Offsets;
2228 unsigned NumValues = ValueVTs.
size();
2231 Align BaseAlign =
DL.getPrefTypeAlign(
I.getOperand(0)->getType());
2232 for (
unsigned i = 0; i != NumValues; ++i) {
2239 if (MemVTs[i] != ValueVTs[i])
2241 Chains[i] =
DAG.getStore(
2249 MVT::Other, Chains);
2250 }
else if (
I.getNumOperands() != 0) {
2253 unsigned NumValues =
Types.size();
2257 const Function *
F =
I.getParent()->getParent();
2260 I.getOperand(0)->getType(),
F->getCallingConv(),
2264 if (
F->getAttributes().hasRetAttr(Attribute::SExt))
2266 else if (
F->getAttributes().hasRetAttr(Attribute::ZExt))
2269 LLVMContext &
Context =
F->getContext();
2270 bool RetInReg =
F->getAttributes().hasRetAttr(Attribute::InReg);
2272 for (
unsigned j = 0;
j != NumValues; ++
j) {
2285 &Parts[0], NumParts, PartVT, &
I, CC, ExtendKind);
2288 ISD::ArgFlagsTy
Flags = ISD::ArgFlagsTy();
2292 if (
I.getOperand(0)->getType()->isPointerTy()) {
2294 Flags.setPointerAddrSpace(
2298 if (NeedsRegBlock) {
2299 Flags.setInConsecutiveRegs();
2300 if (j == NumValues - 1)
2301 Flags.setInConsecutiveRegsLast();
2309 else if (
F->getAttributes().hasRetAttr(Attribute::NoExt))
2312 for (
unsigned i = 0; i < NumParts; ++i) {
2315 VT, Types[j], 0, 0));
2325 const Function *
F =
I.getParent()->getParent();
2327 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2329 ISD::ArgFlagsTy
Flags = ISD::ArgFlagsTy();
2330 Flags.setSwiftError();
2342 bool isVarArg =
DAG.getMachineFunction().getFunction().isVarArg();
2344 DAG.getMachineFunction().getFunction().getCallingConv();
2345 Chain =
DAG.getTargetLoweringInfo().LowerReturn(
2350 "LowerReturn didn't return a valid chain!");
2361 if (V->getType()->isEmptyTy())
2365 if (VMI !=
FuncInfo.ValueMap.end()) {
2367 "Unused value assigned virtual registers!");
2380 if (
FuncInfo.isExportedInst(V))
return;
2392 if (VI->getParent() == FromBB)
2418 const BasicBlock *SrcBB = Src->getBasicBlock();
2419 const BasicBlock *DstBB = Dst->getBasicBlock();
2423 auto SuccSize = std::max<uint32_t>(
succ_size(SrcBB), 1);
2433 Src->addSuccessorWithoutProb(Dst);
2436 Prob = getEdgeProbability(Src, Dst);
2437 Src->addSuccessor(Dst, Prob);
2443 return I->getParent() == BB;
2467 if (CurBB == SwitchBB ||
2473 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2478 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2480 if (TM.Options.NoNaNsFPMath)
2484 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1),
nullptr,
2486 SL->SwitchCases.push_back(CB);
2495 SL->SwitchCases.push_back(CB);
2503 unsigned Depth = 0) {
2512 if (Necessary !=
nullptr) {
2515 if (Necessary->contains(
I))
2534 if (
I.getNumSuccessors() != 2)
2537 if (!
I.isConditional())
2549 if (BPI !=
nullptr) {
2555 std::optional<bool> Likely;
2558 else if (BPI->
isEdgeHot(
I.getParent(), IfFalse))
2562 if (
Opc == (*Likely ? Instruction::And : Instruction::Or))
2574 if (CostThresh <= 0)
2592 const auto &TLI =
DAG.getTargetLoweringInfo();
2599 Value *BrCond =
I.getCondition();
2600 auto ShouldCountInsn = [&RhsDeps, &BrCond](
const Instruction *Ins) {
2601 for (
const auto *U : Ins->users()) {
2604 if (UIns != BrCond && !RhsDeps.
contains(UIns))
2617 for (
unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
2619 for (
const auto &InsPair : RhsDeps) {
2620 if (!ShouldCountInsn(InsPair.first)) {
2621 ToDrop = InsPair.first;
2625 if (ToDrop ==
nullptr)
2627 RhsDeps.erase(ToDrop);
2630 for (
const auto &InsPair : RhsDeps) {
2638 if (CostOfIncluding > CostThresh)
2664 const Value *BOpOp0, *BOpOp1;
2678 if (BOpc == Instruction::And)
2679 BOpc = Instruction::Or;
2680 else if (BOpc == Instruction::Or)
2681 BOpc = Instruction::And;
2687 bool BOpIsInOrAndTree = BOpc && BOpc ==
Opc && BOp->
hasOneUse();
2692 TProb, FProb, InvertCond);
2702 if (
Opc == Instruction::Or) {
2723 auto NewTrueProb = TProb / 2;
2724 auto NewFalseProb = TProb / 2 + FProb;
2727 NewFalseProb, InvertCond);
2734 Probs[1], InvertCond);
2736 assert(
Opc == Instruction::And &&
"Unknown merge op!");
2756 auto NewTrueProb = TProb + FProb / 2;
2757 auto NewFalseProb = FProb / 2;
2760 NewFalseProb, InvertCond);
2767 Probs[1], InvertCond);
2776 if (Cases.size() != 2)
return true;
2780 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2781 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2782 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2783 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2789 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2790 Cases[0].CC == Cases[1].CC &&
2793 if (Cases[0].CC ==
ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2795 if (Cases[0].CC ==
ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2802void SelectionDAGBuilder::visitBr(
const BranchInst &
I) {
2808 if (
I.isUnconditional()) {
2814 if (Succ0MBB != NextBlock(BrMBB) ||
2827 const Value *CondVal =
I.getCondition();
2828 MachineBasicBlock *Succ1MBB =
FuncInfo.getMBB(
I.getSuccessor(1));
2847 bool IsUnpredictable =
I.hasMetadata(LLVMContext::MD_unpredictable);
2849 if (!
DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2852 const Value *BOp0, *BOp1;
2855 Opcode = Instruction::And;
2857 Opcode = Instruction::Or;
2864 DAG.getTargetLoweringInfo().getJumpConditionMergingParams(
2865 Opcode, BOp0, BOp1))) {
2867 getEdgeProbability(BrMBB, Succ0MBB),
2868 getEdgeProbability(BrMBB, Succ1MBB),
2873 assert(
SL->SwitchCases[0].ThisBB == BrMBB &&
"Unexpected lowering!");
2877 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i) {
2884 SL->SwitchCases.erase(
SL->SwitchCases.begin());
2890 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i)
2891 FuncInfo.MF->erase(
SL->SwitchCases[i].ThisBB);
2893 SL->SwitchCases.clear();
2899 nullptr, Succ0MBB, Succ1MBB, BrMBB,
getCurSDLoc(),
2920 if (CB.
TrueBB != NextBlock(SwitchBB)) {
2927 auto &TLI =
DAG.getTargetLoweringInfo();
2951 Cond =
DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.
CC);
2963 Cond =
DAG.getSetCC(dl, MVT::i1, CmpOp,
DAG.getConstant(
High, dl, VT),
2967 VT, CmpOp,
DAG.getConstant(
Low, dl, VT));
2968 Cond =
DAG.getSetCC(dl, MVT::i1, SUB,
2983 if (CB.
TrueBB == NextBlock(SwitchBB)) {
2999 BrCond =
DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
3002 DAG.setRoot(BrCond);
3008 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
3009 assert(JT.Reg &&
"Should lower JT Header first!");
3010 EVT PTy =
DAG.getTargetLoweringInfo().getJumpTableRegTy(
DAG.getDataLayout());
3012 SDValue Table =
DAG.getJumpTable(JT.JTI, PTy);
3013 SDValue BrJumpTable =
DAG.getNode(ISD::BR_JT, *JT.SL, MVT::Other,
3014 Index.getValue(1), Table, Index);
3015 DAG.setRoot(BrJumpTable);
3023 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
3024 const SDLoc &dl = *JT.SL;
3030 DAG.getConstant(JTH.
First, dl, VT));
3045 JT.Reg = JumpTableReg;
3053 Sub.getValueType()),
3056 SDValue BrCond =
DAG.getNode(ISD::BRCOND, dl,
3057 MVT::Other, CopyTo, CMP,
3058 DAG.getBasicBlock(JT.Default));
3061 if (JT.MBB != NextBlock(SwitchBB))
3062 BrCond =
DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
3063 DAG.getBasicBlock(JT.MBB));
3065 DAG.setRoot(BrCond);
3068 if (JT.MBB != NextBlock(SwitchBB))
3069 DAG.setRoot(
DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
3070 DAG.getBasicBlock(JT.MBB)));
3072 DAG.setRoot(CopyTo);
3095 if (PtrTy != PtrMemTy)
3111 auto &
DL =
DAG.getDataLayout();
3120 SDValue StackSlotPtr =
DAG.getFrameIndex(FI, PtrTy);
3127 PtrMemTy, dl,
DAG.getEntryNode(), StackSlotPtr,
3140 assert(GuardCheckFn &&
"Guard check function is null");
3151 Entry.IsInReg =
true;
3152 Args.push_back(Entry);
3158 getValue(GuardCheckFn), std::move(Args));
3160 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
3161 DAG.setRoot(Result.second);
3173 Guard =
DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
3179 Guard =
DAG.getPOISON(PtrMemTy);
3189 SDValue BrCond =
DAG.getNode(ISD::BRCOND, dl,
3222 auto &
DL =
DAG.getDataLayout();
3230 SDValue StackSlotPtr =
DAG.getFrameIndex(FI, PtrTy);
3236 PtrMemTy, dl,
DAG.getEntryNode(), StackSlotPtr,
3251 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
3252 Entry.IsInReg =
true;
3253 Args.push_back(Entry);
3259 getValue(GuardCheckFn), std::move(Args));
3265 Chain = TLI.
makeLibCall(
DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
3273 Chain =
DAG.getNode(ISD::TRAP,
getCurSDLoc(), MVT::Other, Chain);
3288 DAG.getNode(
ISD::SUB, dl, VT, SwitchOp,
DAG.getConstant(
B.First, dl, VT));
3292 bool UsePtrType =
false;
3316 if (!
B.FallthroughUnreachable)
3317 addSuccessorWithProb(SwitchBB,
B.Default,
B.DefaultProb);
3318 addSuccessorWithProb(SwitchBB,
MBB,
B.Prob);
3322 if (!
B.FallthroughUnreachable) {
3330 Root =
DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
3331 DAG.getBasicBlock(
B.Default));
3335 if (
MBB != NextBlock(SwitchBB))
3336 Root =
DAG.getNode(ISD::BR, dl, MVT::Other, Root,
DAG.getBasicBlock(
MBB));
3353 if (PopCount == 1) {
3360 }
else if (PopCount == BB.
Range) {
3368 DAG.getConstant(1, dl, VT), ShiftOp);
3372 VT, SwitchVal,
DAG.getConstant(
B.Mask, dl, VT));
3379 addSuccessorWithProb(SwitchBB,
B.TargetBB,
B.ExtraProb);
3381 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3389 Cmp,
DAG.getBasicBlock(
B.TargetBB));
3392 if (NextMBB != NextBlock(SwitchBB))
3393 BrAnd =
DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
3394 DAG.getBasicBlock(NextMBB));
3399void SelectionDAGBuilder::visitInvoke(
const InvokeInst &
I) {
3417 const Value *Callee(
I.getCalledOperand());
3420 visitInlineAsm(
I, EHPadBB);
3425 case Intrinsic::donothing:
3427 case Intrinsic::seh_try_begin:
3428 case Intrinsic::seh_scope_begin:
3429 case Intrinsic::seh_try_end:
3430 case Intrinsic::seh_scope_end:
3436 case Intrinsic::experimental_patchpoint_void:
3437 case Intrinsic::experimental_patchpoint:
3438 visitPatchpoint(
I, EHPadBB);
3440 case Intrinsic::experimental_gc_statepoint:
3446 case Intrinsic::wasm_throw: {
3448 std::array<SDValue, 4>
Ops = {
3459 case Intrinsic::wasm_rethrow: {
3460 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3461 std::array<SDValue, 2>
Ops = {
3470 }
else if (
I.hasDeoptState()) {
3491 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
3492 BranchProbability EHPadBBProb =
3498 addSuccessorWithProb(InvokeMBB, Return);
3499 for (
auto &UnwindDest : UnwindDests) {
3500 UnwindDest.first->setIsEHPad();
3501 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3507 DAG.getBasicBlock(Return)));
3510void SelectionDAGBuilder::visitCallBr(
const CallBrInst &
I) {
3511 MachineBasicBlock *CallBrMBB =
FuncInfo.MBB;
3518 assert(
I.isInlineAsm() &&
"Only know how to handle inlineasm callbr");
3523 SmallPtrSet<BasicBlock *, 8> Dests;
3524 Dests.
insert(
I.getDefaultDest());
3529 for (BasicBlock *Dest :
I.getIndirectDests()) {
3531 Target->setIsInlineAsmBrIndirectTarget();
3537 Target->setLabelMustBeEmitted();
3539 if (Dests.
insert(Dest).second)
3547 DAG.getBasicBlock(Return)));
3550void SelectionDAGBuilder::visitResume(
const ResumeInst &RI) {
3551 llvm_unreachable(
"SelectionDAGBuilder shouldn't visit resume instructions!");
3554void SelectionDAGBuilder::visitLandingPad(
const LandingPadInst &LP) {
3556 "Call to landingpad not in landing pad!");
3560 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3576 assert(ValueVTs.
size() == 2 &&
"Only two-valued landingpads are supported");
3581 if (
FuncInfo.ExceptionPointerVirtReg) {
3582 Ops[0] =
DAG.getZExtOrTrunc(
3583 DAG.getCopyFromReg(
DAG.getEntryNode(), dl,
3590 Ops[1] =
DAG.getZExtOrTrunc(
3591 DAG.getCopyFromReg(
DAG.getEntryNode(), dl,
3598 DAG.getVTList(ValueVTs),
Ops);
3606 if (JTB.first.HeaderBB ==
First)
3607 JTB.first.HeaderBB =
Last;
3620 for (
unsigned i = 0, e =
I.getNumSuccessors(); i != e; ++i) {
3622 bool Inserted =
Done.insert(BB).second;
3627 addSuccessorWithProb(IndirectBrMBB, Succ);
3637 if (!
I.shouldLowerToTrap(
DAG.getTarget().Options.TrapUnreachable,
3638 DAG.getTarget().Options.NoTrapAfterNoreturn))
3644void SelectionDAGBuilder::visitUnary(
const User &
I,
unsigned Opcode) {
3647 Flags.copyFMF(*FPOp);
3655void SelectionDAGBuilder::visitBinary(
const User &
I,
unsigned Opcode) {
3658 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3659 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3662 Flags.setExact(ExactOp->isExact());
3664 Flags.setDisjoint(DisjointOp->isDisjoint());
3666 Flags.copyFMF(*FPOp);
3675void SelectionDAGBuilder::visitShift(
const User &
I,
unsigned Opcode) {
3679 EVT ShiftTy =
DAG.getTargetLoweringInfo().getShiftAmountTy(
3684 if (!
I.getType()->isVectorTy() && Op2.
getValueType() != ShiftTy) {
3686 "Unexpected shift type");
3696 if (
const OverflowingBinaryOperator *OFBinOp =
3698 nuw = OFBinOp->hasNoUnsignedWrap();
3699 nsw = OFBinOp->hasNoSignedWrap();
3701 if (
const PossiblyExactOperator *ExactOp =
3703 exact = ExactOp->isExact();
3706 Flags.setExact(exact);
3707 Flags.setNoSignedWrap(nsw);
3708 Flags.setNoUnsignedWrap(nuw);
3714void SelectionDAGBuilder::visitSDiv(
const User &
I) {
3725void SelectionDAGBuilder::visitICmp(
const ICmpInst &
I) {
3731 auto &TLI =
DAG.getTargetLoweringInfo();
3744 Flags.setSameSign(
I.hasSameSign());
3745 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
3747 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3752void SelectionDAGBuilder::visitFCmp(
const FCmpInst &
I) {
3759 if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
3763 Flags.copyFMF(*FPMO);
3764 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
3766 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3775 return isa<SelectInst>(V);
3779void SelectionDAGBuilder::visitSelect(
const User &
I) {
3783 unsigned NumValues = ValueVTs.
size();
3784 if (NumValues == 0)
return;
3794 bool IsUnaryAbs =
false;
3795 bool Negate =
false;
3799 Flags.copyFMF(*FPOp);
3801 Flags.setUnpredictable(
3806 EVT VT = ValueVTs[0];
3807 LLVMContext &Ctx = *
DAG.getContext();
3808 auto &TLI =
DAG.getTargetLoweringInfo();
3818 bool UseScalarMinMax = VT.
isVector() &&
3827 switch (SPR.Flavor) {
3833 switch (SPR.NaNBehavior) {
3846 switch (SPR.NaNBehavior) {
3890 for (
unsigned i = 0; i != NumValues; ++i) {
3896 Values[i] =
DAG.getNegative(Values[i], dl, VT);
3899 for (
unsigned i = 0; i != NumValues; ++i) {
3903 Values[i] =
DAG.getNode(
3910 DAG.getVTList(ValueVTs), Values));
3913void SelectionDAGBuilder::visitTrunc(
const User &
I) {
3916 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3920 Flags.setNoSignedWrap(Trunc->hasNoSignedWrap());
3921 Flags.setNoUnsignedWrap(Trunc->hasNoUnsignedWrap());
3927void SelectionDAGBuilder::visitZExt(
const User &
I) {
3931 auto &TLI =
DAG.getTargetLoweringInfo();
3936 Flags.setNonNeg(PNI->hasNonNeg());
3941 if (
Flags.hasNonNeg() &&
3950void SelectionDAGBuilder::visitSExt(
const User &
I) {
3954 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3959void SelectionDAGBuilder::visitFPTrunc(
const User &
I) {
3965 Flags.copyFMF(*TruncInst);
3966 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3969 DAG.getTargetConstant(
3974void SelectionDAGBuilder::visitFPExt(
const User &
I) {
3977 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3982void SelectionDAGBuilder::visitFPToUI(
const User &
I) {
3985 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3990void SelectionDAGBuilder::visitFPToSI(
const User &
I) {
3993 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3998void SelectionDAGBuilder::visitUIToFP(
const User &
I) {
4001 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4005 Flags.setNonNeg(PNI->hasNonNeg());
4010void SelectionDAGBuilder::visitSIToFP(
const User &
I) {
4013 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4018void SelectionDAGBuilder::visitPtrToAddr(
const User &
I) {
4021 const auto &TLI =
DAG.getTargetLoweringInfo();
4029void SelectionDAGBuilder::visitPtrToInt(
const User &
I) {
4033 auto &TLI =
DAG.getTargetLoweringInfo();
4034 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4043void SelectionDAGBuilder::visitIntToPtr(
const User &
I) {
4047 auto &TLI =
DAG.getTargetLoweringInfo();
4055void SelectionDAGBuilder::visitBitCast(
const User &
I) {
4058 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4063 if (DestVT !=
N.getValueType())
4071 setValue(&
I,
DAG.getConstant(
C->getValue(), dl, DestVT,
false,
4077void SelectionDAGBuilder::visitAddrSpaceCast(
const User &
I) {
4078 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4079 const Value *SV =
I.getOperand(0);
4084 unsigned DestAS =
I.getType()->getPointerAddressSpace();
4086 if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
4092void SelectionDAGBuilder::visitInsertElement(
const User &
I) {
4093 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4100 InVec, InVal, InIdx));
4103void SelectionDAGBuilder::visitExtractElement(
const User &
I) {
4104 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4113void SelectionDAGBuilder::visitShuffleVector(
const User &
I) {
4118 Mask = SVI->getShuffleMask();
4122 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4126 if (
all_of(Mask, [](
int Elem) {
return Elem == 0; }) &&
4131 DAG.getVectorIdxConstant(0,
DL));
4142 unsigned MaskNumElts =
Mask.size();
4144 if (SrcNumElts == MaskNumElts) {
4150 if (SrcNumElts < MaskNumElts) {
4154 if (MaskNumElts % SrcNumElts == 0) {
4158 unsigned NumConcat = MaskNumElts / SrcNumElts;
4159 bool IsConcat =
true;
4160 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
4161 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4167 if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
4168 (ConcatSrcs[i / SrcNumElts] >= 0 &&
4169 ConcatSrcs[i / SrcNumElts] != (
int)(Idx / SrcNumElts))) {
4174 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
4181 for (
auto Src : ConcatSrcs) {
4194 unsigned PaddedMaskNumElts =
alignTo(MaskNumElts, SrcNumElts);
4195 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
4211 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
4212 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4214 if (Idx >= (
int)SrcNumElts)
4215 Idx -= SrcNumElts - PaddedMaskNumElts;
4223 if (MaskNumElts != PaddedMaskNumElts)
4225 DAG.getVectorIdxConstant(0,
DL));
4231 assert(SrcNumElts > MaskNumElts);
4235 int StartIdx[2] = {-1, -1};
4236 bool CanExtract =
true;
4237 for (
int Idx : Mask) {
4242 if (Idx >= (
int)SrcNumElts) {
4250 int NewStartIdx =
alignDown(Idx, MaskNumElts);
4251 if (NewStartIdx + MaskNumElts > SrcNumElts ||
4252 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
4256 StartIdx[Input] = NewStartIdx;
4259 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
4265 for (
unsigned Input = 0; Input < 2; ++Input) {
4266 SDValue &Src = Input == 0 ? Src1 : Src2;
4267 if (StartIdx[Input] < 0)
4268 Src =
DAG.getUNDEF(VT);
4271 DAG.getVectorIdxConstant(StartIdx[Input],
DL));
4276 SmallVector<int, 8> MappedOps(Mask);
4277 for (
int &Idx : MappedOps) {
4278 if (Idx >= (
int)SrcNumElts)
4279 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4284 setValue(&
I,
DAG.getVectorShuffle(VT,
DL, Src1, Src2, MappedOps));
4293 for (
int Idx : Mask) {
4297 Res =
DAG.getUNDEF(EltVT);
4299 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
4300 if (Idx >= (
int)SrcNumElts) Idx -= SrcNumElts;
4303 DAG.getVectorIdxConstant(Idx,
DL));
4313 ArrayRef<unsigned> Indices =
I.getIndices();
4314 const Value *Op0 =
I.getOperand(0);
4316 Type *AggTy =
I.getType();
4323 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4329 unsigned NumAggValues = AggValueVTs.
size();
4330 unsigned NumValValues = ValValueVTs.
size();
4334 if (!NumAggValues) {
4342 for (; i != LinearIndex; ++i)
4343 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4348 for (; i != LinearIndex + NumValValues; ++i)
4349 Values[i] = FromUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4353 for (; i != NumAggValues; ++i)
4354 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4358 DAG.getVTList(AggValueVTs), Values));
4362 ArrayRef<unsigned> Indices =
I.getIndices();
4363 const Value *Op0 =
I.getOperand(0);
4365 Type *ValTy =
I.getType();
4370 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4374 unsigned NumValValues = ValValueVTs.
size();
4377 if (!NumValValues) {
4386 for (
unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4387 Values[i - LinearIndex] =
4393 DAG.getVTList(ValValueVTs), Values));
4396void SelectionDAGBuilder::visitGetElementPtr(
const User &
I) {
4397 Value *Op0 =
I.getOperand(0);
4403 auto &TLI =
DAG.getTargetLoweringInfo();
4408 bool IsVectorGEP =
I.getType()->isVectorTy();
4409 ElementCount VectorElementCount =
4415 const Value *Idx = GTI.getOperand();
4416 if (StructType *StTy = GTI.getStructTypeOrNull()) {
4421 DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(
Field);
4431 N =
DAG.getMemBasePlusOffset(
4432 N,
DAG.getConstant(
Offset, dl,
N.getValueType()), dl, Flags);
4438 unsigned IdxSize =
DAG.getDataLayout().getIndexSizeInBits(AS);
4440 TypeSize ElementSize =
4441 GTI.getSequentialElementStride(
DAG.getDataLayout());
4446 bool ElementScalable = ElementSize.
isScalable();
4452 C =
C->getSplatValue();
4455 if (CI && CI->isZero())
4457 if (CI && !ElementScalable) {
4458 APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4461 if (
N.getValueType().isVector())
4462 OffsVal =
DAG.getConstant(
4465 OffsVal =
DAG.getConstant(Offs, dl, IdxTy);
4472 Flags.setNoUnsignedWrap(
true);
4475 OffsVal =
DAG.getSExtOrTrunc(OffsVal, dl,
N.getValueType());
4477 N =
DAG.getMemBasePlusOffset(
N, OffsVal, dl, Flags);
4485 if (
N.getValueType().isVector()) {
4487 VectorElementCount);
4488 IdxN =
DAG.getSplat(VT, dl, IdxN);
4492 N =
DAG.getSplat(VT, dl,
N);
4498 IdxN =
DAG.getSExtOrTrunc(IdxN, dl,
N.getValueType());
4500 SDNodeFlags ScaleFlags;
4509 if (ElementScalable) {
4510 EVT VScaleTy =
N.getValueType().getScalarType();
4512 ISD::VSCALE, dl, VScaleTy,
4513 DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4514 if (
N.getValueType().isVector())
4515 VScale =
DAG.getSplatVector(
N.getValueType(), dl, VScale);
4516 IdxN =
DAG.getNode(
ISD::MUL, dl,
N.getValueType(), IdxN, VScale,
4521 if (ElementMul != 1) {
4522 if (ElementMul.isPowerOf2()) {
4523 unsigned Amt = ElementMul.logBase2();
4526 DAG.getShiftAmountConstant(Amt,
N.getValueType(), dl),
4529 SDValue Scale =
DAG.getConstant(ElementMul.getZExtValue(), dl,
4531 IdxN =
DAG.getNode(
ISD::MUL, dl,
N.getValueType(), IdxN, Scale,
4541 SDNodeFlags AddFlags;
4545 N =
DAG.getMemBasePlusOffset(
N, IdxN, dl, AddFlags);
4549 if (IsVectorGEP && !
N.getValueType().isVector()) {
4551 N =
DAG.getSplat(VT, dl,
N);
4562 N =
DAG.getPtrExtendInReg(
N, dl, PtrMemTy);
4567void SelectionDAGBuilder::visitAlloca(
const AllocaInst &
I) {
4574 Type *Ty =
I.getAllocatedType();
4575 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4576 auto &
DL =
DAG.getDataLayout();
4577 TypeSize TySize =
DL.getTypeAllocSize(Ty);
4578 MaybeAlign Alignment = std::max(
DL.getPrefTypeAlign(Ty),
I.getAlign());
4584 AllocSize =
DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4587 AllocSize =
DAG.getNode(
ISD::MUL, dl, IntPtr, AllocSize,
4588 DAG.getVScale(dl, IntPtr,
4594 AllocSize =
DAG.getNode(
ISD::MUL, dl, IntPtr, AllocSize,
4595 DAG.getZExtOrTrunc(TySizeValue, dl, IntPtr));
4601 Align StackAlign =
DAG.getSubtarget().getFrameLowering()->getStackAlign();
4602 if (*Alignment <= StackAlign)
4603 Alignment = std::nullopt;
4605 const uint64_t StackAlignMask = StackAlign.
value() - 1U;
4610 DAG.getConstant(StackAlignMask, dl, IntPtr),
4615 DAG.getSignedConstant(~StackAlignMask, dl, IntPtr));
4619 DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4621 SDValue DSA =
DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs,
Ops);
4629 return I.getMetadata(LLVMContext::MD_range);
4634 if (std::optional<ConstantRange> CR = CB->getRange())
4638 return std::nullopt;
4643 return CB->getRetNoFPClass();
4647void SelectionDAGBuilder::visitLoad(
const LoadInst &
I) {
4649 return visitAtomicLoad(
I);
4651 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4652 const Value *SV =
I.getOperand(0);
4657 if (Arg->hasSwiftErrorAttr())
4658 return visitLoadFromSwiftError(
I);
4662 if (Alloca->isSwiftError())
4663 return visitLoadFromSwiftError(
I);
4669 Type *Ty =
I.getType();
4673 unsigned NumValues = ValueVTs.
size();
4677 Align Alignment =
I.getAlign();
4678 AAMDNodes AAInfo =
I.getAAMetadata();
4680 bool isVolatile =
I.isVolatile();
4685 bool ConstantMemory =
false;
4692 BatchAA->pointsToConstantMemory(MemoryLocation(
4697 Root =
DAG.getEntryNode();
4698 ConstantMemory =
true;
4702 Root =
DAG.getRoot();
4713 unsigned ChainI = 0;
4714 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4730 MachinePointerInfo PtrInfo =
4732 ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue())
4733 : MachinePointerInfo();
4736 SDValue L =
DAG.getLoad(MemVTs[i], dl, Root,
A, PtrInfo, Alignment,
4737 MMOFlags, AAInfo, Ranges);
4738 Chains[ChainI] =
L.getValue(1);
4740 if (MemVTs[i] != ValueVTs[i])
4741 L =
DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]);
4746 if (!ConstantMemory) {
4756 DAG.getVTList(ValueVTs), Values));
4759void SelectionDAGBuilder::visitStoreToSwiftError(
const StoreInst &
I) {
4760 assert(
DAG.getTargetLoweringInfo().supportSwiftError() &&
4761 "call visitStoreToSwiftError when backend supports swifterror");
4764 SmallVector<uint64_t, 4>
Offsets;
4765 const Value *SrcV =
I.getOperand(0);
4767 SrcV->
getType(), ValueVTs,
nullptr, &Offsets, 0);
4768 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4769 "expect a single EVT for swifterror");
4778 SDValue(Src.getNode(), Src.getResNo()));
4779 DAG.setRoot(CopyNode);
4782void SelectionDAGBuilder::visitLoadFromSwiftError(
const LoadInst &
I) {
4783 assert(
DAG.getTargetLoweringInfo().supportSwiftError() &&
4784 "call visitLoadFromSwiftError when backend supports swifterror");
4787 !
I.hasMetadata(LLVMContext::MD_nontemporal) &&
4788 !
I.hasMetadata(LLVMContext::MD_invariant_load) &&
4789 "Support volatile, non temporal, invariant for load_from_swift_error");
4791 const Value *SV =
I.getOperand(0);
4792 Type *Ty =
I.getType();
4795 !
BatchAA->pointsToConstantMemory(MemoryLocation(
4797 I.getAAMetadata()))) &&
4798 "load_from_swift_error should not be constant memory");
4801 SmallVector<uint64_t, 4>
Offsets;
4803 ValueVTs,
nullptr, &Offsets, 0);
4804 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4805 "expect a single EVT for swifterror");
4815void SelectionDAGBuilder::visitStore(
const StoreInst &
I) {
4817 return visitAtomicStore(
I);
4819 const Value *SrcV =
I.getOperand(0);
4820 const Value *PtrV =
I.getOperand(1);
4822 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4827 if (Arg->hasSwiftErrorAttr())
4828 return visitStoreToSwiftError(
I);
4832 if (Alloca->isSwiftError())
4833 return visitStoreToSwiftError(
I);
4840 SrcV->
getType(), ValueVTs, &MemVTs, &Offsets);
4841 unsigned NumValues = ValueVTs.
size();
4854 Align Alignment =
I.getAlign();
4855 AAMDNodes AAInfo =
I.getAAMetadata();
4859 unsigned ChainI = 0;
4860 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4870 MachinePointerInfo PtrInfo =
4872 ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue())
4873 : MachinePointerInfo();
4877 if (MemVTs[i] != ValueVTs[i])
4878 Val =
DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4880 DAG.getStore(Root, dl, Val,
Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4881 Chains[ChainI] = St;
4887 DAG.setRoot(StoreNode);
4890void SelectionDAGBuilder::visitMaskedStore(
const CallInst &
I,
4891 bool IsCompressing) {
4894 Value *Src0Operand =
I.getArgOperand(0);
4895 Value *PtrOperand =
I.getArgOperand(1);
4896 Value *MaskOperand =
I.getArgOperand(2);
4897 Align Alignment =
I.getParamAlign(1).valueOrOne();
4907 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
4910 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
4911 MachinePointerInfo(PtrOperand), MMOFlags,
4914 const auto &TLI =
DAG.getTargetLoweringInfo();
4919 I.getArgOperand(0)->getType(),
true)
4925 DAG.setRoot(StoreNode);
4951 assert(
Ptr->getType()->isVectorTy() &&
"Unexpected pointer type");
4955 C =
C->getSplatValue();
4969 if (!
GEP ||
GEP->getParent() != CurBB)
4972 if (
GEP->getNumOperands() != 2)
4975 const Value *BasePtr =
GEP->getPointerOperand();
4976 const Value *IndexVal =
GEP->getOperand(
GEP->getNumOperands() - 1);
4982 TypeSize ScaleVal =
DL.getTypeAllocSize(
GEP->getResultElementType());
4987 if (ScaleVal != 1 &&
4999void SelectionDAGBuilder::visitMaskedScatter(
const CallInst &
I) {
5003 const Value *
Ptr =
I.getArgOperand(1);
5007 Align Alignment =
I.getParamAlign(1).valueOrOne();
5008 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5016 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
5017 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5027 EVT IdxVT =
Index.getValueType();
5035 SDValue Scatter =
DAG.getMaskedScatter(
DAG.getVTList(MVT::Other), VT, sdl,
5037 DAG.setRoot(Scatter);
5041void SelectionDAGBuilder::visitMaskedLoad(
const CallInst &
I,
bool IsExpanding) {
5044 Value *PtrOperand =
I.getArgOperand(0);
5045 Value *MaskOperand =
I.getArgOperand(1);
5046 Value *Src0Operand =
I.getArgOperand(2);
5047 Align Alignment =
I.getParamAlign(0).valueOrOne();
5055 AAMDNodes AAInfo =
I.getAAMetadata();
5062 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
5065 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
5068 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5069 MachinePointerInfo(PtrOperand), MMOFlags,
5072 const auto &TLI =
DAG.getTargetLoweringInfo();
5084 DAG.getMaskedLoad(VT, sdl, InChain,
Ptr,
Offset, Mask, Src0, VT, MMO,
5091void SelectionDAGBuilder::visitMaskedGather(
const CallInst &
I) {
5095 const Value *
Ptr =
I.getArgOperand(0);
5099 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5101 Align Alignment =
I.getParamAlign(0).valueOrOne();
5111 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
5112 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5124 EVT IdxVT =
Index.getValueType();
5133 DAG.getMaskedGather(
DAG.getVTList(VT, MVT::Other), VT, sdl,
Ops, MMO,
5149 SDVTList VTs =
DAG.getVTList(MemVT, MVT::i1, MVT::Other);
5151 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5154 MachineFunction &MF =
DAG.getMachineFunction();
5156 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5157 DAG.getEVTAlign(MemVT), AAMDNodes(),
nullptr, SSID, SuccessOrdering,
5160 SDValue L =
DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
5161 dl, MemVT, VTs, InChain,
5169 DAG.setRoot(OutChain);
5172void SelectionDAGBuilder::visitAtomicRMW(
const AtomicRMWInst &
I) {
5175 switch (
I.getOperation()) {
5193 NT = ISD::ATOMIC_LOAD_FMAXIMUM;
5196 NT = ISD::ATOMIC_LOAD_FMINIMUM;
5199 NT = ISD::ATOMIC_LOAD_UINC_WRAP;
5202 NT = ISD::ATOMIC_LOAD_UDEC_WRAP;
5205 NT = ISD::ATOMIC_LOAD_USUB_COND;
5208 NT = ISD::ATOMIC_LOAD_USUB_SAT;
5217 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5220 MachineFunction &MF =
DAG.getMachineFunction();
5222 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5223 DAG.getEVTAlign(MemVT), AAMDNodes(),
nullptr, SSID, Ordering);
5226 DAG.getAtomic(NT, dl, MemVT, InChain,
5233 DAG.setRoot(OutChain);
5236void SelectionDAGBuilder::visitFence(
const FenceInst &
I) {
5238 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5241 Ops[1] =
DAG.getTargetConstant((
unsigned)
I.getOrdering(), dl,
5243 Ops[2] =
DAG.getTargetConstant(
I.getSyncScopeID(), dl,
5250void SelectionDAGBuilder::visitAtomicLoad(
const LoadInst &
I) {
5257 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5268 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5269 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5270 I.getAlign(), AAMDNodes(), Ranges, SSID, Order);
5280 L =
DAG.getPtrExtOrTrunc(L, dl, VT);
5283 DAG.setRoot(OutChain);
5286void SelectionDAGBuilder::visitAtomicStore(
const StoreInst &
I) {
5294 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5304 MachineFunction &MF =
DAG.getMachineFunction();
5306 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5307 I.getAlign(), AAMDNodes(),
nullptr, SSID, Ordering);
5311 Val =
DAG.getPtrExtOrTrunc(Val, dl, MemVT);
5315 DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain, Val,
Ptr, MMO);
5318 DAG.setRoot(OutChain);
5326std::pair<bool, bool>
5327SelectionDAGBuilder::getTargetIntrinsicCallProperties(
const CallBase &
I) {
5329 bool HasChain = !
F->doesNotAccessMemory();
5331 HasChain &&
F->onlyReadsMemory() &&
F->willReturn() &&
F->doesNotThrow();
5333 return {HasChain, OnlyLoad};
5337 const CallBase &
I,
bool HasChain,
bool OnlyLoad,
5339 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5346 Ops.push_back(
DAG.getRoot());
5359 for (
unsigned i = 0, e =
I.arg_size(); i != e; ++i) {
5360 const Value *Arg =
I.getArgOperand(i);
5361 if (!
I.paramHasAttr(i, Attribute::ImmArg)) {
5369 assert(CI->getBitWidth() <= 64 &&
5370 "large intrinsic immediates not handled");
5371 Ops.push_back(
DAG.getTargetConstant(*CI, SDLoc(), VT));
5378 if (std::optional<OperandBundleUse> Bundle =
5380 Value *Token = Bundle->Inputs[0].get();
5382 assert(
Ops.back().getValueType() != MVT::Glue &&
5383 "Did not expect another glue node here.");
5385 DAG.getNode(ISD::CONVERGENCECTRL_GLUE, {}, MVT::Glue, ConvControlToken);
5386 Ops.push_back(ConvControlToken);
5394 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5402 return DAG.getVTList(ValueVTs);
5406SDValue SelectionDAGBuilder::getTargetNonMemIntrinsicNode(
5429 if (
I.getType()->isVoidTy())
5444void SelectionDAGBuilder::visitTargetIntrinsic(
const CallInst &
I,
5446 auto [HasChain, OnlyLoad] = getTargetIntrinsicCallProperties(
I);
5449 TargetLowering::IntrinsicInfo
Info;
5450 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5451 bool IsTgtMemIntrinsic =
5455 I, HasChain, OnlyLoad, IsTgtMemIntrinsic ? &
Info :
nullptr);
5456 SDVTList VTs = getTargetIntrinsicVTList(
I, HasChain);
5461 Flags.copyFMF(*FPMO);
5462 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
5469 if (IsTgtMemIntrinsic) {
5474 MachinePointerInfo MPI;
5476 MPI = MachinePointerInfo(
Info.ptrVal,
Info.offset);
5477 else if (
Info.fallbackAddressSpace)
5478 MPI = MachinePointerInfo(*
Info.fallbackAddressSpace);
5479 EVT MemVT =
Info.memVT;
5481 if (
Size.hasValue() && !
Size.getValue())
5483 Align Alignment =
Info.align.value_or(
DAG.getEVTAlign(MemVT));
5484 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5485 MPI,
Info.flags,
Size, Alignment,
I.getAAMetadata(),
nullptr,
5490 Result = getTargetNonMemIntrinsicNode(*
I.getType(), HasChain,
Ops, VTs);
5493 Result = handleTargetIntrinsicRet(
I, HasChain, OnlyLoad, Result);
5509 return DAG.
getNode(ISD::BITCAST, dl, MVT::f32, t2);
5550 SDValue TwoToFractionalPartOfX;
5618 SDValue t13 = DAG.
getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
5619 return DAG.
getNode(ISD::BITCAST, dl, MVT::f32,
5627 if (
Op.getValueType() == MVT::f32 &&
5642 return DAG.
getNode(ISD::FEXP, dl,
Op.getValueType(),
Op, Flags);
5651 if (
Op.getValueType() == MVT::f32 &&
5741 return DAG.
getNode(ISD::FLOG, dl,
Op.getValueType(),
Op, Flags);
5750 if (
Op.getValueType() == MVT::f32 &&
5834 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5838 return DAG.
getNode(ISD::FLOG2, dl,
Op.getValueType(),
Op, Flags);
5847 if (
Op.getValueType() == MVT::f32 &&
5924 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5928 return DAG.
getNode(ISD::FLOG10, dl,
Op.getValueType(),
Op, Flags);
5935 if (
Op.getValueType() == MVT::f32 &&
5940 return DAG.
getNode(ISD::FEXP2, dl,
Op.getValueType(),
Op, Flags);
5948 bool IsExp10 =
false;
5949 if (
LHS.getValueType() == MVT::f32 &&
RHS.getValueType() == MVT::f32 &&
5953 IsExp10 = LHSC->isExactlyValue(Ten);
5980 unsigned Val = RHSC->getSExtValue();
6009 CurSquare, CurSquare);
6014 if (RHSC->getSExtValue() < 0)
6028 EVT VT =
LHS.getValueType();
6051 if ((ScaleInt > 0 || (Saturating &&
Signed)) &&
6055 Opcode, VT, ScaleInt);
6090 switch (
N.getOpcode()) {
6094 Op.getValueType().getSizeInBits());
6119bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
6126 MachineFunction &MF =
DAG.getMachineFunction();
6127 const TargetInstrInfo *
TII =
DAG.getSubtarget().getInstrInfo();
6131 auto MakeVRegDbgValue = [&](
Register Reg, DIExpression *FragExpr,
6136 auto &Inst =
TII->get(TargetOpcode::DBG_INSTR_REF);
6143 auto *NewDIExpr = FragExpr;
6150 return BuildMI(MF,
DL, Inst,
false, MOs, Variable, NewDIExpr);
6153 auto &Inst =
TII->get(TargetOpcode::DBG_VALUE);
6154 return BuildMI(MF,
DL, Inst, Indirect,
Reg, Variable, FragExpr);
6158 if (Kind == FuncArgumentDbgValueKind::Value) {
6163 if (!IsInEntryBlock)
6179 bool VariableIsFunctionInputArg =
Variable->isParameter() &&
6180 !
DL->getInlinedAt();
6182 if (!IsInPrologue && !VariableIsFunctionInputArg)
6216 if (VariableIsFunctionInputArg) {
6218 if (ArgNo >=
FuncInfo.DescribedArgs.size())
6219 FuncInfo.DescribedArgs.resize(ArgNo + 1,
false);
6220 else if (!IsInPrologue &&
FuncInfo.DescribedArgs.test(ArgNo))
6221 return !NodeMap[
V].getNode();
6226 bool IsIndirect =
false;
6227 std::optional<MachineOperand>
Op;
6229 int FI =
FuncInfo.getArgumentFrameIndex(Arg);
6230 if (FI != std::numeric_limits<int>::max())
6234 if (!
Op &&
N.getNode()) {
6237 if (ArgRegsAndSizes.
size() == 1)
6238 Reg = ArgRegsAndSizes.
front().first;
6241 MachineRegisterInfo &RegInfo = MF.
getRegInfo();
6248 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6252 if (!
Op &&
N.getNode()) {
6256 if (FrameIndexSDNode *FINode =
6266 for (
const auto &RegAndSize : SplitRegs) {
6270 int RegFragmentSizeInBits = RegAndSize.second;
6272 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
6275 if (
Offset >= ExprFragmentSizeInBits)
6279 if (
Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
6280 RegFragmentSizeInBits = ExprFragmentSizeInBits -
Offset;
6285 Expr,
Offset, RegFragmentSizeInBits);
6286 Offset += RegAndSize.second;
6289 if (!FragmentExpr) {
6290 SDDbgValue *SDV =
DAG.getConstantDbgValue(
6292 DAG.AddDbgValue(SDV,
false);
6295 MachineInstr *NewMI =
6296 MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
6297 Kind != FuncArgumentDbgValueKind::Value);
6298 FuncInfo.ArgDbgValues.push_back(NewMI);
6305 if (VMI !=
FuncInfo.ValueMap.end()) {
6306 const auto &TLI =
DAG.getTargetLoweringInfo();
6307 RegsForValue RFV(
V->getContext(), TLI,
DAG.getDataLayout(), VMI->second,
6308 V->getType(), std::nullopt);
6309 if (RFV.occupiesMultipleRegs()) {
6310 splitMultiRegDbgValue(RFV.getRegsAndSizes());
6315 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6316 }
else if (ArgRegsAndSizes.
size() > 1) {
6319 splitMultiRegDbgValue(ArgRegsAndSizes);
6328 "Expected inlined-at fields to agree");
6329 MachineInstr *NewMI =
nullptr;
6332 NewMI = MakeVRegDbgValue(
Op->getReg(), Expr, IsIndirect);
6334 NewMI =
BuildMI(MF,
DL,
TII->get(TargetOpcode::DBG_VALUE),
true, *
Op,
6338 FuncInfo.ArgDbgValues.push_back(NewMI);
6347 unsigned DbgSDNodeOrder) {
6359 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
6360 false, dl, DbgSDNodeOrder);
6362 return DAG.getDbgValue(Variable, Expr,
N.getNode(),
N.getResNo(),
6363 false, dl, DbgSDNodeOrder);
6368 case Intrinsic::smul_fix:
6370 case Intrinsic::umul_fix:
6372 case Intrinsic::smul_fix_sat:
6374 case Intrinsic::umul_fix_sat:
6376 case Intrinsic::sdiv_fix:
6378 case Intrinsic::udiv_fix:
6380 case Intrinsic::sdiv_fix_sat:
6382 case Intrinsic::udiv_fix_sat:
6395 "expected call_preallocated_setup Value");
6396 for (
const auto *U : PreallocatedSetup->
users()) {
6398 const Function *Fn = UseCall->getCalledFunction();
6399 if (!Fn || Fn->
getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6409bool SelectionDAGBuilder::visitEntryValueDbgValue(
6419 auto ArgIt =
FuncInfo.ValueMap.find(Arg);
6420 if (ArgIt ==
FuncInfo.ValueMap.end()) {
6422 dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6423 "couldn't find an associated register for the Argument\n");
6426 Register ArgVReg = ArgIt->getSecond();
6428 for (
auto [PhysReg, VirtReg] :
FuncInfo.RegInfo->liveins())
6429 if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6430 SDDbgValue *SDV =
DAG.getVRegDbgValue(
6431 Variable, Expr, PhysReg,
false , DbgLoc, SDNodeOrder);
6432 DAG.AddDbgValue(SDV,
false );
6435 LLVM_DEBUG(
dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6436 "couldn't find a physical register\n");
6441void SelectionDAGBuilder::visitConvergenceControl(
const CallInst &
I,
6444 switch (Intrinsic) {
6445 case Intrinsic::experimental_convergence_anchor:
6446 setValue(&
I,
DAG.getNode(ISD::CONVERGENCECTRL_ANCHOR, sdl, MVT::Untyped));
6448 case Intrinsic::experimental_convergence_entry:
6449 setValue(&
I,
DAG.getNode(ISD::CONVERGENCECTRL_ENTRY, sdl, MVT::Untyped));
6451 case Intrinsic::experimental_convergence_loop: {
6453 auto *Token = Bundle->Inputs[0].get();
6454 setValue(&
I,
DAG.getNode(ISD::CONVERGENCECTRL_LOOP, sdl, MVT::Untyped,
6461void SelectionDAGBuilder::visitVectorHistogram(
const CallInst &
I,
6462 unsigned IntrinsicID) {
6465 assert(IntrinsicID == Intrinsic::experimental_vector_histogram_add &&
6466 "Tried to lower unsupported histogram type");
6468 Value *
Ptr =
I.getOperand(0);
6472 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6473 DataLayout TargetDL =
DAG.getDataLayout();
6475 Align Alignment =
DAG.getEVTAlign(VT);
6486 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
6488 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
6489 MachinePointerInfo(AS),
6500 EVT IdxVT =
Index.getValueType();
6507 SDValue ID =
DAG.getTargetConstant(IntrinsicID, sdl, MVT::i32);
6510 SDValue Histogram =
DAG.getMaskedHistogram(
DAG.getVTList(MVT::Other), VT, sdl,
6514 DAG.setRoot(Histogram);
6517void SelectionDAGBuilder::visitVectorExtractLastActive(
const CallInst &
I,
6519 assert(Intrinsic == Intrinsic::experimental_vector_extract_last_active &&
6520 "Tried lowering invalid vector extract last");
6522 const DataLayout &Layout =
DAG.getDataLayout();
6526 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6530 SDValue Idx =
DAG.getNode(ISD::VECTOR_FIND_LAST_ACTIVE, sdl, ExtVT, Mask);
6536 EVT BoolVT =
Mask.getValueType().getScalarType();
6537 SDValue AnyActive =
DAG.getNode(ISD::VECREDUCE_OR, sdl, BoolVT, Mask);
6538 Result =
DAG.getSelect(sdl, ResVT, AnyActive, Result, PassThru);
6545void SelectionDAGBuilder::visitIntrinsicCall(
const CallInst &
I,
6547 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6554 Flags.copyFMF(*FPOp);
6556 switch (Intrinsic) {
6559 visitTargetIntrinsic(
I, Intrinsic);
6561 case Intrinsic::vscale: {
6566 case Intrinsic::vastart: visitVAStart(
I);
return;
6567 case Intrinsic::vaend: visitVAEnd(
I);
return;
6568 case Intrinsic::vacopy: visitVACopy(
I);
return;
6569 case Intrinsic::returnaddress:
6574 case Intrinsic::addressofreturnaddress:
6579 case Intrinsic::sponentry:
6584 case Intrinsic::frameaddress:
6589 case Intrinsic::read_volatile_register:
6590 case Intrinsic::read_register: {
6591 Value *
Reg =
I.getArgOperand(0);
6597 DAG.getVTList(VT, MVT::Other), Chain,
RegName);
6602 case Intrinsic::write_register: {
6603 Value *
Reg =
I.getArgOperand(0);
6604 Value *RegValue =
I.getArgOperand(1);
6612 case Intrinsic::memcpy:
6613 case Intrinsic::memcpy_inline: {
6619 "memcpy_inline needs constant size");
6621 Align DstAlign = MCI.getDestAlign().valueOrOne();
6622 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6623 Align Alignment = std::min(DstAlign, SrcAlign);
6624 bool isVol = MCI.isVolatile();
6628 SDValue MC =
DAG.getMemcpy(Root, sdl, Dst, Src,
Size, Alignment, isVol,
6629 MCI.isForceInlined(), &
I, std::nullopt,
6630 MachinePointerInfo(
I.getArgOperand(0)),
6631 MachinePointerInfo(
I.getArgOperand(1)),
6633 updateDAGForMaybeTailCall(MC);
6636 case Intrinsic::memset:
6637 case Intrinsic::memset_inline: {
6643 "memset_inline needs constant size");
6645 Align DstAlign = MSII.getDestAlign().valueOrOne();
6646 bool isVol = MSII.isVolatile();
6649 Root, sdl, Dst, Value,
Size, DstAlign, isVol, MSII.isForceInlined(),
6650 &
I, MachinePointerInfo(
I.getArgOperand(0)),
I.getAAMetadata());
6651 updateDAGForMaybeTailCall(MC);
6654 case Intrinsic::memmove: {
6660 Align DstAlign = MMI.getDestAlign().valueOrOne();
6661 Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6662 Align Alignment = std::min(DstAlign, SrcAlign);
6663 bool isVol = MMI.isVolatile();
6667 SDValue MM =
DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol, &
I,
6669 MachinePointerInfo(
I.getArgOperand(0)),
6670 MachinePointerInfo(
I.getArgOperand(1)),
6672 updateDAGForMaybeTailCall(MM);
6675 case Intrinsic::memcpy_element_unordered_atomic: {
6681 Type *LengthTy =
MI.getLength()->getType();
6682 unsigned ElemSz =
MI.getElementSizeInBytes();
6686 isTC, MachinePointerInfo(
MI.getRawDest()),
6687 MachinePointerInfo(
MI.getRawSource()));
6688 updateDAGForMaybeTailCall(MC);
6691 case Intrinsic::memmove_element_unordered_atomic: {
6697 Type *LengthTy =
MI.getLength()->getType();
6698 unsigned ElemSz =
MI.getElementSizeInBytes();
6702 isTC, MachinePointerInfo(
MI.getRawDest()),
6703 MachinePointerInfo(
MI.getRawSource()));
6704 updateDAGForMaybeTailCall(MC);
6707 case Intrinsic::memset_element_unordered_atomic: {
6713 Type *LengthTy =
MI.getLength()->getType();
6714 unsigned ElemSz =
MI.getElementSizeInBytes();
6718 isTC, MachinePointerInfo(
MI.getRawDest()));
6719 updateDAGForMaybeTailCall(MC);
6722 case Intrinsic::call_preallocated_setup: {
6724 SDValue SrcValue =
DAG.getSrcValue(PreallocatedCall);
6725 SDValue Res =
DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
6731 case Intrinsic::call_preallocated_arg: {
6733 SDValue SrcValue =
DAG.getSrcValue(PreallocatedCall);
6740 ISD::PREALLOCATED_ARG, sdl,
6747 case Intrinsic::eh_typeid_for: {
6750 unsigned TypeID =
DAG.getMachineFunction().getTypeIDFor(GV);
6751 Res =
DAG.getConstant(
TypeID, sdl, MVT::i32);
6756 case Intrinsic::eh_return_i32:
6757 case Intrinsic::eh_return_i64:
6758 DAG.getMachineFunction().setCallsEHReturn(
true);
6765 case Intrinsic::eh_unwind_init:
6766 DAG.getMachineFunction().setCallsUnwindInit(
true);
6768 case Intrinsic::eh_dwarf_cfa:
6773 case Intrinsic::eh_sjlj_callsite: {
6775 assert(
FuncInfo.getCurrentCallSite() == 0 &&
"Overlapping call sites!");
6780 case Intrinsic::eh_sjlj_functioncontext: {
6782 MachineFrameInfo &MFI =
DAG.getMachineFunction().getFrameInfo();
6785 int FI =
FuncInfo.StaticAllocaMap[FnCtx];
6789 case Intrinsic::eh_sjlj_setjmp: {
6794 DAG.getVTList(MVT::i32, MVT::Other),
Ops);
6796 DAG.setRoot(
Op.getValue(1));
6799 case Intrinsic::eh_sjlj_longjmp:
6803 case Intrinsic::eh_sjlj_setup_dispatch:
6807 case Intrinsic::masked_gather:
6808 visitMaskedGather(
I);
6810 case Intrinsic::masked_load:
6813 case Intrinsic::masked_scatter:
6814 visitMaskedScatter(
I);
6816 case Intrinsic::masked_store:
6817 visitMaskedStore(
I);
6819 case Intrinsic::masked_expandload:
6820 visitMaskedLoad(
I,
true );
6822 case Intrinsic::masked_compressstore:
6823 visitMaskedStore(
I,
true );
6825 case Intrinsic::powi:
6829 case Intrinsic::log:
6832 case Intrinsic::log2:
6836 case Intrinsic::log10:
6840 case Intrinsic::exp:
6843 case Intrinsic::exp2:
6847 case Intrinsic::pow:
6851 case Intrinsic::sqrt:
6852 case Intrinsic::fabs:
6853 case Intrinsic::sin:
6854 case Intrinsic::cos:
6855 case Intrinsic::tan:
6856 case Intrinsic::asin:
6857 case Intrinsic::acos:
6858 case Intrinsic::atan:
6859 case Intrinsic::sinh:
6860 case Intrinsic::cosh:
6861 case Intrinsic::tanh:
6862 case Intrinsic::exp10:
6863 case Intrinsic::floor:
6864 case Intrinsic::ceil:
6865 case Intrinsic::trunc:
6866 case Intrinsic::rint:
6867 case Intrinsic::nearbyint:
6868 case Intrinsic::round:
6869 case Intrinsic::roundeven:
6870 case Intrinsic::canonicalize: {
6873 switch (Intrinsic) {
6875 case Intrinsic::sqrt: Opcode = ISD::FSQRT;
break;
6876 case Intrinsic::fabs: Opcode = ISD::FABS;
break;
6877 case Intrinsic::sin: Opcode = ISD::FSIN;
break;
6878 case Intrinsic::cos: Opcode = ISD::FCOS;
break;
6879 case Intrinsic::tan: Opcode = ISD::FTAN;
break;
6880 case Intrinsic::asin: Opcode = ISD::FASIN;
break;
6881 case Intrinsic::acos: Opcode = ISD::FACOS;
break;
6882 case Intrinsic::atan: Opcode = ISD::FATAN;
break;
6883 case Intrinsic::sinh: Opcode = ISD::FSINH;
break;
6884 case Intrinsic::cosh: Opcode = ISD::FCOSH;
break;
6885 case Intrinsic::tanh: Opcode = ISD::FTANH;
break;
6886 case Intrinsic::exp10: Opcode = ISD::FEXP10;
break;
6887 case Intrinsic::floor: Opcode = ISD::FFLOOR;
break;
6888 case Intrinsic::ceil: Opcode = ISD::FCEIL;
break;
6889 case Intrinsic::trunc: Opcode = ISD::FTRUNC;
break;
6890 case Intrinsic::rint: Opcode = ISD::FRINT;
break;
6891 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT;
break;
6892 case Intrinsic::round: Opcode = ISD::FROUND;
break;
6893 case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN;
break;
6899 getValue(
I.getArgOperand(0)).getValueType(),
6903 case Intrinsic::atan2:
6905 getValue(
I.getArgOperand(0)).getValueType(),
6909 case Intrinsic::lround:
6910 case Intrinsic::llround:
6911 case Intrinsic::lrint:
6912 case Intrinsic::llrint: {
6915 switch (Intrinsic) {
6917 case Intrinsic::lround: Opcode = ISD::LROUND;
break;
6918 case Intrinsic::llround: Opcode = ISD::LLROUND;
break;
6919 case Intrinsic::lrint: Opcode = ISD::LRINT;
break;
6920 case Intrinsic::llrint: Opcode = ISD::LLRINT;
break;
6929 case Intrinsic::minnum:
6931 getValue(
I.getArgOperand(0)).getValueType(),
6935 case Intrinsic::maxnum:
6937 getValue(
I.getArgOperand(0)).getValueType(),
6941 case Intrinsic::minimum:
6943 getValue(
I.getArgOperand(0)).getValueType(),
6947 case Intrinsic::maximum:
6949 getValue(
I.getArgOperand(0)).getValueType(),
6953 case Intrinsic::minimumnum:
6955 getValue(
I.getArgOperand(0)).getValueType(),
6959 case Intrinsic::maximumnum:
6961 getValue(
I.getArgOperand(0)).getValueType(),
6965 case Intrinsic::copysign:
6967 getValue(
I.getArgOperand(0)).getValueType(),
6971 case Intrinsic::ldexp:
6973 getValue(
I.getArgOperand(0)).getValueType(),
6977 case Intrinsic::modf:
6978 case Intrinsic::sincos:
6979 case Intrinsic::sincospi:
6980 case Intrinsic::frexp: {
6982 switch (Intrinsic) {
6985 case Intrinsic::sincos:
6986 Opcode = ISD::FSINCOS;
6988 case Intrinsic::sincospi:
6989 Opcode = ISD::FSINCOSPI;
6991 case Intrinsic::modf:
6992 Opcode = ISD::FMODF;
6994 case Intrinsic::frexp:
6995 Opcode = ISD::FFREXP;
7000 SDVTList VTs =
DAG.getVTList(ValueVTs);
7002 &
I,
DAG.getNode(Opcode, sdl, VTs,
getValue(
I.getArgOperand(0)), Flags));
7005 case Intrinsic::arithmetic_fence: {
7007 getValue(
I.getArgOperand(0)).getValueType(),
7011 case Intrinsic::fma:
7017#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
7018 case Intrinsic::INTRINSIC:
7019#include "llvm/IR/ConstrainedOps.def"
7022#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
7023#include "llvm/IR/VPIntrinsics.def"
7026 case Intrinsic::fptrunc_round: {
7030 std::optional<RoundingMode> RoundMode =
7038 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
7043 DAG.getTargetConstant((
int)*RoundMode, sdl, MVT::i32));
7048 case Intrinsic::fmuladd: {
7053 getValue(
I.getArgOperand(0)).getValueType(),
7060 getValue(
I.getArgOperand(0)).getValueType(),
7076 case Intrinsic::convert_to_fp16:
7080 DAG.getTargetConstant(0, sdl,
7083 case Intrinsic::convert_from_fp16:
7086 DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
7089 case Intrinsic::fptosi_sat: {
7096 case Intrinsic::fptoui_sat: {
7103 case Intrinsic::set_rounding:
7104 Res =
DAG.getNode(ISD::SET_ROUNDING, sdl, MVT::Other,
7109 case Intrinsic::is_fpclass: {
7110 const DataLayout DLayout =
DAG.getDataLayout();
7112 EVT ArgVT = TLI.
getValueType(DLayout,
I.getArgOperand(0)->getType());
7115 MachineFunction &MF =
DAG.getMachineFunction();
7119 Flags.setNoFPExcept(
7120 !
F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
7136 case Intrinsic::get_fpenv: {
7137 const DataLayout DLayout =
DAG.getDataLayout();
7139 Align TempAlign =
DAG.getEVTAlign(EnvVT);
7145 ISD::GET_FPENV, sdl,
7154 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
7157 Chain =
DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7158 Res =
DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
7164 case Intrinsic::set_fpenv: {
7165 const DataLayout DLayout =
DAG.getDataLayout();
7168 Align TempAlign =
DAG.getEVTAlign(EnvVT);
7173 Chain =
DAG.getNode(ISD::SET_FPENV, sdl, MVT::Other, Chain, Env);
7181 Chain =
DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
7183 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
7186 Chain =
DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7191 case Intrinsic::reset_fpenv:
7192 DAG.setRoot(
DAG.getNode(ISD::RESET_FPENV, sdl, MVT::Other,
getRoot()));
7194 case Intrinsic::get_fpmode:
7196 ISD::GET_FPMODE, sdl,
7203 case Intrinsic::set_fpmode:
7204 Res =
DAG.getNode(ISD::SET_FPMODE, sdl, MVT::Other, {
DAG.getRoot()},
7208 case Intrinsic::reset_fpmode: {
7209 Res =
DAG.getNode(ISD::RESET_FPMODE, sdl, MVT::Other,
getRoot());
7213 case Intrinsic::pcmarker: {
7215 DAG.setRoot(
DAG.getNode(ISD::PCMARKER, sdl, MVT::Other,
getRoot(), Tmp));
7218 case Intrinsic::readcyclecounter: {
7220 Res =
DAG.getNode(ISD::READCYCLECOUNTER, sdl,
7221 DAG.getVTList(MVT::i64, MVT::Other),
Op);
7226 case Intrinsic::readsteadycounter: {
7228 Res =
DAG.getNode(ISD::READSTEADYCOUNTER, sdl,
7229 DAG.getVTList(MVT::i64, MVT::Other),
Op);
7234 case Intrinsic::bitreverse:
7236 getValue(
I.getArgOperand(0)).getValueType(),
7239 case Intrinsic::bswap:
7241 getValue(
I.getArgOperand(0)).getValueType(),
7244 case Intrinsic::cttz: {
7252 case Intrinsic::ctlz: {
7260 case Intrinsic::ctpop: {
7266 case Intrinsic::fshl:
7267 case Intrinsic::fshr: {
7268 bool IsFSHL =
Intrinsic == Intrinsic::fshl;
7272 EVT VT =
X.getValueType();
7283 case Intrinsic::sadd_sat: {
7289 case Intrinsic::uadd_sat: {
7295 case Intrinsic::ssub_sat: {
7301 case Intrinsic::usub_sat: {
7307 case Intrinsic::sshl_sat: {
7313 case Intrinsic::ushl_sat: {
7319 case Intrinsic::smul_fix:
7320 case Intrinsic::umul_fix:
7321 case Intrinsic::smul_fix_sat:
7322 case Intrinsic::umul_fix_sat: {
7330 case Intrinsic::sdiv_fix:
7331 case Intrinsic::udiv_fix:
7332 case Intrinsic::sdiv_fix_sat:
7333 case Intrinsic::udiv_fix_sat: {
7338 Op1, Op2, Op3,
DAG, TLI));
7341 case Intrinsic::smax: {
7347 case Intrinsic::smin: {
7353 case Intrinsic::umax: {
7359 case Intrinsic::umin: {
7365 case Intrinsic::abs: {
7371 case Intrinsic::scmp: {
7378 case Intrinsic::ucmp: {
7385 case Intrinsic::stacksave: {
7388 Res =
DAG.getNode(ISD::STACKSAVE, sdl,
DAG.getVTList(VT, MVT::Other),
Op);
7393 case Intrinsic::stackrestore:
7395 DAG.setRoot(
DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other,
getRoot(), Res));
7397 case Intrinsic::get_dynamic_area_offset: {
7400 Res =
DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl,
DAG.getVTList(ResTy),
7406 case Intrinsic::stackguard: {
7407 MachineFunction &MF =
DAG.getMachineFunction();
7413 Res =
DAG.getPtrExtOrTrunc(Res, sdl, PtrTy);
7417 LLVMContext &Ctx = *
DAG.getContext();
7418 Ctx.
diagnose(DiagnosticInfoGeneric(
"unable to lower stackguard"));
7425 MachinePointerInfo(
Global, 0), Align,
7434 case Intrinsic::stackprotector: {
7436 MachineFunction &MF =
DAG.getMachineFunction();
7456 Chain, sdl, Src, FIN,
7463 case Intrinsic::objectsize:
7466 case Intrinsic::is_constant:
7469 case Intrinsic::annotation:
7470 case Intrinsic::ptr_annotation:
7471 case Intrinsic::launder_invariant_group:
7472 case Intrinsic::strip_invariant_group:
7477 case Intrinsic::type_test:
7478 case Intrinsic::public_type_test:
7482 case Intrinsic::assume:
7483 case Intrinsic::experimental_noalias_scope_decl:
7484 case Intrinsic::var_annotation:
7485 case Intrinsic::sideeffect:
7490 case Intrinsic::codeview_annotation: {
7492 MachineFunction &MF =
DAG.getMachineFunction();
7496 Res =
DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl,
getRoot(), Label);
7501 case Intrinsic::init_trampoline: {
7509 Ops[4] =
DAG.getSrcValue(
I.getArgOperand(0));
7512 Res =
DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other,
Ops);
7517 case Intrinsic::adjust_trampoline:
7522 case Intrinsic::gcroot: {
7523 assert(
DAG.getMachineFunction().getFunction().hasGC() &&
7524 "only valid in functions with gc specified, enforced by Verifier");
7526 const Value *Alloca =
I.getArgOperand(0)->stripPointerCasts();
7533 case Intrinsic::gcread:
7534 case Intrinsic::gcwrite:
7536 case Intrinsic::get_rounding:
7542 case Intrinsic::expect:
7543 case Intrinsic::expect_with_probability:
7549 case Intrinsic::ubsantrap:
7550 case Intrinsic::debugtrap:
7551 case Intrinsic::trap: {
7552 StringRef TrapFuncName =
7553 I.getAttributes().getFnAttr(
"trap-func-name").getValueAsString();
7554 if (TrapFuncName.
empty()) {
7555 switch (Intrinsic) {
7556 case Intrinsic::trap:
7557 DAG.setRoot(
DAG.getNode(ISD::TRAP, sdl, MVT::Other,
getRoot()));
7559 case Intrinsic::debugtrap:
7560 DAG.setRoot(
DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other,
getRoot()));
7562 case Intrinsic::ubsantrap:
7564 ISD::UBSANTRAP, sdl, MVT::Other,
getRoot(),
7565 DAG.getTargetConstant(
7571 DAG.addNoMergeSiteInfo(
DAG.getRoot().getNode(),
7572 I.hasFnAttr(Attribute::NoMerge));
7576 if (Intrinsic == Intrinsic::ubsantrap) {
7577 Value *Arg =
I.getArgOperand(0);
7581 TargetLowering::CallLoweringInfo CLI(
DAG);
7582 CLI.setDebugLoc(sdl).setChain(
getRoot()).setLibCallee(
7584 DAG.getExternalSymbol(TrapFuncName.
data(),
7587 CLI.NoMerge =
I.hasFnAttr(Attribute::NoMerge);
7593 case Intrinsic::allow_runtime_check:
7594 case Intrinsic::allow_ubsan_check:
7598 case Intrinsic::uadd_with_overflow:
7599 case Intrinsic::sadd_with_overflow:
7600 case Intrinsic::usub_with_overflow:
7601 case Intrinsic::ssub_with_overflow:
7602 case Intrinsic::umul_with_overflow:
7603 case Intrinsic::smul_with_overflow: {
7605 switch (Intrinsic) {
7607 case Intrinsic::uadd_with_overflow:
Op =
ISD::UADDO;
break;
7608 case Intrinsic::sadd_with_overflow:
Op =
ISD::SADDO;
break;
7609 case Intrinsic::usub_with_overflow:
Op =
ISD::USUBO;
break;
7610 case Intrinsic::ssub_with_overflow:
Op =
ISD::SSUBO;
break;
7611 case Intrinsic::umul_with_overflow:
Op =
ISD::UMULO;
break;
7612 case Intrinsic::smul_with_overflow:
Op =
ISD::SMULO;
break;
7618 EVT OverflowVT = MVT::i1;
7623 SDVTList VTs =
DAG.getVTList(ResultVT, OverflowVT);
7627 case Intrinsic::prefetch: {
7640 ISD::PREFETCH, sdl,
DAG.getVTList(MVT::Other),
Ops,
7642 std::nullopt, Flags);
7648 DAG.setRoot(Result);
7651 case Intrinsic::lifetime_start:
7652 case Intrinsic::lifetime_end: {
7653 bool IsStart = (
Intrinsic == Intrinsic::lifetime_start);
7659 if (!LifetimeObject)
7664 auto SI =
FuncInfo.StaticAllocaMap.find(LifetimeObject);
7665 if (SI ==
FuncInfo.StaticAllocaMap.end())
7669 Res =
DAG.getLifetimeNode(IsStart, sdl,
getRoot(), FrameIndex);
7673 case Intrinsic::pseudoprobe: {
7681 case Intrinsic::invariant_start:
7686 case Intrinsic::invariant_end:
7689 case Intrinsic::clear_cache: {
7694 {InputChain, StartVal, EndVal});
7699 case Intrinsic::donothing:
7700 case Intrinsic::seh_try_begin:
7701 case Intrinsic::seh_scope_begin:
7702 case Intrinsic::seh_try_end:
7703 case Intrinsic::seh_scope_end:
7706 case Intrinsic::experimental_stackmap:
7709 case Intrinsic::experimental_patchpoint_void:
7710 case Intrinsic::experimental_patchpoint:
7713 case Intrinsic::experimental_gc_statepoint:
7716 case Intrinsic::experimental_gc_result:
7719 case Intrinsic::experimental_gc_relocate:
7722 case Intrinsic::instrprof_cover:
7724 case Intrinsic::instrprof_increment:
7726 case Intrinsic::instrprof_timestamp:
7728 case Intrinsic::instrprof_value_profile:
7730 case Intrinsic::instrprof_mcdc_parameters:
7732 case Intrinsic::instrprof_mcdc_tvbitmap_update:
7734 case Intrinsic::localescape: {
7735 MachineFunction &MF =
DAG.getMachineFunction();
7736 const TargetInstrInfo *
TII =
DAG.getSubtarget().getInstrInfo();
7740 for (
unsigned Idx = 0,
E =
I.arg_size(); Idx <
E; ++Idx) {
7746 "can only escape static allocas");
7751 TII->get(TargetOpcode::LOCAL_ESCAPE))
7759 case Intrinsic::localrecover: {
7761 MachineFunction &MF =
DAG.getMachineFunction();
7767 unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7771 Value *
FP =
I.getArgOperand(1);
7777 SDValue OffsetSym =
DAG.getMCSymbol(FrameAllocSym, PtrVT);
7782 SDValue Add =
DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
7788 case Intrinsic::fake_use: {
7789 Value *
V =
I.getArgOperand(0);
7794 auto FakeUseValue = [&]() ->
SDValue {
7808 if (!FakeUseValue || FakeUseValue.isUndef())
7811 Ops[1] = FakeUseValue;
7816 DAG.setRoot(
DAG.getNode(ISD::FAKE_USE, sdl, MVT::Other,
Ops));
7820 case Intrinsic::reloc_none: {
7825 DAG.getTargetExternalSymbol(
7827 DAG.setRoot(
DAG.getNode(ISD::RELOC_NONE, sdl, MVT::Other,
Ops));
7831 case Intrinsic::eh_exceptionpointer:
7832 case Intrinsic::eh_exceptioncode: {
7838 SDValue N =
DAG.getCopyFromReg(
DAG.getEntryNode(), sdl, VReg, PtrVT);
7839 if (Intrinsic == Intrinsic::eh_exceptioncode)
7840 N =
DAG.getZExtOrTrunc(
N, sdl, MVT::i32);
7844 case Intrinsic::xray_customevent: {
7847 const auto &Triple =
DAG.getTarget().getTargetTriple();
7856 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
7858 Ops.push_back(LogEntryVal);
7859 Ops.push_back(StrSizeVal);
7860 Ops.push_back(Chain);
7866 MachineSDNode *MN =
DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
7869 DAG.setRoot(patchableNode);
7873 case Intrinsic::xray_typedevent: {
7876 const auto &Triple =
DAG.getTarget().getTargetTriple();
7888 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
7890 Ops.push_back(LogTypeId);
7891 Ops.push_back(LogEntryVal);
7892 Ops.push_back(StrSizeVal);
7893 Ops.push_back(Chain);
7899 MachineSDNode *MN =
DAG.getMachineNode(
7900 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys,
Ops);
7902 DAG.setRoot(patchableNode);
7906 case Intrinsic::experimental_deoptimize:
7909 case Intrinsic::stepvector:
7912 case Intrinsic::vector_reduce_fadd:
7913 case Intrinsic::vector_reduce_fmul:
7914 case Intrinsic::vector_reduce_add:
7915 case Intrinsic::vector_reduce_mul:
7916 case Intrinsic::vector_reduce_and:
7917 case Intrinsic::vector_reduce_or:
7918 case Intrinsic::vector_reduce_xor:
7919 case Intrinsic::vector_reduce_smax:
7920 case Intrinsic::vector_reduce_smin:
7921 case Intrinsic::vector_reduce_umax:
7922 case Intrinsic::vector_reduce_umin:
7923 case Intrinsic::vector_reduce_fmax:
7924 case Intrinsic::vector_reduce_fmin:
7925 case Intrinsic::vector_reduce_fmaximum:
7926 case Intrinsic::vector_reduce_fminimum:
7927 visitVectorReduce(
I, Intrinsic);
7930 case Intrinsic::icall_branch_funnel: {
7936 I.getArgOperand(1),
Offset,
DAG.getDataLayout()));
7939 "llvm.icall.branch.funnel operand must be a GlobalValue");
7940 Ops.push_back(
DAG.getTargetGlobalAddress(
Base, sdl, MVT::i64, 0));
7942 struct BranchFunnelTarget {
7948 for (
unsigned Op = 1,
N =
I.arg_size();
Op !=
N;
Op += 2) {
7951 if (ElemBase !=
Base)
7953 "to the same GlobalValue");
7959 "llvm.icall.branch.funnel operand must be a GlobalValue");
7965 [](
const BranchFunnelTarget &
T1,
const BranchFunnelTarget &T2) {
7966 return T1.Offset < T2.Offset;
7969 for (
auto &
T : Targets) {
7970 Ops.push_back(
DAG.getTargetConstant(
T.Offset, sdl, MVT::i32));
7971 Ops.push_back(
T.Target);
7974 Ops.push_back(
DAG.getRoot());
7975 SDValue N(
DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl,
7984 case Intrinsic::wasm_landingpad_index:
7990 case Intrinsic::aarch64_settag:
7991 case Intrinsic::aarch64_settag_zero: {
7992 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
7993 bool ZeroMemory =
Intrinsic == Intrinsic::aarch64_settag_zero;
7996 getValue(
I.getArgOperand(1)), MachinePointerInfo(
I.getArgOperand(0)),
8002 case Intrinsic::amdgcn_cs_chain: {
8007 Type *RetTy =
I.getType();
8017 for (
unsigned Idx : {2, 3, 1}) {
8018 TargetLowering::ArgListEntry Arg(
getValue(
I.getOperand(Idx)),
8020 Arg.setAttributes(&
I, Idx);
8021 Args.push_back(Arg);
8024 assert(Args[0].IsInReg &&
"SGPR args should be marked inreg");
8025 assert(!Args[1].IsInReg &&
"VGPR args should not be marked inreg");
8026 Args[2].IsInReg =
true;
8029 for (
unsigned Idx = 4; Idx <
I.arg_size(); ++Idx) {
8030 TargetLowering::ArgListEntry Arg(
getValue(
I.getOperand(Idx)),
8032 Arg.setAttributes(&
I, Idx);
8033 Args.push_back(Arg);
8036 TargetLowering::CallLoweringInfo CLI(
DAG);
8039 .setCallee(CC, RetTy, Callee, std::move(Args))
8042 .setConvergent(
I.isConvergent());
8044 std::pair<SDValue, SDValue>
Result =
8048 "Should've lowered as tail call");
8053 case Intrinsic::amdgcn_call_whole_wave: {
8055 bool isTailCall =
I.isTailCall();
8058 for (
unsigned Idx = 1; Idx <
I.arg_size(); ++Idx) {
8059 TargetLowering::ArgListEntry Arg(
getValue(
I.getArgOperand(Idx)),
8060 I.getArgOperand(Idx)->getType());
8061 Arg.setAttributes(&
I, Idx);
8068 Args.push_back(Arg);
8073 auto *Token = Bundle->Inputs[0].get();
8074 ConvControlToken =
getValue(Token);
8077 TargetLowering::CallLoweringInfo CLI(
DAG);
8081 getValue(
I.getArgOperand(0)), std::move(Args))
8085 .setConvergent(
I.isConvergent())
8086 .setConvergenceControlToken(ConvControlToken);
8089 std::pair<SDValue, SDValue>
Result =
8092 if (
Result.first.getNode())
8096 case Intrinsic::ptrmask: {
8112 auto HighOnes =
DAG.getNode(
8113 ISD::SHL, sdl, PtrVT,
DAG.getAllOnesConstant(sdl, PtrVT),
8114 DAG.getShiftAmountConstant(
Mask.getValueType().getFixedSizeInBits(),
8117 DAG.getZExtOrTrunc(Mask, sdl, PtrVT), HighOnes);
8118 }
else if (
Mask.getValueType() != PtrVT)
8119 Mask =
DAG.getPtrExtOrTrunc(Mask, sdl, PtrVT);
8125 case Intrinsic::threadlocal_address: {
8129 case Intrinsic::get_active_lane_mask: {
8133 EVT ElementVT =
Index.getValueType();
8136 setValue(&
I,
DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, sdl, CCVT, Index,
8144 SDValue VectorIndex =
DAG.getSplat(VecTy, sdl, Index);
8145 SDValue VectorTripCount =
DAG.getSplat(VecTy, sdl, TripCount);
8146 SDValue VectorStep =
DAG.getStepVector(sdl, VecTy);
8149 SDValue SetCC =
DAG.getSetCC(sdl, CCVT, VectorInduction,
8154 case Intrinsic::experimental_get_vector_length: {
8156 "Expected positive VF");
8161 EVT CountVT =
Count.getValueType();
8164 visitTargetIntrinsic(
I, Intrinsic);
8173 if (CountVT.
bitsLT(VT)) {
8178 SDValue MaxEVL =
DAG.getElementCount(sdl, CountVT,
8188 case Intrinsic::vector_partial_reduce_add: {
8196 case Intrinsic::vector_partial_reduce_fadd: {
8200 ISD::PARTIAL_REDUCE_FMLA, sdl, Acc.
getValueType(), Acc,
8204 case Intrinsic::experimental_cttz_elts: {
8207 EVT OpVT =
Op.getValueType();
8210 visitTargetIntrinsic(
I, Intrinsic);
8226 ConstantRange VScaleRange(1,
true);
8255 case Intrinsic::vector_insert: {
8263 if (
Index.getValueType() != VectorIdxTy)
8264 Index =
DAG.getVectorIdxConstant(
Index->getAsZExtVal(), sdl);
8271 case Intrinsic::vector_extract: {
8279 if (
Index.getValueType() != VectorIdxTy)
8280 Index =
DAG.getVectorIdxConstant(
Index->getAsZExtVal(), sdl);
8286 case Intrinsic::experimental_vector_match: {
8292 EVT ResVT =
Mask.getValueType();
8298 visitTargetIntrinsic(
I, Intrinsic);
8304 for (
unsigned i = 0; i < SearchSize; ++i) {
8307 DAG.getVectorIdxConstant(i, sdl));
8316 case Intrinsic::vector_reverse:
8317 visitVectorReverse(
I);
8319 case Intrinsic::vector_splice:
8320 visitVectorSplice(
I);
8322 case Intrinsic::callbr_landingpad:
8323 visitCallBrLandingPad(
I);
8325 case Intrinsic::vector_interleave2:
8326 visitVectorInterleave(
I, 2);
8328 case Intrinsic::vector_interleave3:
8329 visitVectorInterleave(
I, 3);
8331 case Intrinsic::vector_interleave4:
8332 visitVectorInterleave(
I, 4);
8334 case Intrinsic::vector_interleave5:
8335 visitVectorInterleave(
I, 5);
8337 case Intrinsic::vector_interleave6:
8338 visitVectorInterleave(
I, 6);
8340 case Intrinsic::vector_interleave7:
8341 visitVectorInterleave(
I, 7);
8343 case Intrinsic::vector_interleave8:
8344 visitVectorInterleave(
I, 8);
8346 case Intrinsic::vector_deinterleave2:
8347 visitVectorDeinterleave(
I, 2);
8349 case Intrinsic::vector_deinterleave3:
8350 visitVectorDeinterleave(
I, 3);
8352 case Intrinsic::vector_deinterleave4:
8353 visitVectorDeinterleave(
I, 4);
8355 case Intrinsic::vector_deinterleave5:
8356 visitVectorDeinterleave(
I, 5);
8358 case Intrinsic::vector_deinterleave6:
8359 visitVectorDeinterleave(
I, 6);
8361 case Intrinsic::vector_deinterleave7:
8362 visitVectorDeinterleave(
I, 7);
8364 case Intrinsic::vector_deinterleave8:
8365 visitVectorDeinterleave(
I, 8);
8367 case Intrinsic::experimental_vector_compress:
8369 getValue(
I.getArgOperand(0)).getValueType(),
8374 case Intrinsic::experimental_convergence_anchor:
8375 case Intrinsic::experimental_convergence_entry:
8376 case Intrinsic::experimental_convergence_loop:
8377 visitConvergenceControl(
I, Intrinsic);
8379 case Intrinsic::experimental_vector_histogram_add: {
8380 visitVectorHistogram(
I, Intrinsic);
8383 case Intrinsic::experimental_vector_extract_last_active: {
8384 visitVectorExtractLastActive(
I, Intrinsic);
8387 case Intrinsic::loop_dependence_war_mask:
8393 case Intrinsic::loop_dependence_raw_mask:
8402void SelectionDAGBuilder::pushFPOpOutChain(
SDValue Result,
8418 PendingConstrainedFP.push_back(OutChain);
8421 PendingConstrainedFPStrict.push_back(OutChain);
8426void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
8440 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8442 SDVTList VTs =
DAG.getVTList(VT, MVT::Other);
8446 Flags.setNoFPExcept(
true);
8449 Flags.copyFMF(*FPOp);
8454#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
8455 case Intrinsic::INTRINSIC: \
8456 Opcode = ISD::STRICT_##DAGN; \
8458#include "llvm/IR/ConstrainedOps.def"
8459 case Intrinsic::experimental_constrained_fmuladd: {
8466 pushFPOpOutChain(
Mul, EB);
8489 if (TM.Options.NoNaNsFPMath)
8497 pushFPOpOutChain(Result, EB);
8504 std::optional<unsigned> ResOPC;
8506 case Intrinsic::vp_ctlz: {
8508 ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
8511 case Intrinsic::vp_cttz: {
8513 ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
8516 case Intrinsic::vp_cttz_elts: {
8518 ResOPC = IsZeroPoison ? ISD::VP_CTTZ_ELTS_ZERO_UNDEF : ISD::VP_CTTZ_ELTS;
8521#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \
8522 case Intrinsic::VPID: \
8523 ResOPC = ISD::VPSD; \
8525#include "llvm/IR/VPIntrinsics.def"
8530 "Inconsistency: no SDNode available for this VPIntrinsic!");
8532 if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
8533 *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
8535 return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
8536 : ISD::VP_REDUCE_FMUL;
8542void SelectionDAGBuilder::visitVPLoad(
8554 Alignment =
DAG.getEVTAlign(VT);
8557 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8558 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8561 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8562 MachinePointerInfo(PtrOperand), MMOFlags,
8564 LD =
DAG.getLoadVP(VT,
DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8571void SelectionDAGBuilder::visitVPLoadFF(
8574 assert(OpValues.
size() == 3 &&
"Unexpected number of operands");
8584 Alignment =
DAG.getEVTAlign(VT);
8587 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8588 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8591 LD =
DAG.getLoadFFVP(VT,
DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8596 setValue(&VPIntrin,
DAG.getMergeValues({LD.getValue(0), Trunc},
DL));
8599void SelectionDAGBuilder::visitVPGather(
8603 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8615 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8617 *Alignment, AAInfo, Ranges);
8627 EVT IdxVT =
Index.getValueType();
8633 LD =
DAG.getGatherVP(
8634 DAG.getVTList(VT, MVT::Other), VT,
DL,
8635 {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
8641void SelectionDAGBuilder::visitVPStore(
8645 EVT VT = OpValues[0].getValueType();
8650 Alignment =
DAG.getEVTAlign(VT);
8653 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8656 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8657 MachinePointerInfo(PtrOperand), MMOFlags,
8666void SelectionDAGBuilder::visitVPScatter(
8669 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8671 EVT VT = OpValues[0].getValueType();
8681 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8683 *Alignment, AAInfo);
8693 EVT IdxVT =
Index.getValueType();
8699 ST =
DAG.getScatterVP(
DAG.getVTList(MVT::Other), VT,
DL,
8700 {getMemoryRoot(), OpValues[0], Base, Index, Scale,
8701 OpValues[2], OpValues[3]},
8707void SelectionDAGBuilder::visitVPStridedLoad(
8719 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8721 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8724 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8726 *Alignment, AAInfo, Ranges);
8728 SDValue LD =
DAG.getStridedLoadVP(VT,
DL, InChain, OpValues[0], OpValues[1],
8729 OpValues[2], OpValues[3], MMO,
8737void SelectionDAGBuilder::visitVPStridedStore(
8741 EVT VT = OpValues[0].getValueType();
8747 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8750 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8752 *Alignment, AAInfo);
8756 DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8764void SelectionDAGBuilder::visitVPCmp(
const VPCmpIntrinsic &VPIntrin) {
8765 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8776 if (TM.Options.NoNaNsFPMath)
8789 "Unexpected target EVL type");
8792 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
8795 DAG.getSetCCVP(
DL, DestVT, Op1, Op2, Condition, MaskOp, EVL));
8798void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8806 return visitVPCmp(*CmpI);
8809 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8811 SDVTList VTs =
DAG.getVTList(ValueVTs);
8817 "Unexpected target EVL type");
8821 for (
unsigned I = 0;
I < VPIntrin.
arg_size(); ++
I) {
8823 if (
I == EVLParamPos)
8830 SDNodeFlags SDFlags;
8838 visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8840 case ISD::VP_LOAD_FF:
8841 visitVPLoadFF(VPIntrin, ValueVTs[0], ValueVTs[1], OpValues);
8843 case ISD::VP_GATHER:
8844 visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8846 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8847 visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8850 visitVPStore(VPIntrin, OpValues);
8852 case ISD::VP_SCATTER:
8853 visitVPScatter(VPIntrin, OpValues);
8855 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8856 visitVPStridedStore(VPIntrin, OpValues);
8858 case ISD::VP_FMULADD: {
8859 assert(OpValues.
size() == 5 &&
"Unexpected number of operands");
8860 SDNodeFlags SDFlags;
8865 setValue(&VPIntrin,
DAG.getNode(ISD::VP_FMA,
DL, VTs, OpValues, SDFlags));
8868 ISD::VP_FMUL,
DL, VTs,
8869 {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8871 DAG.getNode(ISD::VP_FADD,
DL, VTs,
8872 {
Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8877 case ISD::VP_IS_FPCLASS: {
8878 const DataLayout DLayout =
DAG.getDataLayout();
8880 auto Constant = OpValues[1]->getAsZExtVal();
8883 {OpValues[0],
Check, OpValues[2], OpValues[3]});
8887 case ISD::VP_INTTOPTR: {
8898 case ISD::VP_PTRTOINT: {
8900 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
8913 case ISD::VP_CTLZ_ZERO_UNDEF:
8915 case ISD::VP_CTTZ_ZERO_UNDEF:
8916 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
8917 case ISD::VP_CTTZ_ELTS: {
8919 DAG.getNode(Opcode,
DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8929 MachineFunction &MF =
DAG.getMachineFunction();
8937 unsigned CallSiteIndex =
FuncInfo.getCurrentCallSite();
8938 if (CallSiteIndex) {
8952 assert(BeginLabel &&
"BeginLabel should've been set");
8954 MachineFunction &MF =
DAG.getMachineFunction();
8966 assert(
II &&
"II should've been set");
8977std::pair<SDValue, SDValue>
8991 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
8994 "Non-null chain expected with non-tail call!");
8995 assert((Result.second.getNode() || !Result.first.getNode()) &&
8996 "Null value expected with tail call!");
8998 if (!Result.second.getNode()) {
9005 PendingExports.clear();
9007 DAG.setRoot(Result.second);
9025 if (!isMustTailCall &&
9026 Caller->getFnAttribute(
"disable-tail-calls").getValueAsBool())
9032 if (
DAG.getTargetLoweringInfo().supportSwiftError() &&
9033 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
9042 bool isTailCall,
bool isMustTailCall,
9045 auto &
DL =
DAG.getDataLayout();
9052 const Value *SwiftErrorVal =
nullptr;
9059 const Value *V = *
I;
9062 if (V->getType()->isEmptyTy())
9067 Entry.setAttributes(&CB,
I - CB.
arg_begin());
9079 Args.push_back(Entry);
9090 Value *V = Bundle->Inputs[0];
9092 Entry.IsCFGuardTarget =
true;
9093 Args.push_back(Entry);
9106 "Target doesn't support calls with kcfi operand bundles.");
9114 auto *Token = Bundle->Inputs[0].get();
9115 ConvControlToken =
getValue(Token);
9121 .
setCallee(RetTy, FTy, Callee, std::move(Args), CB)
9133 "This target doesn't support calls with ptrauth operand bundles.");
9137 std::pair<SDValue, SDValue> Result =
lowerInvokable(CLI, EHPadBB);
9139 if (Result.first.getNode()) {
9154 DAG.setRoot(CopyNode);
9170 LoadTy, Builder.DAG.getDataLayout()))
9171 return Builder.getValue(LoadCst);
9177 bool ConstantMemory =
false;
9180 if (Builder.BatchAA && Builder.BatchAA->pointsToConstantMemory(PtrVal)) {
9181 Root = Builder.DAG.getEntryNode();
9182 ConstantMemory =
true;
9185 Root = Builder.DAG.getRoot();
9190 Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
Ptr,
9193 if (!ConstantMemory)
9194 Builder.PendingLoads.push_back(LoadVal.
getValue(1));
9200void SelectionDAGBuilder::processIntegerCallValue(
const Instruction &
I,
9203 EVT VT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9214bool SelectionDAGBuilder::visitMemCmpBCmpCall(
const CallInst &
I) {
9215 const Value *
LHS =
I.getArgOperand(0), *
RHS =
I.getArgOperand(1);
9216 const Value *
Size =
I.getArgOperand(2);
9219 EVT CallVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9225 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9229 if (Res.first.getNode()) {
9230 processIntegerCallValue(
I, Res.first,
true);
9244 auto hasFastLoadsAndCompare = [&](
unsigned NumBits) {
9245 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
9267 switch (NumBitsToCompare) {
9279 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
9292 LoadL =
DAG.getBitcast(CmpVT, LoadL);
9293 LoadR =
DAG.getBitcast(CmpVT, LoadR);
9297 processIntegerCallValue(
I, Cmp,
false);
9306bool SelectionDAGBuilder::visitMemChrCall(
const CallInst &
I) {
9307 const Value *Src =
I.getArgOperand(0);
9308 const Value *
Char =
I.getArgOperand(1);
9309 const Value *
Length =
I.getArgOperand(2);
9311 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9312 std::pair<SDValue, SDValue> Res =
9315 MachinePointerInfo(Src));
9316 if (Res.first.getNode()) {
9330bool SelectionDAGBuilder::visitMemPCpyCall(
const CallInst &
I) {
9335 Align DstAlign =
DAG.InferPtrAlign(Dst).valueOrOne();
9336 Align SrcAlign =
DAG.InferPtrAlign(Src).valueOrOne();
9338 Align Alignment = std::min(DstAlign, SrcAlign);
9347 Root, sdl, Dst, Src,
Size, Alignment,
false,
false,
nullptr,
9348 std::nullopt, MachinePointerInfo(
I.getArgOperand(0)),
9349 MachinePointerInfo(
I.getArgOperand(1)),
I.getAAMetadata());
9351 "** memcpy should not be lowered as TailCall in mempcpy context **");
9355 Size =
DAG.getSExtOrTrunc(
Size, sdl, Dst.getValueType());
9368bool SelectionDAGBuilder::visitStrCpyCall(
const CallInst &
I,
bool isStpcpy) {
9369 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9371 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9372 std::pair<SDValue, SDValue> Res =
9375 MachinePointerInfo(Arg0),
9376 MachinePointerInfo(Arg1), isStpcpy);
9377 if (Res.first.getNode()) {
9379 DAG.setRoot(Res.second);
9391bool SelectionDAGBuilder::visitStrCmpCall(
const CallInst &
I) {
9392 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9394 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9395 std::pair<SDValue, SDValue> Res =
9398 MachinePointerInfo(Arg0),
9399 MachinePointerInfo(Arg1));
9400 if (Res.first.getNode()) {
9401 processIntegerCallValue(
I, Res.first,
true);
9414bool SelectionDAGBuilder::visitStrLenCall(
const CallInst &
I) {
9415 const Value *Arg0 =
I.getArgOperand(0);
9417 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9420 if (Res.first.getNode()) {
9421 processIntegerCallValue(
I, Res.first,
false);
9434bool SelectionDAGBuilder::visitStrNLenCall(
const CallInst &
I) {
9435 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9437 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9438 std::pair<SDValue, SDValue> Res =
9441 MachinePointerInfo(Arg0));
9442 if (Res.first.getNode()) {
9443 processIntegerCallValue(
I, Res.first,
false);
9456bool SelectionDAGBuilder::visitUnaryFloatCall(
const CallInst &
I,
9461 if (!
I.onlyReadsMemory() ||
I.isStrictFP())
9478bool SelectionDAGBuilder::visitBinaryFloatCall(
const CallInst &
I,
9483 if (!
I.onlyReadsMemory() ||
I.isStrictFP())
9496void SelectionDAGBuilder::visitCall(
const CallInst &
I) {
9498 if (
I.isInlineAsm()) {
9505 if (Function *
F =
I.getCalledFunction()) {
9506 if (
F->isDeclaration()) {
9508 if (
unsigned IID =
F->getIntrinsicID()) {
9509 visitIntrinsicCall(
I, IID);
9518 if (!
I.isNoBuiltin() && !
F->hasLocalLinkage() &&
F->hasName() &&
9519 LibInfo->getLibFunc(*
F, Func) &&
LibInfo->hasOptimizedCodeGen(Func)) {
9523 if (visitMemCmpBCmpCall(
I))
9526 case LibFunc_copysign:
9527 case LibFunc_copysignf:
9528 case LibFunc_copysignl:
9531 if (
I.onlyReadsMemory()) {
9542 if (visitUnaryFloatCall(
I, ISD::FABS))
9548 if (visitBinaryFloatCall(
I, ISD::FMINNUM))
9554 if (visitBinaryFloatCall(
I, ISD::FMAXNUM))
9557 case LibFunc_fminimum_num:
9558 case LibFunc_fminimum_numf:
9559 case LibFunc_fminimum_numl:
9560 if (visitBinaryFloatCall(
I, ISD::FMINIMUMNUM))
9563 case LibFunc_fmaximum_num:
9564 case LibFunc_fmaximum_numf:
9565 case LibFunc_fmaximum_numl:
9566 if (visitBinaryFloatCall(
I, ISD::FMAXIMUMNUM))
9572 if (visitUnaryFloatCall(
I, ISD::FSIN))
9578 if (visitUnaryFloatCall(
I, ISD::FCOS))
9584 if (visitUnaryFloatCall(
I, ISD::FTAN))
9590 if (visitUnaryFloatCall(
I, ISD::FASIN))
9596 if (visitUnaryFloatCall(
I, ISD::FACOS))
9602 if (visitUnaryFloatCall(
I, ISD::FATAN))
9606 case LibFunc_atan2f:
9607 case LibFunc_atan2l:
9608 if (visitBinaryFloatCall(
I, ISD::FATAN2))
9614 if (visitUnaryFloatCall(
I, ISD::FSINH))
9620 if (visitUnaryFloatCall(
I, ISD::FCOSH))
9626 if (visitUnaryFloatCall(
I, ISD::FTANH))
9632 case LibFunc_sqrt_finite:
9633 case LibFunc_sqrtf_finite:
9634 case LibFunc_sqrtl_finite:
9635 if (visitUnaryFloatCall(
I, ISD::FSQRT))
9639 case LibFunc_floorf:
9640 case LibFunc_floorl:
9641 if (visitUnaryFloatCall(
I, ISD::FFLOOR))
9644 case LibFunc_nearbyint:
9645 case LibFunc_nearbyintf:
9646 case LibFunc_nearbyintl:
9647 if (visitUnaryFloatCall(
I, ISD::FNEARBYINT))
9653 if (visitUnaryFloatCall(
I, ISD::FCEIL))
9659 if (visitUnaryFloatCall(
I, ISD::FRINT))
9663 case LibFunc_roundf:
9664 case LibFunc_roundl:
9665 if (visitUnaryFloatCall(
I, ISD::FROUND))
9669 case LibFunc_truncf:
9670 case LibFunc_truncl:
9671 if (visitUnaryFloatCall(
I, ISD::FTRUNC))
9677 if (visitUnaryFloatCall(
I, ISD::FLOG2))
9683 if (visitUnaryFloatCall(
I, ISD::FEXP2))
9687 case LibFunc_exp10f:
9688 case LibFunc_exp10l:
9689 if (visitUnaryFloatCall(
I, ISD::FEXP10))
9693 case LibFunc_ldexpf:
9694 case LibFunc_ldexpl:
9695 if (visitBinaryFloatCall(
I, ISD::FLDEXP))
9698 case LibFunc_memcmp:
9699 if (visitMemCmpBCmpCall(
I))
9702 case LibFunc_mempcpy:
9703 if (visitMemPCpyCall(
I))
9706 case LibFunc_memchr:
9707 if (visitMemChrCall(
I))
9710 case LibFunc_strcpy:
9711 if (visitStrCpyCall(
I,
false))
9714 case LibFunc_stpcpy:
9715 if (visitStrCpyCall(
I,
true))
9718 case LibFunc_strcmp:
9719 if (visitStrCmpCall(
I))
9722 case LibFunc_strlen:
9723 if (visitStrLenCall(
I))
9726 case LibFunc_strnlen:
9727 if (visitStrNLenCall(
I))
9751 if (
I.hasDeoptState())
9768 const Value *Discriminator = PAB->Inputs[1];
9770 assert(
Key->getType()->isIntegerTy(32) &&
"Invalid ptrauth key");
9771 assert(Discriminator->getType()->isIntegerTy(64) &&
9772 "Invalid ptrauth discriminator");
9777 if (CalleeCPA->isKnownCompatibleWith(
Key, Discriminator,
9778 DAG.getDataLayout()))
9818 for (
const auto &Code : Codes)
9833 SDISelAsmOperandInfo &MatchingOpInfo,
9835 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
9841 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9843 OpInfo.ConstraintVT);
9844 std::pair<unsigned, const TargetRegisterClass *> InputRC =
9846 MatchingOpInfo.ConstraintVT);
9847 const bool OutOpIsIntOrFP =
9848 OpInfo.ConstraintVT.isInteger() || OpInfo.ConstraintVT.isFloatingPoint();
9849 const bool InOpIsIntOrFP = MatchingOpInfo.ConstraintVT.isInteger() ||
9850 MatchingOpInfo.ConstraintVT.isFloatingPoint();
9851 if ((OutOpIsIntOrFP != InOpIsIntOrFP) || (MatchRC.second != InputRC.second)) {
9854 " with a matching output constraint of"
9855 " incompatible type!");
9857 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9864 SDISelAsmOperandInfo &OpInfo,
9877 const Value *OpVal = OpInfo.CallOperandVal;
9895 DL.getPrefTypeAlign(Ty),
false,
9898 Chain = DAG.
getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9901 OpInfo.CallOperand = StackSlot;
9914static std::optional<unsigned>
9916 SDISelAsmOperandInfo &OpInfo,
9917 SDISelAsmOperandInfo &RefOpInfo) {
9928 return std::nullopt;
9932 unsigned AssignedReg;
9935 &
TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
9938 return std::nullopt;
9943 const MVT RegVT = *
TRI.legalclasstypes_begin(*RC);
9945 if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
9954 !
TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
9959 if (RegVT.
getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
9964 OpInfo.CallOperand =
9965 DAG.
getNode(ISD::BITCAST,
DL, RegVT, OpInfo.CallOperand);
9966 OpInfo.ConstraintVT = RegVT;
9970 }
else if (RegVT.
isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
9973 OpInfo.CallOperand =
9974 DAG.
getNode(ISD::BITCAST,
DL, VT, OpInfo.CallOperand);
9975 OpInfo.ConstraintVT = VT;
9982 if (OpInfo.isMatchingInputConstraint())
9983 return std::nullopt;
9985 EVT ValueVT = OpInfo.ConstraintVT;
9986 if (OpInfo.ConstraintVT == MVT::Other)
9990 unsigned NumRegs = 1;
9991 if (OpInfo.ConstraintVT != MVT::Other)
10006 I = std::find(
I, RC->
end(), AssignedReg);
10007 if (
I == RC->
end()) {
10010 return {AssignedReg};
10014 for (; NumRegs; --NumRegs, ++
I) {
10015 assert(
I != RC->
end() &&
"Ran out of registers to allocate!");
10020 OpInfo.AssignedRegs =
RegsForValue(Regs, RegVT, ValueVT);
10021 return std::nullopt;
10026 const std::vector<SDValue> &AsmNodeOperands) {
10029 for (; OperandNo; --OperandNo) {
10031 unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
10034 (
F.isRegDefKind() ||
F.isRegDefEarlyClobberKind() ||
F.isMemKind()) &&
10035 "Skipped past definitions?");
10036 CurOp +=
F.getNumOperandRegisters() + 1;
10044 unsigned Flags = 0;
10047 explicit ExtraFlags(
const CallBase &
Call) {
10049 if (
IA->hasSideEffects())
10051 if (
IA->isAlignStack())
10058 void update(
const TargetLowering::AsmOperandInfo &OpInfo) {
10074 unsigned get()
const {
return Flags; }
10097void SelectionDAGBuilder::visitInlineAsm(
const CallBase &
Call,
10104 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10106 DAG.getDataLayout(),
DAG.getSubtarget().getRegisterInfo(),
Call);
10110 bool HasSideEffect =
IA->hasSideEffects();
10111 ExtraFlags ExtraInfo(
Call);
10113 for (
auto &
T : TargetConstraints) {
10114 ConstraintOperands.
push_back(SDISelAsmOperandInfo(
T));
10115 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.
back();
10117 if (OpInfo.CallOperandVal)
10118 OpInfo.CallOperand =
getValue(OpInfo.CallOperandVal);
10120 if (!HasSideEffect)
10121 HasSideEffect = OpInfo.hasMemory(TLI);
10133 return emitInlineAsmError(
Call,
"constraint '" + Twine(
T.ConstraintCode) +
10134 "' expects an integer constant "
10137 ExtraInfo.update(
T);
10145 if (EmitEHLabels) {
10146 assert(EHPadBB &&
"InvokeInst must have an EHPadBB");
10150 if (IsCallBr || EmitEHLabels) {
10158 if (EmitEHLabels) {
10159 Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
10164 IA->collectAsmStrs(AsmStrs);
10167 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10175 if (OpInfo.hasMatchingInput()) {
10176 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
10207 if (OpInfo.isIndirect &&
isFunction(OpInfo.CallOperand) &&
10210 OpInfo.isIndirect =
false;
10217 !OpInfo.isIndirect) {
10218 assert((OpInfo.isMultipleAlternative ||
10220 "Can only indirectify direct input operands!");
10226 OpInfo.CallOperandVal =
nullptr;
10229 OpInfo.isIndirect =
true;
10235 std::vector<SDValue> AsmNodeOperands;
10236 AsmNodeOperands.push_back(
SDValue());
10237 AsmNodeOperands.push_back(
DAG.getTargetExternalSymbol(
10244 AsmNodeOperands.push_back(
DAG.getMDNode(SrcLoc));
10248 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10253 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10255 SDISelAsmOperandInfo &RefOpInfo =
10256 OpInfo.isMatchingInputConstraint()
10257 ? ConstraintOperands[OpInfo.getMatchedOperand()]
10259 const auto RegError =
10262 const MachineFunction &MF =
DAG.getMachineFunction();
10264 const char *
RegName =
TRI.getName(*RegError);
10265 emitInlineAsmError(
Call,
"register '" + Twine(
RegName) +
10266 "' allocated for constraint '" +
10267 Twine(OpInfo.ConstraintCode) +
10268 "' does not match required type");
10272 auto DetectWriteToReservedRegister = [&]() {
10273 const MachineFunction &MF =
DAG.getMachineFunction();
10278 emitInlineAsmError(
Call,
"write to reserved register '" +
10287 !OpInfo.isMatchingInputConstraint())) &&
10288 "Only address as input operand is allowed.");
10290 switch (OpInfo.Type) {
10296 "Failed to convert memory constraint code to constraint id.");
10300 OpFlags.setMemConstraint(ConstraintID);
10301 AsmNodeOperands.push_back(
DAG.getTargetConstant(OpFlags,
getCurSDLoc(),
10303 AsmNodeOperands.push_back(OpInfo.CallOperand);
10308 if (OpInfo.AssignedRegs.
Regs.empty()) {
10309 emitInlineAsmError(
10310 Call,
"couldn't allocate output register for constraint '" +
10311 Twine(OpInfo.ConstraintCode) +
"'");
10315 if (DetectWriteToReservedRegister())
10329 SDValue InOperandVal = OpInfo.CallOperand;
10331 if (OpInfo.isMatchingInputConstraint()) {
10336 InlineAsm::Flag
Flag(AsmNodeOperands[CurOp]->getAsZExtVal());
10337 if (
Flag.isRegDefKind() ||
Flag.isRegDefEarlyClobberKind()) {
10338 if (OpInfo.isIndirect) {
10340 emitInlineAsmError(
Call,
"inline asm not supported yet: "
10341 "don't know how to handle tied "
10342 "indirect register inputs");
10347 MachineFunction &MF =
DAG.getMachineFunction();
10352 MVT RegVT =
R->getSimpleValueType(0);
10353 const TargetRegisterClass *RC =
10356 :
TRI.getMinimalPhysRegClass(TiedReg);
10357 for (
unsigned i = 0, e =
Flag.getNumOperandRegisters(); i != e; ++i)
10360 RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.
getValueType());
10364 MatchedRegs.getCopyToRegs(InOperandVal,
DAG, dl, Chain, &Glue, &
Call);
10366 OpInfo.getMatchedOperand(), dl,
DAG,
10371 assert(
Flag.isMemKind() &&
"Unknown matching constraint!");
10372 assert(
Flag.getNumOperandRegisters() == 1 &&
10373 "Unexpected number of operands");
10376 Flag.clearMemConstraint();
10377 Flag.setMatchingOp(OpInfo.getMatchedOperand());
10378 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10380 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
10391 std::vector<SDValue>
Ops;
10397 emitInlineAsmError(
Call,
"value out of range for constraint '" +
10398 Twine(OpInfo.ConstraintCode) +
"'");
10402 emitInlineAsmError(
Call,
10403 "invalid operand for inline asm constraint '" +
10404 Twine(OpInfo.ConstraintCode) +
"'");
10410 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10417 assert((OpInfo.isIndirect ||
10419 "Operand must be indirect to be a mem!");
10422 "Memory operands expect pointer values");
10427 "Failed to convert memory constraint code to constraint id.");
10431 ResOpType.setMemConstraint(ConstraintID);
10432 AsmNodeOperands.push_back(
DAG.getTargetConstant(ResOpType,
10435 AsmNodeOperands.push_back(InOperandVal);
10443 "Failed to convert memory constraint code to constraint id.");
10447 SDValue AsmOp = InOperandVal;
10451 AsmOp =
DAG.getTargetGlobalAddress(GA->getGlobal(),
getCurSDLoc(),
10457 ResOpType.setMemConstraint(ConstraintID);
10459 AsmNodeOperands.push_back(
10462 AsmNodeOperands.push_back(AsmOp);
10468 emitInlineAsmError(
Call,
"unknown asm constraint '" +
10469 Twine(OpInfo.ConstraintCode) +
"'");
10474 if (OpInfo.isIndirect) {
10475 emitInlineAsmError(
10476 Call,
"Don't know how to handle indirect register inputs yet "
10477 "for constraint '" +
10478 Twine(OpInfo.ConstraintCode) +
"'");
10483 if (OpInfo.AssignedRegs.
Regs.empty()) {
10484 emitInlineAsmError(
Call,
10485 "couldn't allocate input reg for constraint '" +
10486 Twine(OpInfo.ConstraintCode) +
"'");
10490 if (DetectWriteToReservedRegister())
10499 0, dl,
DAG, AsmNodeOperands);
10505 if (!OpInfo.AssignedRegs.
Regs.empty())
10515 if (Glue.
getNode()) AsmNodeOperands.push_back(Glue);
10517 unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
10519 DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
10531 ResultTypes = StructResult->elements();
10532 else if (!CallResultType->
isVoidTy())
10533 ResultTypes =
ArrayRef(CallResultType);
10535 auto CurResultType = ResultTypes.
begin();
10536 auto handleRegAssign = [&](
SDValue V) {
10537 assert(CurResultType != ResultTypes.
end() &&
"Unexpected value");
10538 assert((*CurResultType)->isSized() &&
"Unexpected unsized type");
10539 EVT ResultVT = TLI.
getValueType(
DAG.getDataLayout(), *CurResultType);
10551 if (ResultVT !=
V.getValueType() &&
10554 else if (ResultVT !=
V.getValueType() && ResultVT.
isInteger() &&
10555 V.getValueType().isInteger()) {
10561 assert(ResultVT ==
V.getValueType() &&
"Asm result value mismatch!");
10567 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10571 if (OpInfo.AssignedRegs.
Regs.empty())
10574 switch (OpInfo.ConstraintType) {
10578 Chain, &Glue, &
Call);
10590 assert(
false &&
"Unexpected unknown constraint");
10594 if (OpInfo.isIndirect) {
10595 const Value *
Ptr = OpInfo.CallOperandVal;
10596 assert(
Ptr &&
"Expected value CallOperandVal for indirect asm operand");
10598 MachinePointerInfo(
Ptr));
10605 handleRegAssign(V);
10607 handleRegAssign(Val);
10613 if (!ResultValues.
empty()) {
10614 assert(CurResultType == ResultTypes.
end() &&
10615 "Mismatch in number of ResultTypes");
10617 "Mismatch in number of output operands in asm result");
10620 DAG.getVTList(ResultVTs), ResultValues);
10625 if (!OutChains.
empty())
10628 if (EmitEHLabels) {
10633 if (ResultValues.
empty() || HasSideEffect || !OutChains.
empty() || IsCallBr ||
10635 DAG.setRoot(Chain);
10638void SelectionDAGBuilder::emitInlineAsmError(
const CallBase &
Call,
10639 const Twine &Message) {
10640 LLVMContext &Ctx = *
DAG.getContext();
10644 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10648 if (ValueVTs.
empty())
10652 for (
const EVT &VT : ValueVTs)
10653 Ops.push_back(
DAG.getUNDEF(VT));
10658void SelectionDAGBuilder::visitVAStart(
const CallInst &
I) {
10662 DAG.getSrcValue(
I.getArgOperand(0))));
10665void SelectionDAGBuilder::visitVAArg(
const VAArgInst &
I) {
10666 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10667 const DataLayout &
DL =
DAG.getDataLayout();
10671 DL.getABITypeAlign(
I.getType()).value());
10672 DAG.setRoot(
V.getValue(1));
10674 if (
I.getType()->isPointerTy())
10675 V =
DAG.getPtrExtOrTrunc(
10680void SelectionDAGBuilder::visitVAEnd(
const CallInst &
I) {
10684 DAG.getSrcValue(
I.getArgOperand(0))));
10687void SelectionDAGBuilder::visitVACopy(
const CallInst &
I) {
10692 DAG.getSrcValue(
I.getArgOperand(0)),
10693 DAG.getSrcValue(
I.getArgOperand(1))));
10699 std::optional<ConstantRange> CR =
getRange(
I);
10701 if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped())
10704 APInt Lo = CR->getUnsignedMin();
10705 if (!
Lo.isMinValue())
10708 APInt Hi = CR->getUnsignedMax();
10709 unsigned Bits = std::max(
Hi.getActiveBits(),
10717 DAG.getValueType(SmallVT));
10718 unsigned NumVals =
Op.getNode()->getNumValues();
10724 Ops.push_back(ZExt);
10725 for (
unsigned I = 1;
I != NumVals; ++
I)
10726 Ops.push_back(
Op.getValue(
I));
10728 return DAG.getMergeValues(
Ops,
SL);
10738 DAG.getTargetConstant(Classes,
SDLoc(), MVT::i32));
10749 unsigned ArgIdx,
unsigned NumArgs,
SDValue Callee,
Type *ReturnTy,
10752 Args.reserve(NumArgs);
10756 for (
unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
10757 ArgI != ArgE; ++ArgI) {
10758 const Value *V =
Call->getOperand(ArgI);
10760 assert(!V->getType()->isEmptyTy() &&
"Empty type passed to intrinsic.");
10763 Entry.setAttributes(
Call, ArgI);
10764 Args.push_back(Entry);
10769 .
setCallee(
Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
10798 for (
unsigned I = StartIdx;
I <
Call.arg_size();
I++) {
10807 Ops.push_back(Builder.getValue(
Call.getArgOperand(
I)));
10813void SelectionDAGBuilder::visitStackmap(
const CallInst &CI) {
10839 Ops.push_back(Chain);
10840 Ops.push_back(InGlue);
10847 assert(
ID.getValueType() == MVT::i64);
10849 DAG.getTargetConstant(
ID->getAsZExtVal(),
DL,
ID.getValueType());
10850 Ops.push_back(IDConst);
10856 Ops.push_back(ShadConst);
10862 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
10863 Chain =
DAG.getNode(ISD::STACKMAP,
DL, NodeTys,
Ops);
10866 Chain =
DAG.getCALLSEQ_END(Chain, 0, 0, InGlue,
DL);
10871 DAG.setRoot(Chain);
10874 FuncInfo.MF->getFrameInfo().setHasStackMap();
10878void SelectionDAGBuilder::visitPatchpoint(
const CallBase &CB,
10895 Callee =
DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
10898 Callee =
DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
10899 SDLoc(SymbolicCallee),
10900 SymbolicCallee->getValueType(0));
10910 "Not enough arguments provided to the patchpoint intrinsic");
10913 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
10917 TargetLowering::CallLoweringInfo CLI(
DAG);
10922 SDNode *CallEnd =
Result.second.getNode();
10923 if (CallEnd->
getOpcode() == ISD::EH_LABEL)
10931 "Expected a callseq node.");
10933 bool HasGlue =
Call->getGluedNode();
10958 Ops.push_back(Callee);
10964 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
10965 Ops.push_back(
DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
10968 Ops.push_back(
DAG.getTargetConstant((
unsigned)CC, dl, MVT::i32));
10973 for (
unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i !=
e; ++i)
10984 if (IsAnyRegCC && HasDef) {
10986 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10989 assert(ValueVTs.
size() == 1 &&
"Expected only one return value type.");
10994 NodeTys =
DAG.getVTList(ValueVTs);
10996 NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
10999 SDValue PPV =
DAG.getNode(ISD::PATCHPOINT, dl, NodeTys,
Ops);
11013 if (IsAnyRegCC && HasDef) {
11016 DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
11022 FuncInfo.MF->getFrameInfo().setHasPatchPoint();
11025void SelectionDAGBuilder::visitVectorReduce(
const CallInst &
I,
11027 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
11030 if (
I.arg_size() > 1)
11035 SDNodeFlags SDFlags;
11039 switch (Intrinsic) {
11040 case Intrinsic::vector_reduce_fadd:
11043 DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
11046 Res =
DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
11048 case Intrinsic::vector_reduce_fmul:
11051 DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
11054 Res =
DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
11056 case Intrinsic::vector_reduce_add:
11057 Res =
DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
11059 case Intrinsic::vector_reduce_mul:
11060 Res =
DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
11062 case Intrinsic::vector_reduce_and:
11063 Res =
DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
11065 case Intrinsic::vector_reduce_or:
11066 Res =
DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
11068 case Intrinsic::vector_reduce_xor:
11069 Res =
DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
11071 case Intrinsic::vector_reduce_smax:
11072 Res =
DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
11074 case Intrinsic::vector_reduce_smin:
11075 Res =
DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
11077 case Intrinsic::vector_reduce_umax:
11078 Res =
DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
11080 case Intrinsic::vector_reduce_umin:
11081 Res =
DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
11083 case Intrinsic::vector_reduce_fmax:
11084 Res =
DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
11086 case Intrinsic::vector_reduce_fmin:
11087 Res =
DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
11089 case Intrinsic::vector_reduce_fmaximum:
11090 Res =
DAG.getNode(ISD::VECREDUCE_FMAXIMUM, dl, VT, Op1, SDFlags);
11092 case Intrinsic::vector_reduce_fminimum:
11093 Res =
DAG.getNode(ISD::VECREDUCE_FMINIMUM, dl, VT, Op1, SDFlags);
11106 Attrs.push_back(Attribute::SExt);
11108 Attrs.push_back(Attribute::ZExt);
11110 Attrs.push_back(Attribute::InReg);
11112 return AttributeList::get(CLI.
RetTy->
getContext(), AttributeList::ReturnIndex,
11120std::pair<SDValue, SDValue>
11134 "Only supported for non-aggregate returns");
11137 for (
Type *Ty : RetOrigTys)
11146 RetOrigTys.
swap(OldRetOrigTys);
11147 RetVTs.
swap(OldRetVTs);
11148 Offsets.swap(OldOffsets);
11150 for (
size_t i = 0, e = OldRetVTs.
size(); i != e; ++i) {
11151 EVT RetVT = OldRetVTs[i];
11155 unsigned RegisterVTByteSZ = RegisterVT.
getSizeInBits() / 8;
11156 RetOrigTys.
append(NumRegs, OldRetOrigTys[i]);
11157 RetVTs.
append(NumRegs, RegisterVT);
11158 for (
unsigned j = 0; j != NumRegs; ++j)
11171 int DemoteStackIdx = -100;
11184 ArgListEntry Entry(DemoteStackSlot, StackSlotPtrType);
11185 Entry.IsSRet =
true;
11186 Entry.Alignment = Alignment;
11198 for (
unsigned I = 0, E = RetVTs.
size();
I != E; ++
I) {
11200 if (NeedsRegBlock) {
11201 Flags.setInConsecutiveRegs();
11202 if (
I == RetVTs.
size() - 1)
11203 Flags.setInConsecutiveRegsLast();
11205 EVT VT = RetVTs[
I];
11209 for (
unsigned i = 0; i != NumRegs; ++i) {
11213 Ret.Flags.setPointer();
11214 Ret.Flags.setPointerAddrSpace(
11218 Ret.Flags.setSExt();
11220 Ret.Flags.setZExt();
11222 Ret.Flags.setInReg();
11223 CLI.
Ins.push_back(Ret);
11232 if (Arg.IsSwiftError) {
11238 CLI.
Ins.push_back(Ret);
11246 for (
unsigned i = 0, e = Args.size(); i != e; ++i) {
11250 Type *FinalType = Args[i].Ty;
11251 if (Args[i].IsByVal)
11252 FinalType = Args[i].IndirectType;
11255 for (
unsigned Value = 0, NumValues = OrigArgTys.
size();
Value != NumValues;
11258 Type *ArgTy = OrigArgTy;
11259 if (Args[i].Ty != Args[i].OrigTy) {
11260 assert(
Value == 0 &&
"Only supported for non-aggregate arguments");
11261 ArgTy = Args[i].Ty;
11266 Args[i].Node.getResNo() +
Value);
11273 Flags.setOrigAlign(OriginalAlignment);
11278 Flags.setPointer();
11281 if (Args[i].IsZExt)
11283 if (Args[i].IsSExt)
11285 if (Args[i].IsNoExt)
11287 if (Args[i].IsInReg) {
11294 Flags.setHvaStart();
11300 if (Args[i].IsSRet)
11302 if (Args[i].IsSwiftSelf)
11303 Flags.setSwiftSelf();
11304 if (Args[i].IsSwiftAsync)
11305 Flags.setSwiftAsync();
11306 if (Args[i].IsSwiftError)
11307 Flags.setSwiftError();
11308 if (Args[i].IsCFGuardTarget)
11309 Flags.setCFGuardTarget();
11310 if (Args[i].IsByVal)
11312 if (Args[i].IsByRef)
11314 if (Args[i].IsPreallocated) {
11315 Flags.setPreallocated();
11323 if (Args[i].IsInAlloca) {
11324 Flags.setInAlloca();
11333 if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
11334 unsigned FrameSize =
DL.getTypeAllocSize(Args[i].IndirectType);
11335 Flags.setByValSize(FrameSize);
11338 if (
auto MA = Args[i].Alignment)
11342 }
else if (
auto MA = Args[i].Alignment) {
11345 MemAlign = OriginalAlignment;
11347 Flags.setMemAlign(MemAlign);
11348 if (Args[i].IsNest)
11351 Flags.setInConsecutiveRegs();
11354 unsigned NumParts =
11359 if (Args[i].IsSExt)
11361 else if (Args[i].IsZExt)
11366 if (Args[i].IsReturned && !
Op.getValueType().isVector() &&
11371 Args[i].Ty->getPointerAddressSpace())) &&
11372 RetVTs.
size() == NumValues &&
"unexpected use of 'returned'");
11385 CLI.
RetZExt == Args[i].IsZExt))
11386 Flags.setReturned();
11392 for (
unsigned j = 0; j != NumParts; ++j) {
11398 j * Parts[j].
getValueType().getStoreSize().getKnownMinValue());
11399 if (NumParts > 1 && j == 0)
11403 if (j == NumParts - 1)
11407 CLI.
Outs.push_back(MyFlags);
11408 CLI.
OutVals.push_back(Parts[j]);
11411 if (NeedsRegBlock &&
Value == NumValues - 1)
11412 CLI.
Outs[CLI.
Outs.size() - 1].Flags.setInConsecutiveRegsLast();
11424 "LowerCall didn't return a valid chain!");
11426 "LowerCall emitted a return value for a tail call!");
11428 "LowerCall didn't emit the correct number of values!");
11440 for (
unsigned i = 0, e = CLI.
Ins.size(); i != e; ++i) {
11441 assert(InVals[i].
getNode() &&
"LowerCall emitted a null value!");
11442 assert(
EVT(CLI.
Ins[i].VT) == InVals[i].getValueType() &&
11443 "LowerCall emitted a value with the wrong type!");
11453 unsigned NumValues = RetVTs.
size();
11454 ReturnValues.
resize(NumValues);
11461 for (
unsigned i = 0; i < NumValues; ++i) {
11468 DemoteStackIdx, Offsets[i]),
11470 ReturnValues[i] = L;
11471 Chains[i] = L.getValue(1);
11478 std::optional<ISD::NodeType> AssertOp;
11483 unsigned CurReg = 0;
11484 for (
EVT VT : RetVTs) {
11490 CLI.
DAG, CLI.
DL, &InVals[CurReg], NumRegs, RegisterVT, VT,
nullptr,
11498 if (ReturnValues.
empty())
11504 return std::make_pair(Res, CLI.
Chain);
11521 if (
N->getNumValues() == 1) {
11529 "Lowering returned the wrong number of results!");
11532 for (
unsigned I = 0, E =
N->getNumValues();
I != E; ++
I)
11546 "Copy from a reg to the same reg!");
11547 assert(!Reg.isPhysical() &&
"Is a physreg");
11553 RegsForValue RFV(V->getContext(), TLI,
DAG.getDataLayout(), Reg, V->getType(),
11558 auto PreferredExtendIt =
FuncInfo.PreferredExtendType.find(V);
11559 if (PreferredExtendIt !=
FuncInfo.PreferredExtendType.end())
11560 ExtendType = PreferredExtendIt->second;
11563 PendingExports.push_back(Chain);
11575 return A->use_empty();
11577 const BasicBlock &Entry =
A->getParent()->front();
11578 for (
const User *U :
A->users())
11587 std::pair<const AllocaInst *, const StoreInst *>>;
11599 enum StaticAllocaInfo {
Unknown, Clobbered, Elidable };
11601 unsigned NumArgs = FuncInfo->
Fn->
arg_size();
11602 StaticAllocas.
reserve(NumArgs * 2);
11604 auto GetInfoIfStaticAlloca = [&](
const Value *V) -> StaticAllocaInfo * {
11607 V = V->stripPointerCasts();
11609 if (!AI || !AI->isStaticAlloca() || !FuncInfo->
StaticAllocaMap.count(AI))
11612 return &Iter.first->second;
11629 if (
I.isDebugOrPseudoInst())
11633 for (
const Use &U :
I.operands()) {
11634 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(U))
11635 *
Info = StaticAllocaInfo::Clobbered;
11641 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(
SI->getValueOperand()))
11642 *
Info = StaticAllocaInfo::Clobbered;
11645 const Value *Dst =
SI->getPointerOperand()->stripPointerCasts();
11646 StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(Dst);
11652 if (*
Info != StaticAllocaInfo::Unknown)
11660 const Value *Val =
SI->getValueOperand()->stripPointerCasts();
11662 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
11666 !
DL.typeSizeEqualsStoreSize(Arg->
getType()) ||
11667 ArgCopyElisionCandidates.count(Arg)) {
11668 *
Info = StaticAllocaInfo::Clobbered;
11672 LLVM_DEBUG(
dbgs() <<
"Found argument copy elision candidate: " << *AI
11676 *
Info = StaticAllocaInfo::Elidable;
11677 ArgCopyElisionCandidates.insert({Arg, {AI,
SI}});
11682 if (ArgCopyElisionCandidates.size() == NumArgs)
11706 auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
11707 assert(ArgCopyIter != ArgCopyElisionCandidates.end());
11708 const AllocaInst *AI = ArgCopyIter->second.first;
11709 int FixedIndex = FINode->getIndex();
11711 int OldIndex = AllocaIndex;
11715 dbgs() <<
" argument copy elision failed due to bad fixed stack "
11721 LLVM_DEBUG(
dbgs() <<
" argument copy elision failed: alignment of alloca "
11722 "greater than stack argument alignment ("
11723 <<
DebugStr(RequiredAlignment) <<
" vs "
11731 dbgs() <<
"Eliding argument copy from " << Arg <<
" to " << *AI <<
'\n'
11732 <<
" Replacing frame index " << OldIndex <<
" with " << FixedIndex
11738 AllocaIndex = FixedIndex;
11739 ArgCopyElisionFrameIndexMap.
insert({OldIndex, FixedIndex});
11740 for (
SDValue ArgVal : ArgVals)
11744 const StoreInst *
SI = ArgCopyIter->second.second;
11757void SelectionDAGISel::LowerArguments(
const Function &
F) {
11758 SelectionDAG &DAG =
SDB->DAG;
11759 SDLoc dl =
SDB->getCurSDLoc();
11764 if (
F.hasFnAttribute(Attribute::Naked))
11769 MVT ValueVT =
TLI->getPointerTy(
DL,
DL.getAllocaAddrSpace());
11771 ISD::ArgFlagsTy
Flags;
11773 MVT RegisterVT =
TLI->getRegisterType(*DAG.
getContext(), ValueVT);
11774 ISD::InputArg RetArg(Flags, RegisterVT, ValueVT,
F.getReturnType(),
true,
11776 Ins.push_back(RetArg);
11784 ArgCopyElisionCandidates);
11787 for (
const Argument &Arg :
F.args()) {
11788 unsigned ArgNo = Arg.getArgNo();
11791 bool isArgValueUsed = !Arg.
use_empty();
11792 unsigned PartBase = 0;
11794 if (Arg.hasAttribute(Attribute::ByVal))
11795 FinalType = Arg.getParamByValType();
11796 bool NeedsRegBlock =
TLI->functionArgumentNeedsConsecutiveRegisters(
11797 FinalType,
F.getCallingConv(),
F.isVarArg(),
DL);
11798 for (
unsigned Value = 0, NumValues =
Types.size();
Value != NumValues;
11801 EVT VT =
TLI->getValueType(
DL, ArgTy);
11802 ISD::ArgFlagsTy
Flags;
11805 Flags.setPointer();
11808 if (Arg.hasAttribute(Attribute::ZExt))
11810 if (Arg.hasAttribute(Attribute::SExt))
11812 if (Arg.hasAttribute(Attribute::InReg)) {
11819 Flags.setHvaStart();
11825 if (Arg.hasAttribute(Attribute::StructRet))
11827 if (Arg.hasAttribute(Attribute::SwiftSelf))
11828 Flags.setSwiftSelf();
11829 if (Arg.hasAttribute(Attribute::SwiftAsync))
11830 Flags.setSwiftAsync();
11831 if (Arg.hasAttribute(Attribute::SwiftError))
11832 Flags.setSwiftError();
11833 if (Arg.hasAttribute(Attribute::ByVal))
11835 if (Arg.hasAttribute(Attribute::ByRef))
11837 if (Arg.hasAttribute(Attribute::InAlloca)) {
11838 Flags.setInAlloca();
11846 if (Arg.hasAttribute(Attribute::Preallocated)) {
11847 Flags.setPreallocated();
11859 const Align OriginalAlignment(
11860 TLI->getABIAlignmentForCallingConv(ArgTy,
DL));
11861 Flags.setOrigAlign(OriginalAlignment);
11864 Type *ArgMemTy =
nullptr;
11865 if (
Flags.isByVal() ||
Flags.isInAlloca() ||
Flags.isPreallocated() ||
11868 ArgMemTy = Arg.getPointeeInMemoryValueType();
11870 uint64_t MemSize =
DL.getTypeAllocSize(ArgMemTy);
11875 if (
auto ParamAlign = Arg.getParamStackAlign())
11876 MemAlign = *ParamAlign;
11877 else if ((ParamAlign = Arg.getParamAlign()))
11878 MemAlign = *ParamAlign;
11880 MemAlign =
TLI->getByValTypeAlignment(ArgMemTy,
DL);
11881 if (
Flags.isByRef())
11882 Flags.setByRefSize(MemSize);
11884 Flags.setByValSize(MemSize);
11885 }
else if (
auto ParamAlign = Arg.getParamStackAlign()) {
11886 MemAlign = *ParamAlign;
11888 MemAlign = OriginalAlignment;
11890 Flags.setMemAlign(MemAlign);
11892 if (Arg.hasAttribute(Attribute::Nest))
11895 Flags.setInConsecutiveRegs();
11896 if (ArgCopyElisionCandidates.count(&Arg))
11897 Flags.setCopyElisionCandidate();
11898 if (Arg.hasAttribute(Attribute::Returned))
11899 Flags.setReturned();
11901 MVT RegisterVT =
TLI->getRegisterTypeForCallingConv(
11902 *
CurDAG->getContext(),
F.getCallingConv(), VT);
11903 unsigned NumRegs =
TLI->getNumRegistersForCallingConv(
11904 *
CurDAG->getContext(),
F.getCallingConv(), VT);
11905 for (
unsigned i = 0; i != NumRegs; ++i) {
11909 ISD::InputArg MyFlags(
11910 Flags, RegisterVT, VT, ArgTy, isArgValueUsed, ArgNo,
11912 if (NumRegs > 1 && i == 0)
11913 MyFlags.Flags.setSplit();
11916 MyFlags.Flags.setOrigAlign(
Align(1));
11917 if (i == NumRegs - 1)
11918 MyFlags.Flags.setSplitEnd();
11920 Ins.push_back(MyFlags);
11922 if (NeedsRegBlock &&
Value == NumValues - 1)
11923 Ins[
Ins.size() - 1].Flags.setInConsecutiveRegsLast();
11930 SDValue NewRoot =
TLI->LowerFormalArguments(
11931 DAG.
getRoot(),
F.getCallingConv(),
F.isVarArg(), Ins, dl, DAG, InVals);
11935 "LowerFormalArguments didn't return a valid chain!");
11937 "LowerFormalArguments didn't emit the correct number of values!");
11939 for (
unsigned i = 0, e =
Ins.size(); i != e; ++i) {
11941 "LowerFormalArguments emitted a null value!");
11943 "LowerFormalArguments emitted a value with the wrong type!");
11955 MVT VT =
TLI->getPointerTy(
DL,
DL.getAllocaAddrSpace());
11956 MVT RegVT =
TLI->getRegisterType(*
CurDAG->getContext(), VT);
11957 std::optional<ISD::NodeType> AssertOp;
11960 F.getCallingConv(), AssertOp);
11962 MachineFunction&
MF =
SDB->DAG.getMachineFunction();
11963 MachineRegisterInfo&
RegInfo =
MF.getRegInfo();
11965 RegInfo.createVirtualRegister(
TLI->getRegClassFor(RegVT));
11966 FuncInfo->DemoteRegister = SRetReg;
11968 SDB->DAG.getCopyToReg(NewRoot,
SDB->getCurSDLoc(), SRetReg, ArgValue);
11976 DenseMap<int, int> ArgCopyElisionFrameIndexMap;
11977 for (
const Argument &Arg :
F.args()) {
11981 unsigned NumValues = ValueVTs.
size();
11982 if (NumValues == 0)
11989 if (Ins[i].
Flags.isCopyElisionCandidate()) {
11990 unsigned NumParts = 0;
11991 for (EVT VT : ValueVTs)
11992 NumParts +=
TLI->getNumRegistersForCallingConv(*
CurDAG->getContext(),
11993 F.getCallingConv(), VT);
11997 ArrayRef(&InVals[i], NumParts), ArgHasUses);
12002 bool isSwiftErrorArg =
12003 TLI->supportSwiftError() &&
12004 Arg.hasAttribute(Attribute::SwiftError);
12005 if (!ArgHasUses && !isSwiftErrorArg) {
12006 SDB->setUnusedArgValue(&Arg, InVals[i]);
12009 if (FrameIndexSDNode *FI =
12011 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12014 for (
unsigned Val = 0; Val != NumValues; ++Val) {
12015 EVT VT = ValueVTs[Val];
12016 MVT PartVT =
TLI->getRegisterTypeForCallingConv(*
CurDAG->getContext(),
12017 F.getCallingConv(), VT);
12018 unsigned NumParts =
TLI->getNumRegistersForCallingConv(
12019 *
CurDAG->getContext(),
F.getCallingConv(), VT);
12024 if (ArgHasUses || isSwiftErrorArg) {
12025 std::optional<ISD::NodeType> AssertOp;
12026 if (Arg.hasAttribute(Attribute::SExt))
12028 else if (Arg.hasAttribute(Attribute::ZExt))
12033 NewRoot,
F.getCallingConv(), AssertOp);
12036 if (NoFPClass !=
fcNone) {
12038 static_cast<uint64_t
>(NoFPClass), dl, MVT::i32);
12040 OutVal, SDNoFPClass);
12049 if (ArgValues.
empty())
12053 if (FrameIndexSDNode *FI =
12055 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12058 SDB->getCurSDLoc());
12060 SDB->setValue(&Arg, Res);
12070 if (LoadSDNode *LNode =
12072 if (FrameIndexSDNode *FI =
12074 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12102 FuncInfo->InitializeRegForValue(&Arg);
12103 SDB->CopyToExportRegsIfNeeded(&Arg);
12107 if (!Chains.
empty()) {
12114 assert(i == InVals.
size() &&
"Argument register count mismatch!");
12118 if (!ArgCopyElisionFrameIndexMap.
empty()) {
12119 for (MachineFunction::VariableDbgInfo &VI :
12120 MF->getInStackSlotVariableDbgInfo()) {
12121 auto I = ArgCopyElisionFrameIndexMap.
find(
VI.getStackSlot());
12122 if (
I != ArgCopyElisionFrameIndexMap.
end())
12123 VI.updateStackSlot(
I->second);
12138SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(
const BasicBlock *LLVMBB) {
12139 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12141 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
12147 MachineBasicBlock *SuccMBB =
FuncInfo.getMBB(SuccBB);
12151 if (!SuccsHandled.
insert(SuccMBB).second)
12159 for (
const PHINode &PN : SuccBB->phis()) {
12161 if (PN.use_empty())
12165 if (PN.getType()->isEmptyTy())
12169 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
12174 RegOut =
FuncInfo.CreateRegs(&PN);
12192 "Didn't codegen value into a register!??");
12202 for (EVT VT : ValueVTs) {
12204 for (
unsigned i = 0; i != NumRegisters; ++i)
12206 Reg += NumRegisters;
12226void SelectionDAGBuilder::updateDAGForMaybeTailCall(
SDValue MaybeTC) {
12228 if (MaybeTC.
getNode() !=
nullptr)
12229 DAG.setRoot(MaybeTC);
12234void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W,
Value *
Cond,
12237 MachineFunction *CurMF =
FuncInfo.MF;
12238 MachineBasicBlock *NextMBB =
nullptr;
12243 unsigned Size =
W.LastCluster -
W.FirstCluster + 1;
12245 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
12247 if (
Size == 2 &&
W.MBB == SwitchMBB) {
12255 CaseCluster &
Small = *
W.FirstCluster;
12256 CaseCluster &
Big = *
W.LastCluster;
12260 const APInt &SmallValue =
Small.Low->getValue();
12261 const APInt &BigValue =
Big.Low->getValue();
12264 APInt CommonBit = BigValue ^ SmallValue;
12271 DAG.getConstant(CommonBit,
DL, VT));
12273 DL, MVT::i1,
Or,
DAG.getConstant(BigValue | SmallValue,
DL, VT),
12279 addSuccessorWithProb(SwitchMBB,
Small.MBB,
Small.Prob +
Big.Prob);
12281 addSuccessorWithProb(
12282 SwitchMBB, DefaultMBB,
12286 addSuccessorWithProb(SwitchMBB, DefaultMBB);
12293 BrCond =
DAG.getNode(ISD::BR,
DL, MVT::Other, BrCond,
12294 DAG.getBasicBlock(DefaultMBB));
12296 DAG.setRoot(BrCond);
12308 [](
const CaseCluster &a,
const CaseCluster &b) {
12309 return a.Prob != b.Prob ?
12311 a.Low->getValue().slt(b.Low->getValue());
12318 if (
I->Prob >
W.LastCluster->Prob)
12320 if (
I->Kind ==
CC_Range &&
I->MBB == NextMBB) {
12328 BranchProbability DefaultProb =
W.DefaultProb;
12329 BranchProbability UnhandledProbs = DefaultProb;
12331 UnhandledProbs +=
I->Prob;
12333 MachineBasicBlock *CurMBB =
W.MBB;
12335 bool FallthroughUnreachable =
false;
12336 MachineBasicBlock *Fallthrough;
12337 if (
I ==
W.LastCluster) {
12339 Fallthrough = DefaultMBB;
12344 CurMF->
insert(BBI, Fallthrough);
12348 UnhandledProbs -=
I->Prob;
12353 JumpTableHeader *JTH = &
SL->JTCases[
I->JTCasesIndex].first;
12354 SwitchCG::JumpTable *
JT = &
SL->JTCases[
I->JTCasesIndex].second;
12357 MachineBasicBlock *JumpMBB =
JT->MBB;
12358 CurMF->
insert(BBI, JumpMBB);
12360 auto JumpProb =
I->Prob;
12361 auto FallthroughProb = UnhandledProbs;
12369 if (*SI == DefaultMBB) {
12370 JumpProb += DefaultProb / 2;
12371 FallthroughProb -= DefaultProb / 2;
12389 if (FallthroughUnreachable) {
12396 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
12397 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
12403 JT->Default = Fallthrough;
12406 if (CurMBB == SwitchMBB) {
12414 BitTestBlock *BTB = &
SL->BitTestCases[
I->BTCasesIndex];
12417 for (BitTestCase &BTC : BTB->
Cases)
12429 BTB->
Prob += DefaultProb / 2;
12433 if (FallthroughUnreachable)
12437 if (CurMBB == SwitchMBB) {
12444 const Value *
RHS, *
LHS, *MHS;
12446 if (
I->Low ==
I->High) {
12461 if (FallthroughUnreachable)
12465 CaseBlock CB(CC,
LHS,
RHS, MHS,
I->MBB, Fallthrough, CurMBB,
12468 if (CurMBB == SwitchMBB)
12471 SL->SwitchCases.push_back(CB);
12476 CurMBB = Fallthrough;
12480void SelectionDAGBuilder::splitWorkItem(
SwitchWorkList &WorkList,
12481 const SwitchWorkListItem &W,
12484 assert(
W.FirstCluster->Low->getValue().slt(
W.LastCluster->Low->getValue()) &&
12485 "Clusters not sorted?");
12486 assert(
W.LastCluster -
W.FirstCluster + 1 >= 2 &&
"Too small to split!");
12488 auto [LastLeft, FirstRight, LeftProb, RightProb] =
12489 SL->computeSplitWorkItemInfo(W);
12494 assert(PivotCluster >
W.FirstCluster);
12495 assert(PivotCluster <=
W.LastCluster);
12500 const ConstantInt *Pivot = PivotCluster->Low;
12509 MachineBasicBlock *LeftMBB;
12510 if (FirstLeft == LastLeft && FirstLeft->Kind ==
CC_Range &&
12511 FirstLeft->Low ==
W.GE &&
12512 (FirstLeft->High->getValue() + 1LL) == Pivot->
getValue()) {
12513 LeftMBB = FirstLeft->MBB;
12515 LeftMBB =
FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
12516 FuncInfo.MF->insert(BBI, LeftMBB);
12518 {LeftMBB, FirstLeft, LastLeft,
W.GE, Pivot,
W.DefaultProb / 2});
12526 MachineBasicBlock *RightMBB;
12527 if (FirstRight == LastRight && FirstRight->Kind ==
CC_Range &&
12528 W.LT && (FirstRight->High->getValue() + 1ULL) ==
W.LT->getValue()) {
12529 RightMBB = FirstRight->MBB;
12531 RightMBB =
FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
12532 FuncInfo.MF->insert(BBI, RightMBB);
12534 {RightMBB, FirstRight, LastRight, Pivot,
W.LT,
W.DefaultProb / 2});
12540 CaseBlock CB(
ISD::SETLT,
Cond, Pivot,
nullptr, LeftMBB, RightMBB,
W.MBB,
12543 if (
W.MBB == SwitchMBB)
12546 SL->SwitchCases.push_back(CB);
12571 MachineBasicBlock *SwitchMBB =
FuncInfo.MBB;
12579 unsigned PeeledCaseIndex = 0;
12580 bool SwitchPeeled =
false;
12581 for (
unsigned Index = 0;
Index < Clusters.size(); ++
Index) {
12582 CaseCluster &CC = Clusters[
Index];
12583 if (CC.
Prob < TopCaseProb)
12585 TopCaseProb = CC.
Prob;
12586 PeeledCaseIndex =
Index;
12587 SwitchPeeled =
true;
12592 LLVM_DEBUG(
dbgs() <<
"Peeled one top case in switch stmt, prob: "
12593 << TopCaseProb <<
"\n");
12598 MachineBasicBlock *PeeledSwitchMBB =
12600 FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
12603 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
12604 SwitchWorkListItem
W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
12605 nullptr,
nullptr, TopCaseProb.
getCompl()};
12606 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
12608 Clusters.erase(PeeledCaseIt);
12609 for (CaseCluster &CC : Clusters) {
12611 dbgs() <<
"Scale the probablity for one cluster, before scaling: "
12612 << CC.
Prob <<
"\n");
12616 PeeledCaseProb = TopCaseProb;
12617 return PeeledSwitchMBB;
12620void SelectionDAGBuilder::visitSwitch(
const SwitchInst &
SI) {
12622 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
12624 Clusters.reserve(
SI.getNumCases());
12625 for (
auto I :
SI.cases()) {
12626 MachineBasicBlock *Succ =
FuncInfo.getMBB(
I.getCaseSuccessor());
12627 const ConstantInt *CaseVal =
I.getCaseValue();
12628 BranchProbability Prob =
12630 : BranchProbability(1,
SI.getNumCases() + 1);
12634 MachineBasicBlock *DefaultMBB =
FuncInfo.getMBB(
SI.getDefaultDest());
12643 MachineBasicBlock *PeeledSwitchMBB =
12644 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
12647 MachineBasicBlock *SwitchMBB =
FuncInfo.MBB;
12648 if (Clusters.empty()) {
12649 assert(PeeledSwitchMBB == SwitchMBB);
12651 if (DefaultMBB != NextBlock(SwitchMBB)) {
12658 SL->findJumpTables(Clusters, &SI,
getCurSDLoc(), DefaultMBB,
DAG.getPSI(),
12660 SL->findBitTestClusters(Clusters, &SI);
12663 dbgs() <<
"Case clusters: ";
12664 for (
const CaseCluster &
C : Clusters) {
12670 C.Low->getValue().print(
dbgs(),
true);
12671 if (
C.Low !=
C.High) {
12673 C.High->getValue().print(
dbgs(),
true);
12680 assert(!Clusters.empty());
12684 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
12688 DefaultMBB ==
FuncInfo.getMBB(
SI.getDefaultDest()))
12691 {PeeledSwitchMBB,
First,
Last,
nullptr,
nullptr, DefaultProb});
12693 while (!WorkList.
empty()) {
12695 unsigned NumClusters =
W.LastCluster -
W.FirstCluster + 1;
12700 splitWorkItem(WorkList, W,
SI.getCondition(), SwitchMBB);
12704 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, DefaultMBB);
12708void SelectionDAGBuilder::visitStepVector(
const CallInst &
I) {
12709 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12715void SelectionDAGBuilder::visitVectorReverse(
const CallInst &
I) {
12716 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12721 assert(VT ==
V.getValueType() &&
"Malformed vector.reverse!");
12730 SmallVector<int, 8>
Mask;
12732 for (
unsigned i = 0; i != NumElts; ++i)
12733 Mask.push_back(NumElts - 1 - i);
12738void SelectionDAGBuilder::visitVectorDeinterleave(
const CallInst &
I,
12747 EVT OutVT = ValueVTs[0];
12751 for (
unsigned i = 0; i != Factor; ++i) {
12752 assert(ValueVTs[i] == OutVT &&
"Expected VTs to be the same");
12754 DAG.getVectorIdxConstant(OutNumElts * i,
DL));
12760 SDValue Even =
DAG.getVectorShuffle(OutVT,
DL, SubVecs[0], SubVecs[1],
12762 SDValue Odd =
DAG.getVectorShuffle(OutVT,
DL, SubVecs[0], SubVecs[1],
12770 DAG.getVTList(ValueVTs), SubVecs);
12774void SelectionDAGBuilder::visitVectorInterleave(
const CallInst &
I,
12777 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12782 for (
unsigned i = 0; i < Factor; ++i) {
12785 "Expected VTs to be the same");
12803 for (
unsigned i = 0; i < Factor; ++i)
12810void SelectionDAGBuilder::visitFreeze(
const FreezeInst &
I) {
12814 unsigned NumValues = ValueVTs.
size();
12815 if (NumValues == 0)
return;
12820 for (
unsigned i = 0; i != NumValues; ++i)
12825 DAG.getVTList(ValueVTs), Values));
12828void SelectionDAGBuilder::visitVectorSplice(
const CallInst &
I) {
12829 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12841 DAG.getSignedConstant(
12848 uint64_t Idx = (NumElts +
Imm) % NumElts;
12851 SmallVector<int, 8>
Mask;
12852 for (
unsigned i = 0; i < NumElts; ++i)
12853 Mask.push_back(Idx + i);
12881 assert(
MI->getOpcode() == TargetOpcode::COPY &&
12882 "start of copy chain MUST be COPY");
12883 Reg =
MI->getOperand(1).getReg();
12886 assert(
Reg.isVirtual() &&
"expected COPY of virtual register");
12887 MI =
MRI.def_begin(
Reg)->getParent();
12890 if (
MI->getOpcode() == TargetOpcode::COPY) {
12891 assert(
Reg.isVirtual() &&
"expected COPY of virtual register");
12892 Reg =
MI->getOperand(1).getReg();
12893 assert(
Reg.isPhysical() &&
"expected COPY of physical register");
12896 assert(
MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
12897 "end of copy chain MUST be INLINEASM_BR");
12907void SelectionDAGBuilder::visitCallBrLandingPad(
const CallInst &
I) {
12913 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12914 const TargetRegisterInfo *
TRI =
DAG.getSubtarget().getRegisterInfo();
12915 MachineRegisterInfo &
MRI =
DAG.getMachineFunction().getRegInfo();
12923 for (
auto &
T : TargetConstraints) {
12924 SDISelAsmOperandInfo OpInfo(
T);
12932 switch (OpInfo.ConstraintType) {
12943 FuncInfo.MBB->addLiveIn(OriginalDef);
12951 ResultVTs.
push_back(OpInfo.ConstraintVT);
12960 ResultVTs.
push_back(OpInfo.ConstraintVT);
12968 DAG.getVTList(ResultVTs), ResultValues);
unsigned const MachineRegisterInfo * MRI
static unsigned getIntrinsicID(const SDNode *N)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
static Value * getCondition(Instruction *I)
const HexagonInstrInfo * TII
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
static void getRegistersForValue(MachineFunction &MF, MachineIRBuilder &MIRBuilder, GISelAsmOperandInfo &OpInfo, GISelAsmOperandInfo &RefOpInfo)
Assign virtual/physical registers for the specified register operand.
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Machine Check Debug Module
static bool isUndef(const MachineInstr &MI)
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static const Function * getCalledFunction(const Value *V)
This file provides utility analysis objects describing memory locations.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
MachineInstr unsigned OpIdx
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
OptimizedStructLayoutField Field
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static Type * getValueType(Value *V)
Returns the type of the given value/instruction V.
static bool hasOnlySelectUsers(const Value *Cond)
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL, SDValue &Chain)
Create a LOAD_STACK_GUARD node, and let it carry the target specific global variable if there exists ...
static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index, SDValue &Scale, SelectionDAGBuilder *SDB, const BasicBlock *CurBB, uint64_t ElemSize)
static void failForInvalidBundles(const CallBase &I, StringRef Name, ArrayRef< uint32_t > AllowedBundles)
static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx, const SDLoc &DL, SmallVectorImpl< SDValue > &Ops, SelectionDAGBuilder &Builder)
Add a stack map intrinsic call's live variable operands to a stackmap or patchpoint target node's ope...
static const unsigned MaxParallelChains
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
visitPow - Lower a pow intrinsic.
static const CallBase * FindPreallocatedCall(const Value *PreallocatedSetup)
Given a @llvm.call.preallocated.setup, return the corresponding preallocated call.
static cl::opt< unsigned > SwitchPeelThreshold("switch-peel-threshold", cl::Hidden, cl::init(66), cl::desc("Set the case probability threshold for peeling the case from a " "switch statement. A value greater than 100 will void this " "optimization"))
static cl::opt< bool > InsertAssertAlign("insert-assert-align", cl::init(true), cl::desc("Insert the experimental `assertalign` node."), cl::ReallyHidden)
static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin)
static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG, DILocalVariable *Variable, DebugLoc DL, unsigned Order, SmallVectorImpl< Value * > &Values, DIExpression *Expression)
static unsigned findMatchingInlineAsmOperand(unsigned OperandNo, const std::vector< SDValue > &AsmNodeOperands)
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo, SDISelAsmOperandInfo &MatchingOpInfo, SelectionDAG &DAG)
Make sure that the output operand OpInfo and its corresponding input operand MatchingOpInfo have comp...
static void findUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
When an invoke or a cleanupret unwinds to the next EH pad, there are many places it could ultimately ...
static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic)
static BranchProbability scaleCaseProbality(BranchProbability CaseProb, BranchProbability PeeledCaseProb)
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp2 - Lower an exp2 intrinsic.
static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue Scale, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt, const SDLoc &dl)
getF32Constant - Get 32-bit floating point constant.
static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val, const SDLoc &DL, EVT PartVT)
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog10 - Lower a log10 intrinsic.
DenseMap< const Argument *, std::pair< const AllocaInst *, const StoreInst * > > ArgCopyElisionMapTy
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv)
getCopyToPartsVector - Create a series of nodes that contain the specified value split into legal par...
static void getUnderlyingArgRegs(SmallVectorImpl< std::pair< Register, TypeSize > > &Regs, const SDValue &N)
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv=std::nullopt, ISD::NodeType ExtendKind=ISD::ANY_EXTEND)
getCopyToParts - Create a series of nodes that contain the specified value split into legal parts.
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, SelectionDAGBuilder &Builder)
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog2 - Lower a log2 intrinsic.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location, SDISelAsmOperandInfo &OpInfo, SelectionDAG &DAG)
Get a direct memory input to behave well as an indirect operand.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel)
isOnlyUsedInEntryBlock - If the specified argument is only used in the entry block,...
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V, const Twine &ErrMsg)
static bool collectInstructionDeps(SmallMapVector< const Instruction *, bool, 8 > *Deps, const Value *V, SmallMapVector< const Instruction *, bool, 8 > *Necessary=nullptr, unsigned Depth=0)
static void findArgumentCopyElisionCandidates(const DataLayout &DL, FunctionLoweringInfo *FuncInfo, ArgCopyElisionMapTy &ArgCopyElisionCandidates)
Scan the entry block of the function in FuncInfo for arguments that look like copies into a local all...
static bool isFunction(SDValue Op)
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI, const SDLoc &dl)
GetExponent - Get the exponent:
static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg)
static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, SelectionDAG &DAG)
ExpandPowI - Expand a llvm.powi intrinsic.
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog - Lower a log intrinsic.
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC=std::nullopt, std::optional< ISD::NodeType > AssertOp=std::nullopt)
getCopyFromParts - Create a value that contains the specified legal parts combined into the value the...
static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl, SelectionDAG &DAG)
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl)
GetSignificand - Get the significand and build it into a floating-point number with exponent of 1:
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp - Lower an exp intrinsic.
static const MDNode * getRangeMetadata(const Instruction &I)
static cl::opt< unsigned, true > LimitFPPrecision("limit-float-precision", cl::desc("Generate low-precision inline sequences " "for some float libcalls"), cl::location(LimitFloatPrecision), cl::Hidden, cl::init(0))
static void tryToElideArgumentCopy(FunctionLoweringInfo &FuncInfo, SmallVectorImpl< SDValue > &Chains, DenseMap< int, int > &ArgCopyElisionFrameIndexMap, SmallPtrSetImpl< const Instruction * > &ElidedArgCopyInstrs, ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg, ArrayRef< SDValue > ArgVals, bool &ArgHasUses)
Try to elide argument copies from memory into a local alloca.
static unsigned LimitFloatPrecision
LimitFloatPrecision - Generate low-precision inline sequences for some float libcalls (6,...
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC)
getCopyFromPartsVector - Create a value that contains the specified legal parts combined into the val...
static bool InBlock(const Value *V, const BasicBlock *BB)
static FPClassTest getNoFPClass(const Instruction &I)
static LLVM_ATTRIBUTE_ALWAYS_INLINE MVT::SimpleValueType getSimpleVT(const unsigned char *MatcherTable, unsigned &MatcherIndex)
getSimpleVT - Decode a value in MatcherTable, if it's a VBR encoded value, use GetVBR to decode it.
This file defines the SmallPtrSet class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
static const fltSemantics & IEEEsingle()
Class for arbitrary precision integers.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
an instruction to allocate memory on the stack
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
This class represents an incoming formal argument to a Function.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Check if an argument has a given attribute.
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
This class holds the attributes for a particular argument, parameter, function, or return value.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI InstListType::const_iterator getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
This class represents a no-op cast from one type to another.
The address of a basic block.
Conditional or Unconditional Branch instruction.
Analysis providing branch probability information.
LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
LLVM_ABI bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const
Test if an edge is hot relative to other out-edges of the Src.
static uint32_t getDenominator()
static BranchProbability getOne()
static BranchProbability getUnknown()
uint32_t getNumerator() const
LLVM_ABI uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
CallingConv::ID getCallingConv() const
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI bool isTailCall() const
Tests if this call site is marked as a tail call.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
A constant value that is initialized with an expression using other constant values.
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common case.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
A signed pointer, in the ptrauth sense.
uint64_t getZExtValue() const
Constant Vector Declarations.
This is an important base class in LLVM.
This is the common base class for constrained floating point intrinsics.
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI unsigned getNonMetadataArgCount() const
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static bool fragmentsOverlap(const FragmentInfo &A, const FragmentInfo &B)
Check if fragments overlap between a pair of FragmentInfos.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
LLVM_ABI uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static LLVM_ABI std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
static LLVM_ABI const DIExpression * convertToUndefExpression(const DIExpression *Expr)
Removes all elements from Expr that do not apply to an undef debug value, which includes every operat...
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static LLVM_ABI DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
Base class for variables.
LLVM_ABI std::optional< uint64_t > getSizeInBits() const
Determines the size of the variable's type.
A parsed version of the target data layout string in and methods for querying it.
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LocationType getType() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
LLVM_ABI iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
LLVM_ABI DILocation * getInlinedAt() const
iterator find(const_arg_type_t< KeyT > Val)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Diagnostic information for inline asm reporting.
static constexpr ElementCount getFixed(ScalarTy MinVal)
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
constexpr bool isScalar() const
Exactly one element.
Lightweight error class with error context and mandatory checking.
Class representing an expression and its matching format.
This instruction compares its operands according to the predicate given to the constructor.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
bool allowReassoc() const
Flag queries.
An instruction for ordering other memory operations.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
BranchProbabilityInfo * BPI
MachineBasicBlock * getMBB(const BasicBlock *BB) const
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block.
const LiveOutInfo * GetLiveOutRegInfo(Register Reg)
GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the register is a PHI destinat...
MachineBasicBlock * MBB
MBB - The current block.
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
Type * getReturnType() const
Data structure describing the variable locations in a function.
const BasicBlock & getEntryBlock() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const
check if an attributes is in the list of attributes.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Constant * getPersonalityFn() const
Get the personality function associated with this function.
AttributeList getAttributes() const
Return the attribute list for this Function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Garbage collection metadata for a single function.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
This instruction inserts a struct field of array element value into an aggregate value.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
@ MIN_INT_BITS
Minimum number of bits that can be specified.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
@ OB_clang_arc_attachedcall
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
A helper class to return the specified delimiter string after the first invocation of operator String...
An instruction for reading from memory.
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
LLVM_ABI MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
@ INVALID_SIMPLE_VALUE_TYPE
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool bitsGE(MVT VT) const
Return true if this has no less bits than VT.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
succ_iterator succ_begin()
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void setIsEHContTarget(bool V=true)
Indicates if this is a target of Windows EH Continuation Guard.
void setIsEHFuncletEntry(bool V=true)
Indicates if this is the entry block of an EH funclet.
MachineInstrBundleIterator< MachineInstr > iterator
void setIsEHScopeEntry(bool V=true)
Indicates if this is the entry block of an EH scope, i.e., the block that used to have a catchpa...
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable)
Marks the immutability of an object.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
void setIsAliasedObjectIndex(int ObjectIdx, bool IsAliased)
Set "maybe pointed to by an LLVM IR value" for an object.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
void RemoveStackObject(int ObjectIdx)
Remove or mark dead a statically sized stack object.
void setFunctionContextIndex(int I)
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site)
Map the begin label for a call site.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD)
Record annotations associated with a particular label.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
bool hasEHFunclets() const
void setHasEHContTarget(bool V)
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
static MachineOperand CreateFI(int Idx)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MCRegister getLiveInPhysReg(Register VReg) const
getLiveInPhysReg - If VReg is a live-in virtual register, return the corresponding live-in physical r...
An SDNode that represents everything that will be needed to construct a MachineInstr.
bool contains(const KeyT &Key) const
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
static MemoryLocation getAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location after Ptr, while remaining within the underlying objec...
A Module instance is used to store all the information related to an LLVM module.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
Holds the information from a dbg_label node through SDISel.
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(Register VReg)
static SDDbgOperand fromConst(const Value *Const)
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
SelectionDAGBuilder - This is the common target-independent lowering implementation that is parameter...
SDValue getValue(const Value *V)
getValue - Return an SDValue for the given Value.
DenseMap< const Constant *, Register > ConstantsOut
void addDanglingDebugInfo(SmallVectorImpl< Value * > &Values, DILocalVariable *Var, DIExpression *Expr, bool IsVariadic, DebugLoc DL, unsigned Order)
Register a dbg_value which relies on a Value which we have not yet seen.
void visitDbgInfo(const Instruction &I)
void clearDanglingDebugInfo()
Clear the dangling debug information map.
void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall, bool IsMustTailCall, const BasicBlock *EHPadBB=nullptr, const TargetLowering::PtrAuthInfo *PAI=nullptr)
void clear()
Clear out the current SelectionDAG and the associated state and prepare this SelectionDAGBuilder obje...
void visitBitTestHeader(SwitchCG::BitTestBlock &B, MachineBasicBlock *SwitchBB)
visitBitTestHeader - This function emits necessary code to produce value suitable for "bit tests"
void LowerStatepoint(const GCStatepointInst &I, const BasicBlock *EHPadBB=nullptr)
std::unique_ptr< SDAGSwitchLowering > SL
SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I, SDValue Op)
bool HasTailCall
This is set to true if a call in the current block has been translated as a tail call.
bool ShouldEmitAsBranches(const std::vector< SwitchCG::CaseBlock > &Cases)
If the set of cases should be emitted as a series of branches, return true.
void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
EmitBranchForMergedCondition - Helper method for FindMergedConditions.
void LowerDeoptimizeCall(const CallInst *CI)
void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee, const BasicBlock *EHPadBB)
SwiftErrorValueTracking & SwiftError
Information about the swifterror values used throughout the function.
SDValue getNonRegisterValue(const Value *V)
getNonRegisterValue - Return an SDValue for the given Value, but don't look in FuncInfo....
DenseMap< MachineBasicBlock *, SmallVector< unsigned, 4 > > LPadToCallSiteMap
Map a landing pad to the call site indexes.
SDValue lowerNoFPClassToAssertNoFPClass(SelectionDAG &DAG, const Instruction &I, SDValue Op)
void handleDebugDeclare(Value *Address, DILocalVariable *Variable, DIExpression *Expression, DebugLoc DL)
bool shouldKeepJumpConditionsTogether(const FunctionLoweringInfo &FuncInfo, const BranchInst &I, Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs, TargetLoweringBase::CondMergingParams Params) const
StatepointLoweringState StatepointLowering
State used while lowering a statepoint sequence (gc_statepoint, gc_relocate, and gc_result).
void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB, BranchProbability BranchProbToNext, Register Reg, SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB)
visitBitTestCase - this function produces one "bit test"
bool canTailCall(const CallBase &CB) const
void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI, const CallBase *Call, unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy, AttributeSet RetAttrs, bool IsPatchPoint)
Populate a CallLoweringInfo (into CLI) based on the properties of the call being lowered.
void CopyValueToVirtualRegister(const Value *V, Register Reg, ISD::NodeType ExtendType=ISD::ANY_EXTEND)
void salvageUnresolvedDbgValue(const Value *V, DanglingDebugInfo &DDI)
For the given dangling debuginfo record, perform last-ditch efforts to resolve the debuginfo to somet...
SmallVector< SDValue, 8 > PendingLoads
Loads are not emitted to the program immediately.
GCFunctionInfo * GFI
Garbage collection metadata for the function.
SDValue getRoot()
Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict) items.
void ExportFromCurrentBlock(const Value *V)
ExportFromCurrentBlock - If this condition isn't known to be exported from the current basic block,...
void init(GCFunctionInfo *gfi, BatchAAResults *BatchAA, AssumptionCache *AC, const TargetLibraryInfo *li)
DebugLoc getCurDebugLoc() const
void resolveOrClearDbgInfo()
Evict any dangling debug information, attempting to salvage it first.
std::pair< SDValue, SDValue > lowerInvokable(TargetLowering::CallLoweringInfo &CLI, const BasicBlock *EHPadBB=nullptr)
SDValue getMemoryRoot()
Return the current virtual root of the Selection DAG, flushing any PendingLoad items.
void resolveDanglingDebugInfo(const Value *V, SDValue Val)
If we saw an earlier dbg_value referring to V, generate the debug data structures now that we've seen...
SDLoc getCurSDLoc() const
void visit(const Instruction &I)
void dropDanglingDebugInfo(const DILocalVariable *Variable, const DIExpression *Expr)
If we have dangling debug info that describes Variable, or an overlapping part of variable considerin...
SDValue getCopyFromRegs(const Value *V, Type *Ty)
If there was virtual register allocated for the value V emit CopyFromReg of the specified type Ty.
void CopyToExportRegsIfNeeded(const Value *V)
CopyToExportRegsIfNeeded - If the given value has virtual registers created for it,...
void handleKillDebugValue(DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order)
Create a record for a kill location debug intrinsic.
void visitJumpTable(SwitchCG::JumpTable &JT)
visitJumpTable - Emit JumpTable node in the current MBB
SDValue getFPOperationRoot(fp::ExceptionBehavior EB)
Return the current virtual root of the Selection DAG, flushing PendingConstrainedFP or PendingConstra...
void visitJumpTableHeader(SwitchCG::JumpTable &JT, SwitchCG::JumpTableHeader &JTH, MachineBasicBlock *SwitchBB)
visitJumpTableHeader - This function emits necessary code to produce index in the JumpTable from swit...
void LowerCallSiteWithPtrAuthBundle(const CallBase &CB, const BasicBlock *EHPadBB)
static const unsigned LowestSDNodeOrder
Lowest valid SDNodeOrder.
void LowerDeoptimizingReturn()
FunctionLoweringInfo & FuncInfo
Information about the function as a whole.
void setValue(const Value *V, SDValue NewN)
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, Instruction::BinaryOps Opc, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
const TargetLibraryInfo * LibInfo
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB)
void visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineBasicBlock *ParentBB)
Codegen a new tail for a stack protector check ParentMBB which has had its tail spliced into a stack ...
bool handleDebugValue(ArrayRef< const Value * > Values, DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order, bool IsVariadic)
For a given list of Values, attempt to create and record a SDDbgValue in the SelectionDAG.
SDValue getControlRoot()
Similar to getRoot, but instead of flushing all the PendingLoad items, flush all the PendingExports (...
void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last)
When an MBB was split during scheduling, update the references that need to refer to the last resulti...
SDValue getValueImpl(const Value *V)
getValueImpl - Helper function for getValue and getNonRegisterValue.
void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB)
visitSwitchCase - Emits the necessary code to represent a single node in the binary search tree resul...
void visitSPDescriptorFailure(StackProtectorDescriptor &SPD)
Codegen the failure basic block for a stack protector check.
std::unique_ptr< FunctionLoweringInfo > FuncInfo
SmallPtrSet< const Instruction *, 4 > ElidedArgCopyInstrs
const TargetLowering * TLI
MachineRegisterInfo * RegInfo
std::unique_ptr< SwiftErrorValueTracking > SwiftError
virtual void emitFunctionEntryCode()
std::unique_ptr< SelectionDAGBuilder > SDB
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, SDValue MaxLength, MachinePointerInfo SrcPtrInfo) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, const CallInst *CI) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dest, SDValue Src, MachinePointerInfo DestPtrInfo, MachinePointerInfo SrcPtrInfo, bool isStpcpy) const
Emit target-specific code that performs a strcpy or stpcpy, in cases where that is faster than a libc...
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Src, SDValue Char, SDValue Length, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memchr, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, const CallInst *CI) const
Emit target-specific code that performs a memcmp/bcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a strcmp, in cases where that is faster than a libcall.
virtual SDValue EmitTargetCodeForSetTag(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Addr, SDValue Size, MachinePointerInfo DstPtrInfo, bool ZeroData) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
const TargetSubtargetInfo & getSubtarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI bool shouldOptForSize() const
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
LLVM_ABI void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
LLVM_ABI SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
LLVM_ABI SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void swap(SmallVectorImpl &RHS)
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
MachineBasicBlock * getSuccessMBB()
MachineBasicBlock * getFailureMBB()
MachineBasicBlock * getParentMBB()
bool shouldEmitFunctionBasedCheckStackProtector() const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Provides information about what library functions are available for the current target.
virtual Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Returns the desired alignment for ByVal or InAlloca aggregate function arguments in the caller parame...
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual bool isLegalScaleForGatherScatter(uint64_t Scale, uint64_t ElemSize) const
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vecto...
virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const
Returns true if the index type for a masked gather/scatter requires extending.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const
Certain targets have context sensitive alignment requirements, where one type has the alignment requi...
MachineMemOperand::Flags getVPIntrinsicMemOperandFlags(const VPIntrinsic &VPIntrin) const
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ ...
virtual MVT getFenceOperandTy(const DataLayout &DL) const
Return the type for operands of fence.
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF, bool IsScalable) const
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual MVT hasFastEqualityCompare(unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the give...
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
virtual bool shouldExpandCttzElements(EVT VT) const
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
virtual bool signExtendConstant(const ConstantInt *C) const
Return true if this constant should be sign extended when promoting to a larger type.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
bool supportsUnalignedAtomics() const
Whether the target supports unaligned atomic operations.
std::vector< ArgListEntry > ArgListTy
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool supportKCFIBundles() const
Return true if the target supports kcfi operand bundles.
virtual bool supportPtrAuthBundles() const
Return true if the target supports ptrauth operand bundles.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
virtual SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue &NewLoad, SDValue Ptr, SDValue PassThru, SDValue Mask) const
virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const
Target-specific splitting of values into parts that fit a register storing a legal type.
virtual SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const
Target-specific combining of register parts into its original value.
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to u...
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
virtual SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue Ptr, SDValue Val, SDValue Mask) const
virtual bool useLoadStackGuardNode(const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
virtual bool isInlineAsmTargetBranch(const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use eithe...
virtual MVT getJumpTableRegTy(const DataLayout &DL) const
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &, const Type *RetTy) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
virtual TargetTransformInfo getTargetTransformInfo(const Function &F) const
Return a TargetTransformInfo for a given function.
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
unsigned getID() const
Return the register class ID number.
const MCPhysReg * iterator
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
bool isVoidTy() const
Return true if this is 'void'.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
LLVM_ABI CmpInst::Predicate getPredicate() const
This is the common base class for vector predication intrinsics.
static LLVM_ABI std::optional< unsigned > getVectorLengthParamPos(Intrinsic::ID IntrinsicID)
LLVM_ABI MaybeAlign getPointerAlignment() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Base class of all SIMD vector types.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
const ParentTy * getParent() const
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char SymbolName[]
Key for Kernel::Metadata::mSymbolName.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ X86_VectorCall
MSVC calling convention that passes vectors and vector aggregates in SSE registers.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ LOOP_DEPENDENCE_RAW_MASK
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADD
Simple integer binary arithmetic operators.
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ EH_SJLJ_SETUP_DISPATCH
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ FMULADD
FMULADD - Performs a * b + c, with, or without, intermediate rounding.
@ FPTRUNC_ROUND
FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
@ SIGN_EXTEND
Conversion operators.
@ ADDROFRETURNADDR
ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ SSUBO
Same for subtraction.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor to...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ BasicBlock
Various leaf nodes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ SHL
Shift and rotation operations.
@ AssertNoFPClass
AssertNoFPClass - These nodes record if a register contains a float value that is known to be not som...
@ PtrAuthGlobalAddress
A ptrauth constant.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ LOCAL_RECOVER
LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
@ SPONENTRY
SPONENTRY - Represents the llvm.sponentry intrinsic.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor ...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
@ LOOP_DEPENDENCE_WAR_MASK
LOOP_DEPENDENCE_WAR_MASK(PtrA, PtrB, EltSize) - Produces a mask of lanes that can be safely processed given a potential write-after-read pointer dependence.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
IntrinsicID_match m_VScale()
Matches a call to llvm.vscale().
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
std::pair< JumpTableHeader, JumpTable > JumpTableBlock
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
std::vector< CaseCluster > CaseClusterVector
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
SmallVector< SwitchWorkListItem, 4 > SwitchWorkList
CaseClusterVector::iterator CaseClusterIt
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebMayTrap
This corresponds to "fpexcept.maytrap".
@ ebIgnore
This corresponds to "fpexcept.ignore".
NodeAddr< FuncNode * > Func
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
FunctionAddr VTableAddr Value
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs=nullptr, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
LLVM_ABI void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
static ConstantRange getRange(Value *Op, SCCPSolver &Solver, const SmallPtrSetImpl< Value * > &InsertedValues)
Helper for getting ranges from Solver.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
auto cast_or_null(const Y &Val)
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
gep_type_iterator gep_type_end(const User *GEP)
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
void ComputeValueTypes(const DataLayout &DL, Type *Ty, SmallVectorImpl< Type * > &Types, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
Given an LLVM IR type, compute non-aggregate subtypes.
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
detail::zippy< detail::zip_first, T, U, Args... > zip_first(T &&t, U &&u, Args &&...args)
zip iterator that, for the sake of efficiency, assumes the first iteratee to be the shortest.
void sort(IteratorTy Start, IteratorTy End)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
generic_gep_type_iterator<> gep_type_iterator
FunctionAddr VTableAddr Count
auto succ_size(const MachineBasicBlock *BB)
bool hasSingleElement(ContainerTy &&C)
Returns true if the given container only contains a single element.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Global
Append to llvm.global_dtors.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
FunctionAddr VTableAddr uintptr_t uintptr_t Data
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
LLVM_ABI llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ And
Bitwise or logical AND of integers.
@ Sub
Subtraction of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, can return either.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
DWARFExpression::Operation Op
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if w...
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
gep_type_iterator gep_type_begin(const User *GEP)
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
LLVM_ABI Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
@ Default
The result values are uniform if and only if all operands are uniform.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
uint64_t getScalarStoreSize() const
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
bool isRISCVVectorTuple() const
Return true if this is a RISC-V vector tuple type.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
void setOrigAlign(Align A)
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
A lightweight accessor for an operand bundle meant to be passed around by value.
This struct represents the registers (physical or virtual) that a particular set of values is assigne...
SmallVector< std::pair< Register, TypeSize >, 4 > getRegsAndSizes() const
Return a list of registers and their sizes.
SmallVector< unsigned, 4 > RegCount
This list holds the number of registers for each value.
bool isABIMangled() const
SmallVector< EVT, 4 > ValueVTs
The value types of the values, which may not be legal, and may need be promoted or synthesized from o...
SmallVector< Register, 4 > Regs
This list holds the registers assigned to the values.
void AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching, unsigned MatchingIdx, const SDLoc &dl, SelectionDAG &DAG, std::vector< SDValue > &Ops) const
Add this value to the specified inlineasm node operand list.
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr) const
Emit a series of CopyFromReg nodes that copies from this value and returns the result as a ValueVTs v...
SmallVector< MVT, 4 > RegVTs
The value types of the registers.
void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr, ISD::NodeType PreferredExtendType=ISD::ANY_EXTEND) const
Emit a series of CopyToReg nodes that copies the specified value into the registers specified by this...
std::optional< CallingConv::ID > CallConv
Records if this value needs to be treated in an ABI dependant manner, different to normal type legali...
bool occupiesMultipleRegs() const
Check if the total RegCount is greater than one.
These are IR-level optimization flags that may be propagated to SDNodes.
void copyFMF(const FPMathOperator &FPMO)
Propagate the fast-math-flags from an IR FPMathOperator.
void setUnpredictable(bool b)
bool hasAllowReassociation() const
void setNoUnsignedWrap(bool b)
void setNoSignedWrap(bool b)
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
A MapVector that performs no allocations if smaller than a certain size.
MachineBasicBlock * Default
BranchProbability DefaultProb
MachineBasicBlock * Parent
bool FallthroughUnreachable
MachineBasicBlock * ThisBB
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...
BranchProbability TrueProb
BranchProbability FalseProb
MachineBasicBlock * TrueBB
MachineBasicBlock * FalseBB
SDLoc DL
The debug location of the instruction this CaseBlock was produced from.
static CaseCluster range(const ConstantInt *Low, const ConstantInt *High, MachineBasicBlock *MBB, BranchProbability Prob)
This contains information for each constraint that we are lowering.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setConvergent(bool Value=true)
CallLoweringInfo & setCFIType(const ConstantInt *Type)
SmallVector< ISD::InputArg, 32 > Ins
bool IsPostTypeLegalization
SmallVector< SDValue, 4 > InVals
Type * OrigRetTy
Original unlegalized return type.
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setIsPreallocated(bool Value=true)
CallLoweringInfo & setConvergenceControlToken(SDValue Token)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setPtrAuth(PtrAuthInfo Value)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setDiscardResult(bool Value=true)
This structure contains the information necessary for lowering pointer-authenticating indirect calls.
void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin, MCSymbol *InvokeEnd)