78#include "llvm/IR/IntrinsicsAArch64.h"
79#include "llvm/IR/IntrinsicsAMDGPU.h"
80#include "llvm/IR/IntrinsicsWebAssembly.h"
113#define DEBUG_TYPE "isel"
121 cl::desc(
"Insert the experimental `assertalign` node."),
126 cl::desc(
"Generate low-precision inline sequences "
127 "for some float libcalls"),
133 cl::desc(
"Set the case probability threshold for peeling the case from a "
134 "switch statement. A value greater than 100 will void this "
154 const SDValue *Parts,
unsigned NumParts,
157 std::optional<CallingConv::ID> CC);
166 unsigned NumParts,
MVT PartVT,
EVT ValueVT,
const Value *V,
168 std::optional<CallingConv::ID> CC = std::nullopt,
169 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
173 PartVT, ValueVT, CC))
180 assert(NumParts > 0 &&
"No parts to assemble!");
191 unsigned RoundBits = PartBits * RoundParts;
192 EVT RoundVT = RoundBits == ValueBits ?
198 if (RoundParts > 2) {
202 PartVT, HalfVT, V, InChain);
204 Lo = DAG.
getNode(ISD::BITCAST,
DL, HalfVT, Parts[0]);
205 Hi = DAG.
getNode(ISD::BITCAST,
DL, HalfVT, Parts[1]);
213 if (RoundParts < NumParts) {
215 unsigned OddParts = NumParts - RoundParts;
218 OddVT, V, InChain, CC);
234 assert(ValueVT ==
EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
245 !PartVT.
isVector() &&
"Unexpected split");
257 if (PartEVT == ValueVT)
261 ValueVT.
bitsLT(PartEVT)) {
270 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
274 if (ValueVT.
bitsLT(PartEVT)) {
279 Val = DAG.
getNode(*AssertOp,
DL, PartEVT, Val,
294 llvm::Attribute::StrictFP)) {
296 DAG.
getVTList(ValueVT, MVT::Other), InChain, Val,
303 return DAG.
getNode(ISD::FP_EXTEND,
DL, ValueVT, Val);
308 if (PartEVT == MVT::x86mmx && ValueVT.
isInteger() &&
309 ValueVT.
bitsLT(PartEVT)) {
310 Val = DAG.
getNode(ISD::BITCAST,
DL, MVT::i64, Val);
318 const Twine &ErrMsg) {
321 return Ctx.emitError(ErrMsg);
324 if (CI->isInlineAsm()) {
326 *CI, ErrMsg +
", possible invalid constraint for vector type"));
329 return Ctx.emitError(
I, ErrMsg);
338 const SDValue *Parts,
unsigned NumParts,
341 std::optional<CallingConv::ID> CallConv) {
343 assert(NumParts > 0 &&
"No parts to assemble!");
344 const bool IsABIRegCopy = CallConv.has_value();
353 unsigned NumIntermediates;
358 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT,
359 NumIntermediates, RegisterVT);
363 NumIntermediates, RegisterVT);
366 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
368 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
371 "Part type sizes don't match!");
375 if (NumIntermediates == NumParts) {
378 for (
unsigned i = 0; i != NumParts; ++i)
380 V, InChain, CallConv);
381 }
else if (NumParts > 0) {
384 assert(NumParts % NumIntermediates == 0 &&
385 "Must expand into a divisible number of parts!");
386 unsigned Factor = NumParts / NumIntermediates;
387 for (
unsigned i = 0; i != NumIntermediates; ++i)
389 IntermediateVT, V, InChain, CallConv);
404 DL, BuiltVectorTy,
Ops);
410 if (PartEVT == ValueVT)
416 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
426 "Cannot narrow, it would be a lossy transformation");
432 if (PartEVT == ValueVT)
435 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
439 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
450 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
456 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
457 }
else if (ValueVT.
bitsLT(PartEVT)) {
466 *DAG.
getContext(), V,
"non-trivial scalar-to-vector conversion");
475 Val = DAG.
getNode(ISD::BITCAST,
DL, ValueSVT, Val);
497 std::optional<CallingConv::ID> CallConv);
504 unsigned NumParts,
MVT PartVT,
const Value *V,
505 std::optional<CallingConv::ID> CallConv = std::nullopt,
519 unsigned OrigNumParts = NumParts;
521 "Copying to an illegal type!");
527 EVT PartEVT = PartVT;
528 if (PartEVT == ValueVT) {
529 assert(NumParts == 1 &&
"No-op copy with multiple parts!");
538 assert(NumParts == 1 &&
"Do not know what to promote to!");
539 Val = DAG.
getNode(ISD::FP_EXTEND,
DL, PartVT, Val);
545 Val = DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
549 "Unknown mismatch!");
551 Val = DAG.
getNode(ExtendKind,
DL, ValueVT, Val);
552 if (PartVT == MVT::x86mmx)
553 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
557 assert(NumParts == 1 && PartEVT != ValueVT);
558 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
563 "Unknown mismatch!");
566 if (PartVT == MVT::x86mmx)
567 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
573 "Failed to tile the value with PartVT!");
576 if (PartEVT != ValueVT) {
578 "scalar-to-vector conversion failed");
579 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
587 if (NumParts & (NumParts - 1)) {
590 "Do not know what to expand to!");
592 unsigned RoundBits = RoundParts * PartBits;
593 unsigned OddParts = NumParts - RoundParts;
602 std::reverse(Parts + RoundParts, Parts + NumParts);
604 NumParts = RoundParts;
616 for (
unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
617 for (
unsigned i = 0; i < NumParts; i += StepSize) {
618 unsigned ThisBits = StepSize * PartBits / 2;
621 SDValue &Part1 = Parts[i+StepSize/2];
628 if (ThisBits == PartBits && ThisVT != PartVT) {
629 Part0 = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Part0);
630 Part1 = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Part1);
636 std::reverse(Parts, Parts + OrigNumParts);
658 if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
660 "Cannot widen to illegal type");
663 }
else if (PartEVT != ValueEVT) {
678 Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
689 std::optional<CallingConv::ID> CallConv) {
693 const bool IsABIRegCopy = CallConv.has_value();
696 EVT PartEVT = PartVT;
697 if (PartEVT == ValueVT) {
701 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
736 Val = DAG.
getNode(ISD::FP_EXTEND,
DL, PartVT, Val);
743 "lossy conversion of vector to scalar type");
758 unsigned NumIntermediates;
762 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
767 NumIntermediates, RegisterVT);
770 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
772 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
775 "Mixing scalable and fixed vectors when copying in parts");
777 std::optional<ElementCount> DestEltCnt;
787 if (ValueVT == BuiltVectorTy) {
791 Val = DAG.
getNode(ISD::BITCAST,
DL, BuiltVectorTy, Val);
811 for (
unsigned i = 0; i != NumIntermediates; ++i) {
826 if (NumParts == NumIntermediates) {
829 for (
unsigned i = 0; i != NumParts; ++i)
831 }
else if (NumParts > 0) {
834 assert(NumIntermediates != 0 &&
"division by zero");
835 assert(NumParts % NumIntermediates == 0 &&
836 "Must expand into a divisible number of parts!");
837 unsigned Factor = NumParts / NumIntermediates;
838 for (
unsigned i = 0; i != NumIntermediates; ++i)
846 if (
I.hasOperandBundlesOtherThan(AllowedBundles)) {
850 for (
unsigned i = 0, e =
I.getNumOperandBundles(); i != e; ++i) {
853 OS << LS << U.getTagName();
856 Twine(
"cannot lower ", Name)
862 EVT valuevt, std::optional<CallingConv::ID> CC)
868 std::optional<CallingConv::ID> CC) {
882 for (
unsigned i = 0; i != NumRegs; ++i)
883 Regs.push_back(Reg + i);
884 RegVTs.push_back(RegisterVT);
886 Reg = Reg.id() + NumRegs;
913 for (
unsigned i = 0; i != NumRegs; ++i) {
919 *Glue =
P.getValue(2);
922 Chain =
P.getValue(1);
950 EVT FromVT(MVT::Other);
954 }
else if (NumSignBits > 1) {
962 assert(FromVT != MVT::Other);
968 RegisterVT, ValueVT, V, Chain,
CallConv);
984 unsigned NumRegs =
Regs.size();
998 NumParts, RegisterVT, V,
CallConv, ExtendKind);
1004 for (
unsigned i = 0; i != NumRegs; ++i) {
1016 if (NumRegs == 1 || Glue)
1027 Chain = Chains[NumRegs-1];
1033 unsigned MatchingIdx,
const SDLoc &dl,
1035 std::vector<SDValue> &
Ops)
const {
1040 Flag.setMatchingOp(MatchingIdx);
1041 else if (!
Regs.empty() &&
Regs.front().isVirtual()) {
1049 Flag.setRegClass(RC->
getID());
1060 "No 1:1 mapping from clobbers to regs?");
1063 for (
unsigned I = 0, E =
ValueVTs.size();
I != E; ++
I) {
1068 "If we clobbered the stack pointer, MFI should know about it.");
1077 for (
unsigned i = 0; i != NumRegs; ++i) {
1078 assert(Reg <
Regs.size() &&
"Mismatch in # registers expected");
1090 unsigned RegCount = std::get<0>(CountAndVT);
1091 MVT RegisterVT = std::get<1>(CountAndVT);
1108 SL->init(
DAG.getTargetLoweringInfo(), TM,
DAG.getDataLayout());
1110 *
DAG.getMachineFunction().getFunction().getParent());
1115 UnusedArgNodeMap.clear();
1117 PendingExports.clear();
1118 PendingConstrainedFP.clear();
1119 PendingConstrainedFPStrict.clear();
1127 DanglingDebugInfoMap.clear();
1134 if (Pending.
empty())
1140 unsigned i = 0, e = Pending.
size();
1141 for (; i != e; ++i) {
1143 if (Pending[i].
getNode()->getOperand(0) == Root)
1151 if (Pending.
size() == 1)
1170 PendingConstrainedFP.size() +
1171 PendingConstrainedFPStrict.size());
1173 PendingConstrainedFP.end());
1174 PendingLoads.append(PendingConstrainedFPStrict.begin(),
1175 PendingConstrainedFPStrict.end());
1176 PendingConstrainedFP.clear();
1177 PendingConstrainedFPStrict.clear();
1184 PendingExports.append(PendingConstrainedFPStrict.begin(),
1185 PendingConstrainedFPStrict.end());
1186 PendingConstrainedFPStrict.clear();
1187 return updateRoot(PendingExports);
1194 assert(Variable &&
"Missing variable");
1201 <<
"dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
1217 if (IsParameter && FINode) {
1219 SDV =
DAG.getFrameIndexDbgValue(Variable,
Expression, FINode->getIndex(),
1220 true,
DL, SDNodeOrder);
1225 FuncArgumentDbgValueKind::Declare,
N);
1228 SDV =
DAG.getDbgValue(Variable,
Expression,
N.getNode(),
N.getResNo(),
1229 true,
DL, SDNodeOrder);
1231 DAG.AddDbgValue(SDV, IsParameter);
1236 FuncArgumentDbgValueKind::Declare,
N)) {
1238 <<
" (could not emit func-arg dbg_value)\n");
1249 for (
auto It = FnVarLocs->locs_begin(&
I), End = FnVarLocs->locs_end(&
I);
1251 auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1253 if (It->Values.isKillLocation(It->Expr)) {
1259 It->Values.hasArgList())) {
1262 FnVarLocs->getDILocalVariable(It->VariableID),
1263 It->Expr, Vals.
size() > 1, It->DL, SDNodeOrder);
1276 bool SkipDbgVariableRecords =
DAG.getFunctionVarLocs();
1279 for (
DbgRecord &DR :
I.getDbgRecordRange()) {
1281 assert(DLR->getLabel() &&
"Missing label");
1283 DAG.getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);
1284 DAG.AddDbgLabel(SDV);
1288 if (SkipDbgVariableRecords)
1296 if (
FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
1298 LLVM_DEBUG(
dbgs() <<
"SelectionDAG visiting dbg_declare: " << DVR
1307 if (Values.
empty()) {
1324 SDNodeOrder, IsVariadic)) {
1335 if (
I.isTerminator()) {
1336 HandlePHINodesInSuccessorBlocks(
I.getParent());
1343 bool NodeInserted =
false;
1344 std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1345 MDNode *PCSectionsMD =
I.getMetadata(LLVMContext::MD_pcsections);
1346 MDNode *MMRA =
I.getMetadata(LLVMContext::MD_mmra);
1347 if (PCSectionsMD || MMRA) {
1348 InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1349 DAG, [&](
SDNode *) { NodeInserted =
true; });
1359 if (PCSectionsMD || MMRA) {
1360 auto It = NodeMap.find(&
I);
1361 if (It != NodeMap.end()) {
1363 DAG.addPCSections(It->second.getNode(), PCSectionsMD);
1365 DAG.addMMRAMetadata(It->second.getNode(), MMRA);
1366 }
else if (NodeInserted) {
1369 errs() <<
"warning: loosing !pcsections and/or !mmra metadata ["
1370 <<
I.getModule()->getName() <<
"]\n";
1379void SelectionDAGBuilder::visitPHI(
const PHINode &) {
1389#define HANDLE_INST(NUM, OPCODE, CLASS) \
1390 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1391#include "llvm/IR/Instruction.def"
1403 for (
const Value *V : Values) {
1428 DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr,
DL, Order);
1433 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1434 DIVariable *DanglingVariable = DDI.getVariable();
1436 if (DanglingVariable == Variable && Expr->
fragmentsOverlap(DanglingExpr)) {
1438 << printDDI(
nullptr, DDI) <<
"\n");
1444 for (
auto &DDIMI : DanglingDebugInfoMap) {
1445 DanglingDebugInfoVector &DDIV = DDIMI.second;
1449 for (
auto &DDI : DDIV)
1450 if (isMatchingDbgValue(DDI))
1453 erase_if(DDIV, isMatchingDbgValue);
1461 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1462 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1465 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1466 for (
auto &DDI : DDIV) {
1469 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1472 assert(Variable->isValidLocationForIntrinsic(
DL) &&
1473 "Expected inlined-at fields to agree");
1482 if (!EmitFuncArgumentDbgValue(V, Variable, Expr,
DL,
1483 FuncArgumentDbgValueKind::Value, Val)) {
1485 << printDDI(V, DDI) <<
"\n");
1492 <<
"changing SDNodeOrder from " << DbgSDNodeOrder <<
" to "
1493 << ValSDNodeOrder <<
"\n");
1494 SDV = getDbgValue(Val, Variable, Expr,
DL,
1495 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1496 DAG.AddDbgValue(SDV,
false);
1500 <<
" in EmitFuncArgumentDbgValue\n");
1502 LLVM_DEBUG(
dbgs() <<
"Dropping debug info for " << printDDI(V, DDI)
1506 DAG.getConstantDbgValue(Variable, Expr,
Poison,
DL, DbgSDNodeOrder);
1507 DAG.AddDbgValue(SDV,
false);
1514 DanglingDebugInfo &DDI) {
1519 const Value *OrigV = V;
1523 unsigned SDOrder = DDI.getSDNodeOrder();
1527 bool StackValue =
true;
1552 if (!AdditionalValues.
empty())
1562 dbgs() <<
"Salvaged debug location info for:\n " << *Var <<
"\n"
1563 << *OrigV <<
"\nBy stripping back to:\n " << *V <<
"\n");
1571 assert(OrigV &&
"V shouldn't be null");
1573 auto *SDV =
DAG.getConstantDbgValue(Var, Expr,
Poison,
DL, SDNodeOrder);
1574 DAG.AddDbgValue(SDV,
false);
1576 << printDDI(OrigV, DDI) <<
"\n");
1593 unsigned Order,
bool IsVariadic) {
1598 if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
1603 for (
const Value *V : Values) {
1613 if (CE->getOpcode() == Instruction::IntToPtr) {
1632 N = UnusedArgNodeMap[V];
1637 EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1638 FuncArgumentDbgValueKind::Value,
N))
1665 bool IsParamOfFunc =
1673 auto VMI =
FuncInfo.ValueMap.find(V);
1674 if (VMI !=
FuncInfo.ValueMap.end()) {
1679 V->getType(), std::nullopt);
1685 unsigned BitsToDescribe = 0;
1687 BitsToDescribe = *VarSize;
1689 BitsToDescribe = Fragment->SizeInBits;
1692 if (
Offset >= BitsToDescribe)
1695 unsigned RegisterSize = RegAndSize.second;
1696 unsigned FragmentSize = (
Offset + RegisterSize > BitsToDescribe)
1697 ? BitsToDescribe -
Offset
1700 Expr,
Offset, FragmentSize);
1704 Var, *FragmentExpr, RegAndSize.first,
false, DbgLoc, Order);
1705 DAG.AddDbgValue(SDV,
false);
1721 DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1722 false, DbgLoc, Order, IsVariadic);
1723 DAG.AddDbgValue(SDV,
false);
1729 for (
auto &Pair : DanglingDebugInfoMap)
1730 for (
auto &DDI : Pair.second)
1741 if (It !=
FuncInfo.ValueMap.end()) {
1745 DAG.getDataLayout(), InReg, Ty,
1762 if (
N.getNode())
return N;
1822 return DAG.getSplatBuildVector(
1825 return DAG.getConstant(*CI,
DL, VT);
1834 getValue(CPA->getAddrDiscriminator()),
1835 getValue(CPA->getDiscriminator()));
1851 visit(CE->getOpcode(), *CE);
1853 assert(N1.
getNode() &&
"visit didn't populate the NodeMap!");
1859 for (
const Use &U :
C->operands()) {
1865 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1866 Constants.push_back(
SDValue(Val, i));
1875 for (
uint64_t i = 0, e = CDS->getNumElements(); i != e; ++i) {
1879 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1888 if (
C->getType()->isStructTy() ||
C->getType()->isArrayTy()) {
1890 "Unknown struct or array constant!");
1894 unsigned NumElts = ValueVTs.
size();
1898 for (
unsigned i = 0; i != NumElts; ++i) {
1899 EVT EltVT = ValueVTs[i];
1901 Constants[i] =
DAG.getUNDEF(EltVT);
1912 return DAG.getBlockAddress(BA, VT);
1915 return getValue(Equiv->getGlobalValue());
1920 if (VT == MVT::aarch64svcount) {
1921 assert(
C->isNullValue() &&
"Can only zero this target type!");
1927 assert(
C->isNullValue() &&
"Can only zero this target type!");
1944 for (
unsigned i = 0; i != NumElements; ++i)
1972 return DAG.getFrameIndex(
1980 std::optional<CallingConv::ID> CallConv;
1982 if (CI && !CI->isInlineAsm())
1983 CallConv = CI->getCallingConv();
1986 Inst->getType(), CallConv);
2000void SelectionDAGBuilder::visitCatchPad(
const CatchPadInst &
I) {
2013 if (IsMSVCCXX || IsCoreCLR)
2019 MachineBasicBlock *TargetMBB =
FuncInfo.getMBB(
I.getSuccessor());
2020 FuncInfo.MBB->addSuccessor(TargetMBB);
2027 if (TargetMBB != NextBlock(
FuncInfo.MBB) ||
2036 DAG.getMachineFunction().setHasEHContTarget(
true);
2042 Value *ParentPad =
I.getCatchSwitchParentPad();
2045 SuccessorColor = &
FuncInfo.Fn->getEntryBlock();
2048 assert(SuccessorColor &&
"No parent funclet for catchret!");
2049 MachineBasicBlock *SuccessorColorMBB =
FuncInfo.getMBB(SuccessorColor);
2050 assert(SuccessorColorMBB &&
"No MBB for SuccessorColor!");
2055 DAG.getBasicBlock(SuccessorColorMBB));
2059void SelectionDAGBuilder::visitCleanupPad(
const CleanupPadInst &CPI) {
2065 FuncInfo.MBB->setIsEHFuncletEntry();
2066 FuncInfo.MBB->setIsCleanupFuncletEntry();
2095 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2101 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2102 UnwindDests.back().first->setIsEHScopeEntry();
2105 UnwindDests.back().first->setIsEHFuncletEntry();
2109 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2110 UnwindDests.emplace_back(FuncInfo.
getMBB(CatchPadBB), Prob);
2112 if (IsMSVCCXX || IsCoreCLR)
2113 UnwindDests.back().first->setIsEHFuncletEntry();
2115 UnwindDests.back().first->setIsEHScopeEntry();
2117 NewEHPadBB = CatchSwitch->getUnwindDest();
2123 if (BPI && NewEHPadBB)
2125 EHPadBB = NewEHPadBB;
2132 auto UnwindDest =
I.getUnwindDest();
2133 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
2134 BranchProbability UnwindDestProb =
2139 for (
auto &UnwindDest : UnwindDests) {
2140 UnwindDest.first->setIsEHPad();
2141 addSuccessorWithProb(
FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
2143 FuncInfo.MBB->normalizeSuccProbs();
2146 MachineBasicBlock *CleanupPadMBB =
2147 FuncInfo.getMBB(
I.getCleanupPad()->getParent());
2153void SelectionDAGBuilder::visitCatchSwitch(
const CatchSwitchInst &CSI) {
2157void SelectionDAGBuilder::visitRet(
const ReturnInst &
I) {
2158 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
2159 auto &
DL =
DAG.getDataLayout();
2171 if (
I.getParent()->getTerminatingDeoptimizeCall()) {
2188 SmallVector<uint64_t, 4>
Offsets;
2191 unsigned NumValues = ValueVTs.
size();
2194 Align BaseAlign =
DL.getPrefTypeAlign(
I.getOperand(0)->getType());
2195 for (
unsigned i = 0; i != NumValues; ++i) {
2202 if (MemVTs[i] != ValueVTs[i])
2204 Chains[i] =
DAG.getStore(
2212 MVT::Other, Chains);
2213 }
else if (
I.getNumOperands() != 0) {
2216 unsigned NumValues =
Types.size();
2220 const Function *
F =
I.getParent()->getParent();
2223 I.getOperand(0)->getType(),
F->getCallingConv(),
2227 if (
F->getAttributes().hasRetAttr(Attribute::SExt))
2229 else if (
F->getAttributes().hasRetAttr(Attribute::ZExt))
2232 LLVMContext &
Context =
F->getContext();
2233 bool RetInReg =
F->getAttributes().hasRetAttr(Attribute::InReg);
2235 for (
unsigned j = 0;
j != NumValues; ++
j) {
2248 &Parts[0], NumParts, PartVT, &
I, CC, ExtendKind);
2251 ISD::ArgFlagsTy
Flags = ISD::ArgFlagsTy();
2255 if (
I.getOperand(0)->getType()->isPointerTy()) {
2257 Flags.setPointerAddrSpace(
2261 if (NeedsRegBlock) {
2262 Flags.setInConsecutiveRegs();
2263 if (j == NumValues - 1)
2264 Flags.setInConsecutiveRegsLast();
2272 else if (
F->getAttributes().hasRetAttr(Attribute::NoExt))
2275 for (
unsigned i = 0; i < NumParts; ++i) {
2278 VT, Types[j], 0, 0));
2288 const Function *
F =
I.getParent()->getParent();
2290 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2292 ISD::ArgFlagsTy
Flags = ISD::ArgFlagsTy();
2293 Flags.setSwiftError();
2305 bool isVarArg =
DAG.getMachineFunction().getFunction().isVarArg();
2307 DAG.getMachineFunction().getFunction().getCallingConv();
2308 Chain =
DAG.getTargetLoweringInfo().LowerReturn(
2313 "LowerReturn didn't return a valid chain!");
2324 if (V->getType()->isEmptyTy())
2328 if (VMI !=
FuncInfo.ValueMap.end()) {
2330 "Unused value assigned virtual registers!");
2343 if (
FuncInfo.isExportedInst(V))
return;
2355 if (VI->getParent() == FromBB)
2381 const BasicBlock *SrcBB = Src->getBasicBlock();
2382 const BasicBlock *DstBB = Dst->getBasicBlock();
2386 auto SuccSize = std::max<uint32_t>(
succ_size(SrcBB), 1);
2396 Src->addSuccessorWithoutProb(Dst);
2399 Prob = getEdgeProbability(Src, Dst);
2400 Src->addSuccessor(Dst, Prob);
2406 return I->getParent() == BB;
2430 if (CurBB == SwitchBB ||
2436 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2441 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2443 if (TM.Options.NoNaNsFPMath)
2447 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1),
nullptr,
2449 SL->SwitchCases.push_back(CB);
2458 SL->SwitchCases.push_back(CB);
2466 unsigned Depth = 0) {
2475 if (Necessary !=
nullptr) {
2478 if (Necessary->contains(
I))
2497 if (
I.getNumSuccessors() != 2)
2500 if (!
I.isConditional())
2512 if (BPI !=
nullptr) {
2518 std::optional<bool> Likely;
2521 else if (BPI->
isEdgeHot(
I.getParent(), IfFalse))
2525 if (
Opc == (*Likely ? Instruction::And : Instruction::Or))
2537 if (CostThresh <= 0)
2555 const auto &TLI =
DAG.getTargetLoweringInfo();
2562 Value *BrCond =
I.getCondition();
2563 auto ShouldCountInsn = [&RhsDeps, &BrCond](
const Instruction *Ins) {
2564 for (
const auto *U : Ins->users()) {
2567 if (UIns != BrCond && !RhsDeps.
contains(UIns))
2580 for (
unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
2582 for (
const auto &InsPair : RhsDeps) {
2583 if (!ShouldCountInsn(InsPair.first)) {
2584 ToDrop = InsPair.first;
2588 if (ToDrop ==
nullptr)
2590 RhsDeps.erase(ToDrop);
2593 for (
const auto &InsPair : RhsDeps) {
2601 if (CostOfIncluding > CostThresh)
2627 const Value *BOpOp0, *BOpOp1;
2641 if (BOpc == Instruction::And)
2642 BOpc = Instruction::Or;
2643 else if (BOpc == Instruction::Or)
2644 BOpc = Instruction::And;
2650 bool BOpIsInOrAndTree = BOpc && BOpc ==
Opc && BOp->
hasOneUse();
2655 TProb, FProb, InvertCond);
2665 if (
Opc == Instruction::Or) {
2686 auto NewTrueProb = TProb / 2;
2687 auto NewFalseProb = TProb / 2 + FProb;
2690 NewFalseProb, InvertCond);
2697 Probs[1], InvertCond);
2699 assert(
Opc == Instruction::And &&
"Unknown merge op!");
2719 auto NewTrueProb = TProb + FProb / 2;
2720 auto NewFalseProb = FProb / 2;
2723 NewFalseProb, InvertCond);
2730 Probs[1], InvertCond);
2739 if (Cases.size() != 2)
return true;
2743 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2744 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2745 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2746 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2752 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2753 Cases[0].CC == Cases[1].CC &&
2756 if (Cases[0].CC ==
ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2758 if (Cases[0].CC ==
ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2765void SelectionDAGBuilder::visitBr(
const BranchInst &
I) {
2771 if (
I.isUnconditional()) {
2777 if (Succ0MBB != NextBlock(BrMBB) ||
2790 const Value *CondVal =
I.getCondition();
2791 MachineBasicBlock *Succ1MBB =
FuncInfo.getMBB(
I.getSuccessor(1));
2810 bool IsUnpredictable =
I.hasMetadata(LLVMContext::MD_unpredictable);
2812 if (!
DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2815 const Value *BOp0, *BOp1;
2818 Opcode = Instruction::And;
2820 Opcode = Instruction::Or;
2827 DAG.getTargetLoweringInfo().getJumpConditionMergingParams(
2828 Opcode, BOp0, BOp1))) {
2830 getEdgeProbability(BrMBB, Succ0MBB),
2831 getEdgeProbability(BrMBB, Succ1MBB),
2836 assert(
SL->SwitchCases[0].ThisBB == BrMBB &&
"Unexpected lowering!");
2840 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i) {
2847 SL->SwitchCases.erase(
SL->SwitchCases.begin());
2853 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i)
2854 FuncInfo.MF->erase(
SL->SwitchCases[i].ThisBB);
2856 SL->SwitchCases.clear();
2862 nullptr, Succ0MBB, Succ1MBB, BrMBB,
getCurSDLoc(),
2883 if (CB.
TrueBB != NextBlock(SwitchBB)) {
2890 auto &TLI =
DAG.getTargetLoweringInfo();
2914 Cond =
DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.
CC);
2926 Cond =
DAG.getSetCC(dl, MVT::i1, CmpOp,
DAG.getConstant(
High, dl, VT),
2930 VT, CmpOp,
DAG.getConstant(
Low, dl, VT));
2931 Cond =
DAG.getSetCC(dl, MVT::i1, SUB,
2946 if (CB.
TrueBB == NextBlock(SwitchBB)) {
2962 BrCond =
DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2965 DAG.setRoot(BrCond);
2971 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
2972 assert(JT.Reg &&
"Should lower JT Header first!");
2973 EVT PTy =
DAG.getTargetLoweringInfo().getJumpTableRegTy(
DAG.getDataLayout());
2975 SDValue Table =
DAG.getJumpTable(JT.JTI, PTy);
2976 SDValue BrJumpTable =
DAG.getNode(ISD::BR_JT, *JT.SL, MVT::Other,
2977 Index.getValue(1), Table, Index);
2978 DAG.setRoot(BrJumpTable);
2986 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
2987 const SDLoc &dl = *JT.SL;
2993 DAG.getConstant(JTH.
First, dl, VT));
3008 JT.Reg = JumpTableReg;
3016 Sub.getValueType()),
3019 SDValue BrCond =
DAG.getNode(ISD::BRCOND, dl,
3020 MVT::Other, CopyTo, CMP,
3021 DAG.getBasicBlock(JT.Default));
3024 if (JT.MBB != NextBlock(SwitchBB))
3025 BrCond =
DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
3026 DAG.getBasicBlock(JT.MBB));
3028 DAG.setRoot(BrCond);
3031 if (JT.MBB != NextBlock(SwitchBB))
3032 DAG.setRoot(
DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
3033 DAG.getBasicBlock(JT.MBB)));
3035 DAG.setRoot(CopyTo);
3058 if (PtrTy != PtrMemTy)
3074 auto &
DL =
DAG.getDataLayout();
3083 SDValue StackSlotPtr =
DAG.getFrameIndex(FI, PtrTy);
3090 PtrMemTy, dl,
DAG.getEntryNode(), StackSlotPtr,
3103 assert(GuardCheckFn &&
"Guard check function is null");
3114 Entry.IsInReg =
true;
3115 Args.push_back(Entry);
3121 getValue(GuardCheckFn), std::move(Args));
3123 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
3124 DAG.setRoot(Result.second);
3137 Guard =
DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
3148 SDValue BrCond =
DAG.getNode(ISD::BRCOND, dl,
3181 auto &
DL =
DAG.getDataLayout();
3189 SDValue StackSlotPtr =
DAG.getFrameIndex(FI, PtrTy);
3195 PtrMemTy, dl,
DAG.getEntryNode(), StackSlotPtr,
3210 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
3211 Entry.IsInReg =
true;
3212 Args.push_back(Entry);
3218 getValue(GuardCheckFn), std::move(Args));
3224 Chain = TLI.
makeLibCall(
DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
3232 Chain =
DAG.getNode(ISD::TRAP,
getCurSDLoc(), MVT::Other, Chain);
3247 DAG.getNode(
ISD::SUB, dl, VT, SwitchOp,
DAG.getConstant(
B.First, dl, VT));
3251 bool UsePtrType =
false;
3275 if (!
B.FallthroughUnreachable)
3276 addSuccessorWithProb(SwitchBB,
B.Default,
B.DefaultProb);
3277 addSuccessorWithProb(SwitchBB,
MBB,
B.Prob);
3281 if (!
B.FallthroughUnreachable) {
3289 Root =
DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
3290 DAG.getBasicBlock(
B.Default));
3294 if (
MBB != NextBlock(SwitchBB))
3295 Root =
DAG.getNode(ISD::BR, dl, MVT::Other, Root,
DAG.getBasicBlock(
MBB));
3312 if (PopCount == 1) {
3319 }
else if (PopCount == BB.
Range) {
3327 DAG.getConstant(1, dl, VT), ShiftOp);
3331 VT, SwitchVal,
DAG.getConstant(
B.Mask, dl, VT));
3338 addSuccessorWithProb(SwitchBB,
B.TargetBB,
B.ExtraProb);
3340 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3348 Cmp,
DAG.getBasicBlock(
B.TargetBB));
3351 if (NextMBB != NextBlock(SwitchBB))
3352 BrAnd =
DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
3353 DAG.getBasicBlock(NextMBB));
3358void SelectionDAGBuilder::visitInvoke(
const InvokeInst &
I) {
3376 const Value *Callee(
I.getCalledOperand());
3379 visitInlineAsm(
I, EHPadBB);
3384 case Intrinsic::donothing:
3386 case Intrinsic::seh_try_begin:
3387 case Intrinsic::seh_scope_begin:
3388 case Intrinsic::seh_try_end:
3389 case Intrinsic::seh_scope_end:
3395 case Intrinsic::experimental_patchpoint_void:
3396 case Intrinsic::experimental_patchpoint:
3397 visitPatchpoint(
I, EHPadBB);
3399 case Intrinsic::experimental_gc_statepoint:
3405 case Intrinsic::wasm_throw: {
3407 std::array<SDValue, 4>
Ops = {
3418 case Intrinsic::wasm_rethrow: {
3419 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3420 std::array<SDValue, 2>
Ops = {
3429 }
else if (
I.hasDeoptState()) {
3450 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
3451 BranchProbability EHPadBBProb =
3457 addSuccessorWithProb(InvokeMBB, Return);
3458 for (
auto &UnwindDest : UnwindDests) {
3459 UnwindDest.first->setIsEHPad();
3460 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3466 DAG.getBasicBlock(Return)));
3469void SelectionDAGBuilder::visitCallBr(
const CallBrInst &
I) {
3470 MachineBasicBlock *CallBrMBB =
FuncInfo.MBB;
3477 assert(
I.isInlineAsm() &&
"Only know how to handle inlineasm callbr");
3482 SmallPtrSet<BasicBlock *, 8> Dests;
3483 Dests.
insert(
I.getDefaultDest());
3488 for (
unsigned i = 0, e =
I.getNumIndirectDests(); i < e; ++i) {
3491 Target->setIsInlineAsmBrIndirectTarget();
3497 Target->setLabelMustBeEmitted();
3499 if (Dests.
insert(Dest).second)
3507 DAG.getBasicBlock(Return)));
3510void SelectionDAGBuilder::visitResume(
const ResumeInst &RI) {
3511 llvm_unreachable(
"SelectionDAGBuilder shouldn't visit resume instructions!");
3514void SelectionDAGBuilder::visitLandingPad(
const LandingPadInst &LP) {
3516 "Call to landingpad not in landing pad!");
3520 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3536 assert(ValueVTs.
size() == 2 &&
"Only two-valued landingpads are supported");
3541 if (
FuncInfo.ExceptionPointerVirtReg) {
3542 Ops[0] =
DAG.getZExtOrTrunc(
3543 DAG.getCopyFromReg(
DAG.getEntryNode(), dl,
3550 Ops[1] =
DAG.getZExtOrTrunc(
3551 DAG.getCopyFromReg(
DAG.getEntryNode(), dl,
3558 DAG.getVTList(ValueVTs),
Ops);
3566 if (JTB.first.HeaderBB ==
First)
3567 JTB.first.HeaderBB =
Last;
3580 for (
unsigned i = 0, e =
I.getNumSuccessors(); i != e; ++i) {
3582 bool Inserted =
Done.insert(BB).second;
3587 addSuccessorWithProb(IndirectBrMBB, Succ);
3597 if (!
I.shouldLowerToTrap(
DAG.getTarget().Options.TrapUnreachable,
3598 DAG.getTarget().Options.NoTrapAfterNoreturn))
3604void SelectionDAGBuilder::visitUnary(
const User &
I,
unsigned Opcode) {
3607 Flags.copyFMF(*FPOp);
3615void SelectionDAGBuilder::visitBinary(
const User &
I,
unsigned Opcode) {
3618 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3619 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3622 Flags.setExact(ExactOp->isExact());
3624 Flags.setDisjoint(DisjointOp->isDisjoint());
3626 Flags.copyFMF(*FPOp);
3635void SelectionDAGBuilder::visitShift(
const User &
I,
unsigned Opcode) {
3639 EVT ShiftTy =
DAG.getTargetLoweringInfo().getShiftAmountTy(
3644 if (!
I.getType()->isVectorTy() && Op2.
getValueType() != ShiftTy) {
3646 "Unexpected shift type");
3656 if (
const OverflowingBinaryOperator *OFBinOp =
3658 nuw = OFBinOp->hasNoUnsignedWrap();
3659 nsw = OFBinOp->hasNoSignedWrap();
3661 if (
const PossiblyExactOperator *ExactOp =
3663 exact = ExactOp->isExact();
3666 Flags.setExact(exact);
3667 Flags.setNoSignedWrap(nsw);
3668 Flags.setNoUnsignedWrap(nuw);
3674void SelectionDAGBuilder::visitSDiv(
const User &
I) {
3685void SelectionDAGBuilder::visitICmp(
const ICmpInst &
I) {
3691 auto &TLI =
DAG.getTargetLoweringInfo();
3704 Flags.setSameSign(
I.hasSameSign());
3705 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
3707 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3712void SelectionDAGBuilder::visitFCmp(
const FCmpInst &
I) {
3719 if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
3723 Flags.copyFMF(*FPMO);
3724 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
3726 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3735 return isa<SelectInst>(V);
3739void SelectionDAGBuilder::visitSelect(
const User &
I) {
3743 unsigned NumValues = ValueVTs.
size();
3744 if (NumValues == 0)
return;
3754 bool IsUnaryAbs =
false;
3755 bool Negate =
false;
3759 Flags.copyFMF(*FPOp);
3761 Flags.setUnpredictable(
3766 EVT VT = ValueVTs[0];
3767 LLVMContext &Ctx = *
DAG.getContext();
3768 auto &TLI =
DAG.getTargetLoweringInfo();
3778 bool UseScalarMinMax = VT.
isVector() &&
3787 switch (SPR.Flavor) {
3793 switch (SPR.NaNBehavior) {
3806 switch (SPR.NaNBehavior) {
3850 for (
unsigned i = 0; i != NumValues; ++i) {
3856 Values[i] =
DAG.getNegative(Values[i], dl, VT);
3859 for (
unsigned i = 0; i != NumValues; ++i) {
3863 Values[i] =
DAG.getNode(
3870 DAG.getVTList(ValueVTs), Values));
3873void SelectionDAGBuilder::visitTrunc(
const User &
I) {
3876 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3880 Flags.setNoSignedWrap(Trunc->hasNoSignedWrap());
3881 Flags.setNoUnsignedWrap(Trunc->hasNoUnsignedWrap());
3887void SelectionDAGBuilder::visitZExt(
const User &
I) {
3891 auto &TLI =
DAG.getTargetLoweringInfo();
3896 Flags.setNonNeg(PNI->hasNonNeg());
3901 if (
Flags.hasNonNeg() &&
3910void SelectionDAGBuilder::visitSExt(
const User &
I) {
3914 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3919void SelectionDAGBuilder::visitFPTrunc(
const User &
I) {
3925 Flags.copyFMF(*TruncInst);
3926 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3929 DAG.getTargetConstant(
3934void SelectionDAGBuilder::visitFPExt(
const User &
I) {
3937 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3942void SelectionDAGBuilder::visitFPToUI(
const User &
I) {
3945 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3950void SelectionDAGBuilder::visitFPToSI(
const User &
I) {
3953 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3958void SelectionDAGBuilder::visitUIToFP(
const User &
I) {
3961 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3965 Flags.setNonNeg(PNI->hasNonNeg());
3970void SelectionDAGBuilder::visitSIToFP(
const User &
I) {
3973 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3978void SelectionDAGBuilder::visitPtrToAddr(
const User &
I) {
3981 const auto &TLI =
DAG.getTargetLoweringInfo();
3989void SelectionDAGBuilder::visitPtrToInt(
const User &
I) {
3993 auto &TLI =
DAG.getTargetLoweringInfo();
3994 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4003void SelectionDAGBuilder::visitIntToPtr(
const User &
I) {
4007 auto &TLI =
DAG.getTargetLoweringInfo();
4015void SelectionDAGBuilder::visitBitCast(
const User &
I) {
4018 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4023 if (DestVT !=
N.getValueType())
4031 setValue(&
I,
DAG.getConstant(
C->getValue(), dl, DestVT,
false,
4037void SelectionDAGBuilder::visitAddrSpaceCast(
const User &
I) {
4038 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4039 const Value *SV =
I.getOperand(0);
4044 unsigned DestAS =
I.getType()->getPointerAddressSpace();
4046 if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
4052void SelectionDAGBuilder::visitInsertElement(
const User &
I) {
4053 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4060 InVec, InVal, InIdx));
4063void SelectionDAGBuilder::visitExtractElement(
const User &
I) {
4064 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4073void SelectionDAGBuilder::visitShuffleVector(
const User &
I) {
4078 Mask = SVI->getShuffleMask();
4082 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4086 if (
all_of(Mask, [](
int Elem) {
return Elem == 0; }) &&
4091 DAG.getVectorIdxConstant(0,
DL));
4102 unsigned MaskNumElts =
Mask.size();
4104 if (SrcNumElts == MaskNumElts) {
4110 if (SrcNumElts < MaskNumElts) {
4114 if (MaskNumElts % SrcNumElts == 0) {
4118 unsigned NumConcat = MaskNumElts / SrcNumElts;
4119 bool IsConcat =
true;
4120 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
4121 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4127 if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
4128 (ConcatSrcs[i / SrcNumElts] >= 0 &&
4129 ConcatSrcs[i / SrcNumElts] != (
int)(Idx / SrcNumElts))) {
4134 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
4141 for (
auto Src : ConcatSrcs) {
4154 unsigned PaddedMaskNumElts =
alignTo(MaskNumElts, SrcNumElts);
4155 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
4171 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
4172 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4174 if (Idx >= (
int)SrcNumElts)
4175 Idx -= SrcNumElts - PaddedMaskNumElts;
4183 if (MaskNumElts != PaddedMaskNumElts)
4185 DAG.getVectorIdxConstant(0,
DL));
4191 assert(SrcNumElts > MaskNumElts);
4195 int StartIdx[2] = {-1, -1};
4196 bool CanExtract =
true;
4197 for (
int Idx : Mask) {
4202 if (Idx >= (
int)SrcNumElts) {
4210 int NewStartIdx =
alignDown(Idx, MaskNumElts);
4211 if (NewStartIdx + MaskNumElts > SrcNumElts ||
4212 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
4216 StartIdx[Input] = NewStartIdx;
4219 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
4225 for (
unsigned Input = 0; Input < 2; ++Input) {
4226 SDValue &Src = Input == 0 ? Src1 : Src2;
4227 if (StartIdx[Input] < 0)
4228 Src =
DAG.getUNDEF(VT);
4231 DAG.getVectorIdxConstant(StartIdx[Input],
DL));
4236 SmallVector<int, 8> MappedOps(Mask);
4237 for (
int &Idx : MappedOps) {
4238 if (Idx >= (
int)SrcNumElts)
4239 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4244 setValue(&
I,
DAG.getVectorShuffle(VT,
DL, Src1, Src2, MappedOps));
4253 for (
int Idx : Mask) {
4257 Res =
DAG.getUNDEF(EltVT);
4259 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
4260 if (Idx >= (
int)SrcNumElts) Idx -= SrcNumElts;
4263 DAG.getVectorIdxConstant(Idx,
DL));
4273 ArrayRef<unsigned> Indices =
I.getIndices();
4274 const Value *Op0 =
I.getOperand(0);
4276 Type *AggTy =
I.getType();
4283 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4289 unsigned NumAggValues = AggValueVTs.
size();
4290 unsigned NumValValues = ValValueVTs.
size();
4294 if (!NumAggValues) {
4302 for (; i != LinearIndex; ++i)
4303 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4308 for (; i != LinearIndex + NumValValues; ++i)
4309 Values[i] = FromUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4313 for (; i != NumAggValues; ++i)
4314 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4318 DAG.getVTList(AggValueVTs), Values));
4322 ArrayRef<unsigned> Indices =
I.getIndices();
4323 const Value *Op0 =
I.getOperand(0);
4325 Type *ValTy =
I.getType();
4330 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4334 unsigned NumValValues = ValValueVTs.
size();
4337 if (!NumValValues) {
4346 for (
unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4347 Values[i - LinearIndex] =
4353 DAG.getVTList(ValValueVTs), Values));
4356void SelectionDAGBuilder::visitGetElementPtr(
const User &
I) {
4357 Value *Op0 =
I.getOperand(0);
4363 auto &TLI =
DAG.getTargetLoweringInfo();
4368 bool IsVectorGEP =
I.getType()->isVectorTy();
4369 ElementCount VectorElementCount =
4375 const Value *Idx = GTI.getOperand();
4376 if (StructType *StTy = GTI.getStructTypeOrNull()) {
4381 DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(
Field);
4390 N =
DAG.getMemBasePlusOffset(
4391 N,
DAG.getConstant(
Offset, dl,
N.getValueType()), dl, Flags);
4397 unsigned IdxSize =
DAG.getDataLayout().getIndexSizeInBits(AS);
4399 TypeSize ElementSize =
4400 GTI.getSequentialElementStride(
DAG.getDataLayout());
4405 bool ElementScalable = ElementSize.
isScalable();
4411 C =
C->getSplatValue();
4414 if (CI && CI->isZero())
4416 if (CI && !ElementScalable) {
4417 APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4420 if (
N.getValueType().isVector())
4421 OffsVal =
DAG.getConstant(
4424 OffsVal =
DAG.getConstant(Offs, dl, IdxTy);
4431 Flags.setNoUnsignedWrap(
true);
4433 OffsVal =
DAG.getSExtOrTrunc(OffsVal, dl,
N.getValueType());
4435 N =
DAG.getMemBasePlusOffset(
N, OffsVal, dl, Flags);
4443 if (
N.getValueType().isVector()) {
4445 VectorElementCount);
4446 IdxN =
DAG.getSplat(VT, dl, IdxN);
4450 N =
DAG.getSplat(VT, dl,
N);
4456 IdxN =
DAG.getSExtOrTrunc(IdxN, dl,
N.getValueType());
4458 SDNodeFlags ScaleFlags;
4467 if (ElementScalable) {
4468 EVT VScaleTy =
N.getValueType().getScalarType();
4470 ISD::VSCALE, dl, VScaleTy,
4471 DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4472 if (
N.getValueType().isVector())
4473 VScale =
DAG.getSplatVector(
N.getValueType(), dl, VScale);
4474 IdxN =
DAG.getNode(
ISD::MUL, dl,
N.getValueType(), IdxN, VScale,
4479 if (ElementMul != 1) {
4480 if (ElementMul.isPowerOf2()) {
4481 unsigned Amt = ElementMul.logBase2();
4484 DAG.getShiftAmountConstant(Amt,
N.getValueType(), dl),
4487 SDValue Scale =
DAG.getConstant(ElementMul.getZExtValue(), dl,
4489 IdxN =
DAG.getNode(
ISD::MUL, dl,
N.getValueType(), IdxN, Scale,
4499 SDNodeFlags AddFlags;
4502 N =
DAG.getMemBasePlusOffset(
N, IdxN, dl, AddFlags);
4506 if (IsVectorGEP && !
N.getValueType().isVector()) {
4508 N =
DAG.getSplat(VT, dl,
N);
4519 N =
DAG.getPtrExtendInReg(
N, dl, PtrMemTy);
4524void SelectionDAGBuilder::visitAlloca(
const AllocaInst &
I) {
4531 Type *Ty =
I.getAllocatedType();
4532 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4533 auto &
DL =
DAG.getDataLayout();
4534 TypeSize TySize =
DL.getTypeAllocSize(Ty);
4535 MaybeAlign Alignment = std::max(
DL.getPrefTypeAlign(Ty),
I.getAlign());
4541 AllocSize =
DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4544 AllocSize =
DAG.getNode(
ISD::MUL, dl, IntPtr, AllocSize,
4545 DAG.getVScale(dl, IntPtr,
4551 AllocSize =
DAG.getNode(
ISD::MUL, dl, IntPtr, AllocSize,
4552 DAG.getZExtOrTrunc(TySizeValue, dl, IntPtr));
4558 Align StackAlign =
DAG.getSubtarget().getFrameLowering()->getStackAlign();
4559 if (*Alignment <= StackAlign)
4560 Alignment = std::nullopt;
4562 const uint64_t StackAlignMask = StackAlign.
value() - 1U;
4567 DAG.getConstant(StackAlignMask, dl, IntPtr),
4572 DAG.getSignedConstant(~StackAlignMask, dl, IntPtr));
4576 DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4578 SDValue DSA =
DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs,
Ops);
4586 return I.getMetadata(LLVMContext::MD_range);
4591 if (std::optional<ConstantRange> CR = CB->getRange())
4595 return std::nullopt;
4598void SelectionDAGBuilder::visitLoad(
const LoadInst &
I) {
4600 return visitAtomicLoad(
I);
4602 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4603 const Value *SV =
I.getOperand(0);
4608 if (Arg->hasSwiftErrorAttr())
4609 return visitLoadFromSwiftError(
I);
4613 if (Alloca->isSwiftError())
4614 return visitLoadFromSwiftError(
I);
4620 Type *Ty =
I.getType();
4624 unsigned NumValues = ValueVTs.
size();
4628 Align Alignment =
I.getAlign();
4629 AAMDNodes AAInfo =
I.getAAMetadata();
4631 bool isVolatile =
I.isVolatile();
4636 bool ConstantMemory =
false;
4643 BatchAA->pointsToConstantMemory(MemoryLocation(
4648 Root =
DAG.getEntryNode();
4649 ConstantMemory =
true;
4653 Root =
DAG.getRoot();
4664 unsigned ChainI = 0;
4665 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4681 MachinePointerInfo PtrInfo =
4683 ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue())
4684 : MachinePointerInfo();
4687 SDValue L =
DAG.getLoad(MemVTs[i], dl, Root,
A, PtrInfo, Alignment,
4688 MMOFlags, AAInfo, Ranges);
4689 Chains[ChainI] =
L.getValue(1);
4691 if (MemVTs[i] != ValueVTs[i])
4692 L =
DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]);
4697 if (!ConstantMemory) {
4707 DAG.getVTList(ValueVTs), Values));
4710void SelectionDAGBuilder::visitStoreToSwiftError(
const StoreInst &
I) {
4711 assert(
DAG.getTargetLoweringInfo().supportSwiftError() &&
4712 "call visitStoreToSwiftError when backend supports swifterror");
4715 SmallVector<uint64_t, 4>
Offsets;
4716 const Value *SrcV =
I.getOperand(0);
4718 SrcV->
getType(), ValueVTs, &Offsets, 0);
4719 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4720 "expect a single EVT for swifterror");
4729 SDValue(Src.getNode(), Src.getResNo()));
4730 DAG.setRoot(CopyNode);
4733void SelectionDAGBuilder::visitLoadFromSwiftError(
const LoadInst &
I) {
4734 assert(
DAG.getTargetLoweringInfo().supportSwiftError() &&
4735 "call visitLoadFromSwiftError when backend supports swifterror");
4738 !
I.hasMetadata(LLVMContext::MD_nontemporal) &&
4739 !
I.hasMetadata(LLVMContext::MD_invariant_load) &&
4740 "Support volatile, non temporal, invariant for load_from_swift_error");
4742 const Value *SV =
I.getOperand(0);
4743 Type *Ty =
I.getType();
4746 !
BatchAA->pointsToConstantMemory(MemoryLocation(
4748 I.getAAMetadata()))) &&
4749 "load_from_swift_error should not be constant memory");
4752 SmallVector<uint64_t, 4>
Offsets;
4754 ValueVTs, &Offsets, 0);
4755 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4756 "expect a single EVT for swifterror");
4766void SelectionDAGBuilder::visitStore(
const StoreInst &
I) {
4768 return visitAtomicStore(
I);
4770 const Value *SrcV =
I.getOperand(0);
4771 const Value *PtrV =
I.getOperand(1);
4773 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4778 if (Arg->hasSwiftErrorAttr())
4779 return visitStoreToSwiftError(
I);
4783 if (Alloca->isSwiftError())
4784 return visitStoreToSwiftError(
I);
4791 SrcV->
getType(), ValueVTs, &MemVTs, &Offsets);
4792 unsigned NumValues = ValueVTs.
size();
4805 Align Alignment =
I.getAlign();
4806 AAMDNodes AAInfo =
I.getAAMetadata();
4810 unsigned ChainI = 0;
4811 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4821 MachinePointerInfo PtrInfo =
4823 ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue())
4824 : MachinePointerInfo();
4828 if (MemVTs[i] != ValueVTs[i])
4829 Val =
DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4831 DAG.getStore(Root, dl, Val,
Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4832 Chains[ChainI] = St;
4838 DAG.setRoot(StoreNode);
4841void SelectionDAGBuilder::visitMaskedStore(
const CallInst &
I,
4842 bool IsCompressing) {
4845 Value *Src0Operand =
I.getArgOperand(0);
4846 Value *PtrOperand =
I.getArgOperand(1);
4847 Value *MaskOperand =
I.getArgOperand(2);
4848 Align Alignment =
I.getParamAlign(1).valueOrOne();
4858 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
4861 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
4862 MachinePointerInfo(PtrOperand), MMOFlags,
4865 const auto &TLI =
DAG.getTargetLoweringInfo();
4870 I.getArgOperand(0)->getType(),
true)
4876 DAG.setRoot(StoreNode);
4902 assert(
Ptr->getType()->isVectorTy() &&
"Unexpected pointer type");
4906 C =
C->getSplatValue();
4920 if (!
GEP ||
GEP->getParent() != CurBB)
4923 if (
GEP->getNumOperands() != 2)
4926 const Value *BasePtr =
GEP->getPointerOperand();
4927 const Value *IndexVal =
GEP->getOperand(
GEP->getNumOperands() - 1);
4933 TypeSize ScaleVal =
DL.getTypeAllocSize(
GEP->getResultElementType());
4938 if (ScaleVal != 1 &&
4950void SelectionDAGBuilder::visitMaskedScatter(
const CallInst &
I) {
4954 const Value *
Ptr =
I.getArgOperand(1);
4958 Align Alignment =
I.getParamAlign(1).valueOrOne();
4959 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4967 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
4968 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
4978 EVT IdxVT =
Index.getValueType();
4986 SDValue Scatter =
DAG.getMaskedScatter(
DAG.getVTList(MVT::Other), VT, sdl,
4988 DAG.setRoot(Scatter);
4992void SelectionDAGBuilder::visitMaskedLoad(
const CallInst &
I,
bool IsExpanding) {
4995 Value *PtrOperand =
I.getArgOperand(0);
4996 Value *MaskOperand =
I.getArgOperand(1);
4997 Value *Src0Operand =
I.getArgOperand(2);
4998 Align Alignment =
I.getParamAlign(0).valueOrOne();
5006 AAMDNodes AAInfo =
I.getAAMetadata();
5013 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
5016 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
5019 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5020 MachinePointerInfo(PtrOperand), MMOFlags,
5023 const auto &TLI =
DAG.getTargetLoweringInfo();
5035 DAG.getMaskedLoad(VT, sdl, InChain,
Ptr,
Offset, Mask, Src0, VT, MMO,
5042void SelectionDAGBuilder::visitMaskedGather(
const CallInst &
I) {
5046 const Value *
Ptr =
I.getArgOperand(0);
5050 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5052 Align Alignment =
I.getParamAlign(0).valueOrOne();
5062 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
5063 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5075 EVT IdxVT =
Index.getValueType();
5084 DAG.getMaskedGather(
DAG.getVTList(VT, MVT::Other), VT, sdl,
Ops, MMO,
5100 SDVTList VTs =
DAG.getVTList(MemVT, MVT::i1, MVT::Other);
5102 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5105 MachineFunction &MF =
DAG.getMachineFunction();
5107 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5108 DAG.getEVTAlign(MemVT), AAMDNodes(),
nullptr, SSID, SuccessOrdering,
5111 SDValue L =
DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
5112 dl, MemVT, VTs, InChain,
5120 DAG.setRoot(OutChain);
5123void SelectionDAGBuilder::visitAtomicRMW(
const AtomicRMWInst &
I) {
5126 switch (
I.getOperation()) {
5144 NT = ISD::ATOMIC_LOAD_FMAXIMUM;
5147 NT = ISD::ATOMIC_LOAD_FMINIMUM;
5150 NT = ISD::ATOMIC_LOAD_UINC_WRAP;
5153 NT = ISD::ATOMIC_LOAD_UDEC_WRAP;
5156 NT = ISD::ATOMIC_LOAD_USUB_COND;
5159 NT = ISD::ATOMIC_LOAD_USUB_SAT;
5168 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5171 MachineFunction &MF =
DAG.getMachineFunction();
5173 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5174 DAG.getEVTAlign(MemVT), AAMDNodes(),
nullptr, SSID, Ordering);
5177 DAG.getAtomic(NT, dl, MemVT, InChain,
5184 DAG.setRoot(OutChain);
5187void SelectionDAGBuilder::visitFence(
const FenceInst &
I) {
5189 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5192 Ops[1] =
DAG.getTargetConstant((
unsigned)
I.getOrdering(), dl,
5194 Ops[2] =
DAG.getTargetConstant(
I.getSyncScopeID(), dl,
5201void SelectionDAGBuilder::visitAtomicLoad(
const LoadInst &
I) {
5208 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5219 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5220 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5221 I.getAlign(), AAMDNodes(), Ranges, SSID, Order);
5231 L =
DAG.getPtrExtOrTrunc(L, dl, VT);
5234 DAG.setRoot(OutChain);
5237void SelectionDAGBuilder::visitAtomicStore(
const StoreInst &
I) {
5245 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5255 MachineFunction &MF =
DAG.getMachineFunction();
5257 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5258 I.getAlign(), AAMDNodes(),
nullptr, SSID, Ordering);
5262 Val =
DAG.getPtrExtOrTrunc(Val, dl, MemVT);
5266 DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain, Val,
Ptr, MMO);
5269 DAG.setRoot(OutChain);
5274void SelectionDAGBuilder::visitTargetIntrinsic(
const CallInst &
I,
5280 bool HasChain = !
F->doesNotAccessMemory();
5282 HasChain &&
F->onlyReadsMemory() &&
F->willReturn() &&
F->doesNotThrow();
5289 Ops.push_back(
DAG.getRoot());
5296 TargetLowering::IntrinsicInfo
Info;
5297 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5299 DAG.getMachineFunction(),
5309 for (
unsigned i = 0, e =
I.arg_size(); i != e; ++i) {
5310 const Value *Arg =
I.getArgOperand(i);
5311 if (!
I.paramHasAttr(i, Attribute::ImmArg)) {
5319 assert(CI->getBitWidth() <= 64 &&
5320 "large intrinsic immediates not handled");
5321 Ops.push_back(
DAG.getTargetConstant(*CI, SDLoc(), VT));
5334 SDVTList VTs =
DAG.getVTList(ValueVTs);
5339 Flags.copyFMF(*FPMO);
5340 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
5346 auto *Token = Bundle->Inputs[0].get();
5348 assert(
Ops.back().getValueType() != MVT::Glue &&
5349 "Did not expected another glue node here.");
5351 DAG.getNode(ISD::CONVERGENCECTRL_GLUE, {}, MVT::Glue, ConvControlToken);
5352 Ops.push_back(ConvControlToken);
5357 if (IsTgtIntrinsic) {
5362 MachinePointerInfo MPI;
5364 MPI = MachinePointerInfo(
Info.ptrVal,
Info.offset);
5365 else if (
Info.fallbackAddressSpace)
5366 MPI = MachinePointerInfo(*
Info.fallbackAddressSpace);
5367 EVT MemVT =
Info.memVT;
5369 if (
Size.hasValue() && !
Size.getValue())
5371 Align Alignment =
Info.align.value_or(
DAG.getEVTAlign(MemVT));
5372 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5373 MPI,
Info.flags,
Size, Alignment,
I.getAAMetadata(),
nullptr,
5377 }
else if (!HasChain) {
5379 }
else if (!
I.getType()->isVoidTy()) {
5393 if (!
I.getType()->isVoidTy()) {
5397 MaybeAlign Alignment =
I.getRetAlign();
5420 return DAG.
getNode(ISD::BITCAST, dl, MVT::f32, t2);
5461 SDValue TwoToFractionalPartOfX;
5529 SDValue t13 = DAG.
getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
5530 return DAG.
getNode(ISD::BITCAST, dl, MVT::f32,
5538 if (
Op.getValueType() == MVT::f32 &&
5553 return DAG.
getNode(ISD::FEXP, dl,
Op.getValueType(),
Op, Flags);
5562 if (
Op.getValueType() == MVT::f32 &&
5652 return DAG.
getNode(ISD::FLOG, dl,
Op.getValueType(),
Op, Flags);
5661 if (
Op.getValueType() == MVT::f32 &&
5745 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5749 return DAG.
getNode(ISD::FLOG2, dl,
Op.getValueType(),
Op, Flags);
5758 if (
Op.getValueType() == MVT::f32 &&
5835 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5839 return DAG.
getNode(ISD::FLOG10, dl,
Op.getValueType(),
Op, Flags);
5846 if (
Op.getValueType() == MVT::f32 &&
5851 return DAG.
getNode(ISD::FEXP2, dl,
Op.getValueType(),
Op, Flags);
5859 bool IsExp10 =
false;
5860 if (
LHS.getValueType() == MVT::f32 &&
RHS.getValueType() == MVT::f32 &&
5864 IsExp10 = LHSC->isExactlyValue(Ten);
5891 unsigned Val = RHSC->getSExtValue();
5920 CurSquare, CurSquare);
5925 if (RHSC->getSExtValue() < 0)
5939 EVT VT =
LHS.getValueType();
5962 if ((ScaleInt > 0 || (Saturating &&
Signed)) &&
5966 Opcode, VT, ScaleInt);
6001 switch (
N.getOpcode()) {
6005 Op.getValueType().getSizeInBits());
6030bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
6037 MachineFunction &MF =
DAG.getMachineFunction();
6038 const TargetInstrInfo *
TII =
DAG.getSubtarget().getInstrInfo();
6042 auto MakeVRegDbgValue = [&](
Register Reg, DIExpression *FragExpr,
6047 auto &Inst =
TII->get(TargetOpcode::DBG_INSTR_REF);
6054 auto *NewDIExpr = FragExpr;
6061 return BuildMI(MF,
DL, Inst,
false, MOs, Variable, NewDIExpr);
6064 auto &Inst =
TII->get(TargetOpcode::DBG_VALUE);
6065 return BuildMI(MF,
DL, Inst, Indirect,
Reg, Variable, FragExpr);
6069 if (Kind == FuncArgumentDbgValueKind::Value) {
6074 if (!IsInEntryBlock)
6090 bool VariableIsFunctionInputArg =
Variable->isParameter() &&
6091 !
DL->getInlinedAt();
6093 if (!IsInPrologue && !VariableIsFunctionInputArg)
6127 if (VariableIsFunctionInputArg) {
6129 if (ArgNo >=
FuncInfo.DescribedArgs.size())
6130 FuncInfo.DescribedArgs.resize(ArgNo + 1,
false);
6131 else if (!IsInPrologue &&
FuncInfo.DescribedArgs.test(ArgNo))
6132 return !NodeMap[
V].getNode();
6137 bool IsIndirect =
false;
6138 std::optional<MachineOperand>
Op;
6140 int FI =
FuncInfo.getArgumentFrameIndex(Arg);
6141 if (FI != std::numeric_limits<int>::max())
6145 if (!
Op &&
N.getNode()) {
6148 if (ArgRegsAndSizes.
size() == 1)
6149 Reg = ArgRegsAndSizes.
front().first;
6152 MachineRegisterInfo &RegInfo = MF.
getRegInfo();
6159 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6163 if (!
Op &&
N.getNode()) {
6167 if (FrameIndexSDNode *FINode =
6177 for (
const auto &RegAndSize : SplitRegs) {
6181 int RegFragmentSizeInBits = RegAndSize.second;
6183 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
6186 if (
Offset >= ExprFragmentSizeInBits)
6190 if (
Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
6191 RegFragmentSizeInBits = ExprFragmentSizeInBits -
Offset;
6196 Expr,
Offset, RegFragmentSizeInBits);
6197 Offset += RegAndSize.second;
6200 if (!FragmentExpr) {
6201 SDDbgValue *SDV =
DAG.getConstantDbgValue(
6203 DAG.AddDbgValue(SDV,
false);
6206 MachineInstr *NewMI =
6207 MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
6208 Kind != FuncArgumentDbgValueKind::Value);
6209 FuncInfo.ArgDbgValues.push_back(NewMI);
6216 if (VMI !=
FuncInfo.ValueMap.end()) {
6217 const auto &TLI =
DAG.getTargetLoweringInfo();
6218 RegsForValue RFV(
V->getContext(), TLI,
DAG.getDataLayout(), VMI->second,
6219 V->getType(), std::nullopt);
6220 if (RFV.occupiesMultipleRegs()) {
6221 splitMultiRegDbgValue(RFV.getRegsAndSizes());
6226 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6227 }
else if (ArgRegsAndSizes.
size() > 1) {
6230 splitMultiRegDbgValue(ArgRegsAndSizes);
6239 "Expected inlined-at fields to agree");
6240 MachineInstr *NewMI =
nullptr;
6243 NewMI = MakeVRegDbgValue(
Op->getReg(), Expr, IsIndirect);
6245 NewMI =
BuildMI(MF,
DL,
TII->get(TargetOpcode::DBG_VALUE),
true, *
Op,
6249 FuncInfo.ArgDbgValues.push_back(NewMI);
6258 unsigned DbgSDNodeOrder) {
6270 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
6271 false, dl, DbgSDNodeOrder);
6273 return DAG.getDbgValue(Variable, Expr,
N.getNode(),
N.getResNo(),
6274 false, dl, DbgSDNodeOrder);
6279 case Intrinsic::smul_fix:
6281 case Intrinsic::umul_fix:
6283 case Intrinsic::smul_fix_sat:
6285 case Intrinsic::umul_fix_sat:
6287 case Intrinsic::sdiv_fix:
6289 case Intrinsic::udiv_fix:
6291 case Intrinsic::sdiv_fix_sat:
6293 case Intrinsic::udiv_fix_sat:
6306 "expected call_preallocated_setup Value");
6307 for (
const auto *U : PreallocatedSetup->
users()) {
6309 const Function *Fn = UseCall->getCalledFunction();
6310 if (!Fn || Fn->
getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6320bool SelectionDAGBuilder::visitEntryValueDbgValue(
6330 auto ArgIt =
FuncInfo.ValueMap.find(Arg);
6331 if (ArgIt ==
FuncInfo.ValueMap.end()) {
6333 dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6334 "couldn't find an associated register for the Argument\n");
6337 Register ArgVReg = ArgIt->getSecond();
6339 for (
auto [PhysReg, VirtReg] :
FuncInfo.RegInfo->liveins())
6340 if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6341 SDDbgValue *SDV =
DAG.getVRegDbgValue(
6342 Variable, Expr, PhysReg,
false , DbgLoc, SDNodeOrder);
6343 DAG.AddDbgValue(SDV,
false );
6346 LLVM_DEBUG(
dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6347 "couldn't find a physical register\n");
6352void SelectionDAGBuilder::visitConvergenceControl(
const CallInst &
I,
6355 switch (Intrinsic) {
6356 case Intrinsic::experimental_convergence_anchor:
6357 setValue(&
I,
DAG.getNode(ISD::CONVERGENCECTRL_ANCHOR, sdl, MVT::Untyped));
6359 case Intrinsic::experimental_convergence_entry:
6360 setValue(&
I,
DAG.getNode(ISD::CONVERGENCECTRL_ENTRY, sdl, MVT::Untyped));
6362 case Intrinsic::experimental_convergence_loop: {
6364 auto *Token = Bundle->Inputs[0].get();
6365 setValue(&
I,
DAG.getNode(ISD::CONVERGENCECTRL_LOOP, sdl, MVT::Untyped,
6372void SelectionDAGBuilder::visitVectorHistogram(
const CallInst &
I,
6373 unsigned IntrinsicID) {
6376 assert(IntrinsicID == Intrinsic::experimental_vector_histogram_add &&
6377 "Tried to lower unsupported histogram type");
6379 Value *
Ptr =
I.getOperand(0);
6383 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6384 DataLayout TargetDL =
DAG.getDataLayout();
6386 Align Alignment =
DAG.getEVTAlign(VT);
6397 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
6399 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
6400 MachinePointerInfo(AS),
6411 EVT IdxVT =
Index.getValueType();
6418 SDValue ID =
DAG.getTargetConstant(IntrinsicID, sdl, MVT::i32);
6421 SDValue Histogram =
DAG.getMaskedHistogram(
DAG.getVTList(MVT::Other), VT, sdl,
6425 DAG.setRoot(Histogram);
6428void SelectionDAGBuilder::visitVectorExtractLastActive(
const CallInst &
I,
6430 assert(Intrinsic == Intrinsic::experimental_vector_extract_last_active &&
6431 "Tried lowering invalid vector extract last");
6433 const DataLayout &Layout =
DAG.getDataLayout();
6437 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6441 SDValue Idx =
DAG.getNode(ISD::VECTOR_FIND_LAST_ACTIVE, sdl, ExtVT, Mask);
6447 EVT BoolVT =
Mask.getValueType().getScalarType();
6448 SDValue AnyActive =
DAG.getNode(ISD::VECREDUCE_OR, sdl, BoolVT, Mask);
6449 Result =
DAG.getSelect(sdl, ResVT, AnyActive, Result, PassThru);
6456void SelectionDAGBuilder::visitIntrinsicCall(
const CallInst &
I,
6458 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6465 Flags.copyFMF(*FPOp);
6467 switch (Intrinsic) {
6470 visitTargetIntrinsic(
I, Intrinsic);
6472 case Intrinsic::vscale: {
6477 case Intrinsic::vastart: visitVAStart(
I);
return;
6478 case Intrinsic::vaend: visitVAEnd(
I);
return;
6479 case Intrinsic::vacopy: visitVACopy(
I);
return;
6480 case Intrinsic::returnaddress:
6485 case Intrinsic::addressofreturnaddress:
6490 case Intrinsic::sponentry:
6495 case Intrinsic::frameaddress:
6500 case Intrinsic::read_volatile_register:
6501 case Intrinsic::read_register: {
6502 Value *
Reg =
I.getArgOperand(0);
6508 DAG.getVTList(VT, MVT::Other), Chain,
RegName);
6513 case Intrinsic::write_register: {
6514 Value *
Reg =
I.getArgOperand(0);
6515 Value *RegValue =
I.getArgOperand(1);
6523 case Intrinsic::memcpy:
6524 case Intrinsic::memcpy_inline: {
6530 "memcpy_inline needs constant size");
6532 Align DstAlign = MCI.getDestAlign().valueOrOne();
6533 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6534 Align Alignment = std::min(DstAlign, SrcAlign);
6535 bool isVol = MCI.isVolatile();
6539 SDValue MC =
DAG.getMemcpy(Root, sdl, Dst, Src,
Size, Alignment, isVol,
6540 MCI.isForceInlined(), &
I, std::nullopt,
6541 MachinePointerInfo(
I.getArgOperand(0)),
6542 MachinePointerInfo(
I.getArgOperand(1)),
6544 updateDAGForMaybeTailCall(MC);
6547 case Intrinsic::memset:
6548 case Intrinsic::memset_inline: {
6554 "memset_inline needs constant size");
6556 Align DstAlign = MSII.getDestAlign().valueOrOne();
6557 bool isVol = MSII.isVolatile();
6560 Root, sdl, Dst, Value,
Size, DstAlign, isVol, MSII.isForceInlined(),
6561 &
I, MachinePointerInfo(
I.getArgOperand(0)),
I.getAAMetadata());
6562 updateDAGForMaybeTailCall(MC);
6565 case Intrinsic::memmove: {
6571 Align DstAlign = MMI.getDestAlign().valueOrOne();
6572 Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6573 Align Alignment = std::min(DstAlign, SrcAlign);
6574 bool isVol = MMI.isVolatile();
6578 SDValue MM =
DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol, &
I,
6580 MachinePointerInfo(
I.getArgOperand(0)),
6581 MachinePointerInfo(
I.getArgOperand(1)),
6583 updateDAGForMaybeTailCall(MM);
6586 case Intrinsic::memcpy_element_unordered_atomic: {
6592 Type *LengthTy =
MI.getLength()->getType();
6593 unsigned ElemSz =
MI.getElementSizeInBytes();
6597 isTC, MachinePointerInfo(
MI.getRawDest()),
6598 MachinePointerInfo(
MI.getRawSource()));
6599 updateDAGForMaybeTailCall(MC);
6602 case Intrinsic::memmove_element_unordered_atomic: {
6608 Type *LengthTy =
MI.getLength()->getType();
6609 unsigned ElemSz =
MI.getElementSizeInBytes();
6613 isTC, MachinePointerInfo(
MI.getRawDest()),
6614 MachinePointerInfo(
MI.getRawSource()));
6615 updateDAGForMaybeTailCall(MC);
6618 case Intrinsic::memset_element_unordered_atomic: {
6624 Type *LengthTy =
MI.getLength()->getType();
6625 unsigned ElemSz =
MI.getElementSizeInBytes();
6629 isTC, MachinePointerInfo(
MI.getRawDest()));
6630 updateDAGForMaybeTailCall(MC);
6633 case Intrinsic::call_preallocated_setup: {
6635 SDValue SrcValue =
DAG.getSrcValue(PreallocatedCall);
6636 SDValue Res =
DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
6642 case Intrinsic::call_preallocated_arg: {
6644 SDValue SrcValue =
DAG.getSrcValue(PreallocatedCall);
6651 ISD::PREALLOCATED_ARG, sdl,
6658 case Intrinsic::eh_typeid_for: {
6661 unsigned TypeID =
DAG.getMachineFunction().getTypeIDFor(GV);
6662 Res =
DAG.getConstant(
TypeID, sdl, MVT::i32);
6667 case Intrinsic::eh_return_i32:
6668 case Intrinsic::eh_return_i64:
6669 DAG.getMachineFunction().setCallsEHReturn(
true);
6676 case Intrinsic::eh_unwind_init:
6677 DAG.getMachineFunction().setCallsUnwindInit(
true);
6679 case Intrinsic::eh_dwarf_cfa:
6684 case Intrinsic::eh_sjlj_callsite: {
6686 assert(
FuncInfo.getCurrentCallSite() == 0 &&
"Overlapping call sites!");
6691 case Intrinsic::eh_sjlj_functioncontext: {
6693 MachineFrameInfo &MFI =
DAG.getMachineFunction().getFrameInfo();
6696 int FI =
FuncInfo.StaticAllocaMap[FnCtx];
6700 case Intrinsic::eh_sjlj_setjmp: {
6705 DAG.getVTList(MVT::i32, MVT::Other),
Ops);
6707 DAG.setRoot(
Op.getValue(1));
6710 case Intrinsic::eh_sjlj_longjmp:
6714 case Intrinsic::eh_sjlj_setup_dispatch:
6718 case Intrinsic::masked_gather:
6719 visitMaskedGather(
I);
6721 case Intrinsic::masked_load:
6724 case Intrinsic::masked_scatter:
6725 visitMaskedScatter(
I);
6727 case Intrinsic::masked_store:
6728 visitMaskedStore(
I);
6730 case Intrinsic::masked_expandload:
6731 visitMaskedLoad(
I,
true );
6733 case Intrinsic::masked_compressstore:
6734 visitMaskedStore(
I,
true );
6736 case Intrinsic::powi:
6740 case Intrinsic::log:
6743 case Intrinsic::log2:
6747 case Intrinsic::log10:
6751 case Intrinsic::exp:
6754 case Intrinsic::exp2:
6758 case Intrinsic::pow:
6762 case Intrinsic::sqrt:
6763 case Intrinsic::fabs:
6764 case Intrinsic::sin:
6765 case Intrinsic::cos:
6766 case Intrinsic::tan:
6767 case Intrinsic::asin:
6768 case Intrinsic::acos:
6769 case Intrinsic::atan:
6770 case Intrinsic::sinh:
6771 case Intrinsic::cosh:
6772 case Intrinsic::tanh:
6773 case Intrinsic::exp10:
6774 case Intrinsic::floor:
6775 case Intrinsic::ceil:
6776 case Intrinsic::trunc:
6777 case Intrinsic::rint:
6778 case Intrinsic::nearbyint:
6779 case Intrinsic::round:
6780 case Intrinsic::roundeven:
6781 case Intrinsic::canonicalize: {
6784 switch (Intrinsic) {
6786 case Intrinsic::sqrt: Opcode = ISD::FSQRT;
break;
6787 case Intrinsic::fabs: Opcode = ISD::FABS;
break;
6788 case Intrinsic::sin: Opcode = ISD::FSIN;
break;
6789 case Intrinsic::cos: Opcode = ISD::FCOS;
break;
6790 case Intrinsic::tan: Opcode = ISD::FTAN;
break;
6791 case Intrinsic::asin: Opcode = ISD::FASIN;
break;
6792 case Intrinsic::acos: Opcode = ISD::FACOS;
break;
6793 case Intrinsic::atan: Opcode = ISD::FATAN;
break;
6794 case Intrinsic::sinh: Opcode = ISD::FSINH;
break;
6795 case Intrinsic::cosh: Opcode = ISD::FCOSH;
break;
6796 case Intrinsic::tanh: Opcode = ISD::FTANH;
break;
6797 case Intrinsic::exp10: Opcode = ISD::FEXP10;
break;
6798 case Intrinsic::floor: Opcode = ISD::FFLOOR;
break;
6799 case Intrinsic::ceil: Opcode = ISD::FCEIL;
break;
6800 case Intrinsic::trunc: Opcode = ISD::FTRUNC;
break;
6801 case Intrinsic::rint: Opcode = ISD::FRINT;
break;
6802 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT;
break;
6803 case Intrinsic::round: Opcode = ISD::FROUND;
break;
6804 case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN;
break;
6810 getValue(
I.getArgOperand(0)).getValueType(),
6814 case Intrinsic::atan2:
6816 getValue(
I.getArgOperand(0)).getValueType(),
6820 case Intrinsic::lround:
6821 case Intrinsic::llround:
6822 case Intrinsic::lrint:
6823 case Intrinsic::llrint: {
6826 switch (Intrinsic) {
6828 case Intrinsic::lround: Opcode = ISD::LROUND;
break;
6829 case Intrinsic::llround: Opcode = ISD::LLROUND;
break;
6830 case Intrinsic::lrint: Opcode = ISD::LRINT;
break;
6831 case Intrinsic::llrint: Opcode = ISD::LLRINT;
break;
6840 case Intrinsic::minnum:
6842 getValue(
I.getArgOperand(0)).getValueType(),
6846 case Intrinsic::maxnum:
6848 getValue(
I.getArgOperand(0)).getValueType(),
6852 case Intrinsic::minimum:
6854 getValue(
I.getArgOperand(0)).getValueType(),
6858 case Intrinsic::maximum:
6860 getValue(
I.getArgOperand(0)).getValueType(),
6864 case Intrinsic::minimumnum:
6866 getValue(
I.getArgOperand(0)).getValueType(),
6870 case Intrinsic::maximumnum:
6872 getValue(
I.getArgOperand(0)).getValueType(),
6876 case Intrinsic::copysign:
6878 getValue(
I.getArgOperand(0)).getValueType(),
6882 case Intrinsic::ldexp:
6884 getValue(
I.getArgOperand(0)).getValueType(),
6888 case Intrinsic::modf:
6889 case Intrinsic::sincos:
6890 case Intrinsic::sincospi:
6891 case Intrinsic::frexp: {
6893 switch (Intrinsic) {
6896 case Intrinsic::sincos:
6897 Opcode = ISD::FSINCOS;
6899 case Intrinsic::sincospi:
6900 Opcode = ISD::FSINCOSPI;
6902 case Intrinsic::modf:
6903 Opcode = ISD::FMODF;
6905 case Intrinsic::frexp:
6906 Opcode = ISD::FFREXP;
6911 SDVTList VTs =
DAG.getVTList(ValueVTs);
6913 &
I,
DAG.getNode(Opcode, sdl, VTs,
getValue(
I.getArgOperand(0)), Flags));
6916 case Intrinsic::arithmetic_fence: {
6918 getValue(
I.getArgOperand(0)).getValueType(),
6922 case Intrinsic::fma:
6928#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
6929 case Intrinsic::INTRINSIC:
6930#include "llvm/IR/ConstrainedOps.def"
6933#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6934#include "llvm/IR/VPIntrinsics.def"
6937 case Intrinsic::fptrunc_round: {
6941 std::optional<RoundingMode> RoundMode =
6949 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
6954 DAG.getTargetConstant((
int)*RoundMode, sdl, MVT::i32));
6959 case Intrinsic::fmuladd: {
6964 getValue(
I.getArgOperand(0)).getValueType(),
6971 getValue(
I.getArgOperand(0)).getValueType(),
6987 case Intrinsic::convert_to_fp16:
6991 DAG.getTargetConstant(0, sdl,
6994 case Intrinsic::convert_from_fp16:
6997 DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
7000 case Intrinsic::fptosi_sat: {
7007 case Intrinsic::fptoui_sat: {
7014 case Intrinsic::set_rounding:
7015 Res =
DAG.getNode(ISD::SET_ROUNDING, sdl, MVT::Other,
7020 case Intrinsic::is_fpclass: {
7021 const DataLayout DLayout =
DAG.getDataLayout();
7023 EVT ArgVT = TLI.
getValueType(DLayout,
I.getArgOperand(0)->getType());
7026 MachineFunction &MF =
DAG.getMachineFunction();
7030 Flags.setNoFPExcept(
7031 !
F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
7047 case Intrinsic::get_fpenv: {
7048 const DataLayout DLayout =
DAG.getDataLayout();
7050 Align TempAlign =
DAG.getEVTAlign(EnvVT);
7056 ISD::GET_FPENV, sdl,
7065 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
7068 Chain =
DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7069 Res =
DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
7075 case Intrinsic::set_fpenv: {
7076 const DataLayout DLayout =
DAG.getDataLayout();
7079 Align TempAlign =
DAG.getEVTAlign(EnvVT);
7084 Chain =
DAG.getNode(ISD::SET_FPENV, sdl, MVT::Other, Chain, Env);
7092 Chain =
DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
7094 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
7097 Chain =
DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7102 case Intrinsic::reset_fpenv:
7103 DAG.setRoot(
DAG.getNode(ISD::RESET_FPENV, sdl, MVT::Other,
getRoot()));
7105 case Intrinsic::get_fpmode:
7107 ISD::GET_FPMODE, sdl,
7114 case Intrinsic::set_fpmode:
7115 Res =
DAG.getNode(ISD::SET_FPMODE, sdl, MVT::Other, {
DAG.getRoot()},
7119 case Intrinsic::reset_fpmode: {
7120 Res =
DAG.getNode(ISD::RESET_FPMODE, sdl, MVT::Other,
getRoot());
7124 case Intrinsic::pcmarker: {
7126 DAG.setRoot(
DAG.getNode(ISD::PCMARKER, sdl, MVT::Other,
getRoot(), Tmp));
7129 case Intrinsic::readcyclecounter: {
7131 Res =
DAG.getNode(ISD::READCYCLECOUNTER, sdl,
7132 DAG.getVTList(MVT::i64, MVT::Other),
Op);
7137 case Intrinsic::readsteadycounter: {
7139 Res =
DAG.getNode(ISD::READSTEADYCOUNTER, sdl,
7140 DAG.getVTList(MVT::i64, MVT::Other),
Op);
7145 case Intrinsic::bitreverse:
7147 getValue(
I.getArgOperand(0)).getValueType(),
7150 case Intrinsic::bswap:
7152 getValue(
I.getArgOperand(0)).getValueType(),
7155 case Intrinsic::cttz: {
7163 case Intrinsic::ctlz: {
7171 case Intrinsic::ctpop: {
7177 case Intrinsic::fshl:
7178 case Intrinsic::fshr: {
7179 bool IsFSHL =
Intrinsic == Intrinsic::fshl;
7183 EVT VT =
X.getValueType();
7194 case Intrinsic::sadd_sat: {
7200 case Intrinsic::uadd_sat: {
7206 case Intrinsic::ssub_sat: {
7212 case Intrinsic::usub_sat: {
7218 case Intrinsic::sshl_sat: {
7224 case Intrinsic::ushl_sat: {
7230 case Intrinsic::smul_fix:
7231 case Intrinsic::umul_fix:
7232 case Intrinsic::smul_fix_sat:
7233 case Intrinsic::umul_fix_sat: {
7241 case Intrinsic::sdiv_fix:
7242 case Intrinsic::udiv_fix:
7243 case Intrinsic::sdiv_fix_sat:
7244 case Intrinsic::udiv_fix_sat: {
7249 Op1, Op2, Op3,
DAG, TLI));
7252 case Intrinsic::smax: {
7258 case Intrinsic::smin: {
7264 case Intrinsic::umax: {
7270 case Intrinsic::umin: {
7276 case Intrinsic::abs: {
7282 case Intrinsic::scmp: {
7289 case Intrinsic::ucmp: {
7296 case Intrinsic::stacksave: {
7299 Res =
DAG.getNode(ISD::STACKSAVE, sdl,
DAG.getVTList(VT, MVT::Other),
Op);
7304 case Intrinsic::stackrestore:
7306 DAG.setRoot(
DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other,
getRoot(), Res));
7308 case Intrinsic::get_dynamic_area_offset: {
7311 Res =
DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl,
DAG.getVTList(ResTy),
7317 case Intrinsic::stackguard: {
7318 MachineFunction &MF =
DAG.getMachineFunction();
7324 Res =
DAG.getPtrExtOrTrunc(Res, sdl, PtrTy);
7329 MachinePointerInfo(
Global, 0), Align,
7338 case Intrinsic::stackprotector: {
7340 MachineFunction &MF =
DAG.getMachineFunction();
7360 Chain, sdl, Src, FIN,
7367 case Intrinsic::objectsize:
7370 case Intrinsic::is_constant:
7373 case Intrinsic::annotation:
7374 case Intrinsic::ptr_annotation:
7375 case Intrinsic::launder_invariant_group:
7376 case Intrinsic::strip_invariant_group:
7381 case Intrinsic::type_test:
7382 case Intrinsic::public_type_test:
7386 case Intrinsic::assume:
7387 case Intrinsic::experimental_noalias_scope_decl:
7388 case Intrinsic::var_annotation:
7389 case Intrinsic::sideeffect:
7394 case Intrinsic::codeview_annotation: {
7396 MachineFunction &MF =
DAG.getMachineFunction();
7400 Res =
DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl,
getRoot(), Label);
7405 case Intrinsic::init_trampoline: {
7413 Ops[4] =
DAG.getSrcValue(
I.getArgOperand(0));
7416 Res =
DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other,
Ops);
7421 case Intrinsic::adjust_trampoline:
7426 case Intrinsic::gcroot: {
7427 assert(
DAG.getMachineFunction().getFunction().hasGC() &&
7428 "only valid in functions with gc specified, enforced by Verifier");
7430 const Value *Alloca =
I.getArgOperand(0)->stripPointerCasts();
7437 case Intrinsic::gcread:
7438 case Intrinsic::gcwrite:
7440 case Intrinsic::get_rounding:
7446 case Intrinsic::expect:
7447 case Intrinsic::expect_with_probability:
7453 case Intrinsic::ubsantrap:
7454 case Intrinsic::debugtrap:
7455 case Intrinsic::trap: {
7456 StringRef TrapFuncName =
7457 I.getAttributes().getFnAttr(
"trap-func-name").getValueAsString();
7458 if (TrapFuncName.
empty()) {
7459 switch (Intrinsic) {
7460 case Intrinsic::trap:
7461 DAG.setRoot(
DAG.getNode(ISD::TRAP, sdl, MVT::Other,
getRoot()));
7463 case Intrinsic::debugtrap:
7464 DAG.setRoot(
DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other,
getRoot()));
7466 case Intrinsic::ubsantrap:
7468 ISD::UBSANTRAP, sdl, MVT::Other,
getRoot(),
7469 DAG.getTargetConstant(
7475 DAG.addNoMergeSiteInfo(
DAG.getRoot().getNode(),
7476 I.hasFnAttr(Attribute::NoMerge));
7480 if (Intrinsic == Intrinsic::ubsantrap) {
7481 Value *Arg =
I.getArgOperand(0);
7485 TargetLowering::CallLoweringInfo CLI(
DAG);
7486 CLI.setDebugLoc(sdl).setChain(
getRoot()).setLibCallee(
7488 DAG.getExternalSymbol(TrapFuncName.
data(),
7491 CLI.NoMerge =
I.hasFnAttr(Attribute::NoMerge);
7497 case Intrinsic::allow_runtime_check:
7498 case Intrinsic::allow_ubsan_check:
7502 case Intrinsic::uadd_with_overflow:
7503 case Intrinsic::sadd_with_overflow:
7504 case Intrinsic::usub_with_overflow:
7505 case Intrinsic::ssub_with_overflow:
7506 case Intrinsic::umul_with_overflow:
7507 case Intrinsic::smul_with_overflow: {
7509 switch (Intrinsic) {
7511 case Intrinsic::uadd_with_overflow:
Op =
ISD::UADDO;
break;
7512 case Intrinsic::sadd_with_overflow:
Op =
ISD::SADDO;
break;
7513 case Intrinsic::usub_with_overflow:
Op =
ISD::USUBO;
break;
7514 case Intrinsic::ssub_with_overflow:
Op =
ISD::SSUBO;
break;
7515 case Intrinsic::umul_with_overflow:
Op =
ISD::UMULO;
break;
7516 case Intrinsic::smul_with_overflow:
Op =
ISD::SMULO;
break;
7522 EVT OverflowVT = MVT::i1;
7527 SDVTList VTs =
DAG.getVTList(ResultVT, OverflowVT);
7531 case Intrinsic::prefetch: {
7544 ISD::PREFETCH, sdl,
DAG.getVTList(MVT::Other),
Ops,
7546 std::nullopt, Flags);
7552 DAG.setRoot(Result);
7555 case Intrinsic::lifetime_start:
7556 case Intrinsic::lifetime_end: {
7557 bool IsStart = (
Intrinsic == Intrinsic::lifetime_start);
7563 if (!LifetimeObject)
7568 auto SI =
FuncInfo.StaticAllocaMap.find(LifetimeObject);
7569 if (SI ==
FuncInfo.StaticAllocaMap.end())
7573 Res =
DAG.getLifetimeNode(IsStart, sdl,
getRoot(), FrameIndex);
7577 case Intrinsic::pseudoprobe: {
7585 case Intrinsic::invariant_start:
7590 case Intrinsic::invariant_end:
7593 case Intrinsic::clear_cache: {
7598 {InputChain, StartVal, EndVal});
7603 case Intrinsic::donothing:
7604 case Intrinsic::seh_try_begin:
7605 case Intrinsic::seh_scope_begin:
7606 case Intrinsic::seh_try_end:
7607 case Intrinsic::seh_scope_end:
7610 case Intrinsic::experimental_stackmap:
7613 case Intrinsic::experimental_patchpoint_void:
7614 case Intrinsic::experimental_patchpoint:
7617 case Intrinsic::experimental_gc_statepoint:
7620 case Intrinsic::experimental_gc_result:
7623 case Intrinsic::experimental_gc_relocate:
7626 case Intrinsic::instrprof_cover:
7628 case Intrinsic::instrprof_increment:
7630 case Intrinsic::instrprof_timestamp:
7632 case Intrinsic::instrprof_value_profile:
7634 case Intrinsic::instrprof_mcdc_parameters:
7636 case Intrinsic::instrprof_mcdc_tvbitmap_update:
7638 case Intrinsic::localescape: {
7639 MachineFunction &MF =
DAG.getMachineFunction();
7640 const TargetInstrInfo *
TII =
DAG.getSubtarget().getInstrInfo();
7644 for (
unsigned Idx = 0,
E =
I.arg_size(); Idx <
E; ++Idx) {
7650 "can only escape static allocas");
7655 TII->get(TargetOpcode::LOCAL_ESCAPE))
7663 case Intrinsic::localrecover: {
7665 MachineFunction &MF =
DAG.getMachineFunction();
7671 unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7675 Value *
FP =
I.getArgOperand(1);
7681 SDValue OffsetSym =
DAG.getMCSymbol(FrameAllocSym, PtrVT);
7686 SDValue Add =
DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
7692 case Intrinsic::fake_use: {
7693 Value *
V =
I.getArgOperand(0);
7698 auto FakeUseValue = [&]() ->
SDValue {
7712 if (!FakeUseValue || FakeUseValue.isUndef())
7715 Ops[1] = FakeUseValue;
7720 DAG.setRoot(
DAG.getNode(ISD::FAKE_USE, sdl, MVT::Other,
Ops));
7724 case Intrinsic::eh_exceptionpointer:
7725 case Intrinsic::eh_exceptioncode: {
7731 SDValue N =
DAG.getCopyFromReg(
DAG.getEntryNode(), sdl, VReg, PtrVT);
7732 if (Intrinsic == Intrinsic::eh_exceptioncode)
7733 N =
DAG.getZExtOrTrunc(
N, sdl, MVT::i32);
7737 case Intrinsic::xray_customevent: {
7740 const auto &Triple =
DAG.getTarget().getTargetTriple();
7749 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
7751 Ops.push_back(LogEntryVal);
7752 Ops.push_back(StrSizeVal);
7753 Ops.push_back(Chain);
7759 MachineSDNode *MN =
DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
7762 DAG.setRoot(patchableNode);
7766 case Intrinsic::xray_typedevent: {
7769 const auto &Triple =
DAG.getTarget().getTargetTriple();
7781 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
7783 Ops.push_back(LogTypeId);
7784 Ops.push_back(LogEntryVal);
7785 Ops.push_back(StrSizeVal);
7786 Ops.push_back(Chain);
7792 MachineSDNode *MN =
DAG.getMachineNode(
7793 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys,
Ops);
7795 DAG.setRoot(patchableNode);
7799 case Intrinsic::experimental_deoptimize:
7802 case Intrinsic::stepvector:
7805 case Intrinsic::vector_reduce_fadd:
7806 case Intrinsic::vector_reduce_fmul:
7807 case Intrinsic::vector_reduce_add:
7808 case Intrinsic::vector_reduce_mul:
7809 case Intrinsic::vector_reduce_and:
7810 case Intrinsic::vector_reduce_or:
7811 case Intrinsic::vector_reduce_xor:
7812 case Intrinsic::vector_reduce_smax:
7813 case Intrinsic::vector_reduce_smin:
7814 case Intrinsic::vector_reduce_umax:
7815 case Intrinsic::vector_reduce_umin:
7816 case Intrinsic::vector_reduce_fmax:
7817 case Intrinsic::vector_reduce_fmin:
7818 case Intrinsic::vector_reduce_fmaximum:
7819 case Intrinsic::vector_reduce_fminimum:
7820 visitVectorReduce(
I, Intrinsic);
7823 case Intrinsic::icall_branch_funnel: {
7829 I.getArgOperand(1),
Offset,
DAG.getDataLayout()));
7832 "llvm.icall.branch.funnel operand must be a GlobalValue");
7833 Ops.push_back(
DAG.getTargetGlobalAddress(
Base, sdl, MVT::i64, 0));
7835 struct BranchFunnelTarget {
7841 for (
unsigned Op = 1,
N =
I.arg_size();
Op !=
N;
Op += 2) {
7844 if (ElemBase !=
Base)
7846 "to the same GlobalValue");
7852 "llvm.icall.branch.funnel operand must be a GlobalValue");
7858 [](
const BranchFunnelTarget &
T1,
const BranchFunnelTarget &T2) {
7859 return T1.Offset < T2.Offset;
7862 for (
auto &
T : Targets) {
7863 Ops.push_back(
DAG.getTargetConstant(
T.Offset, sdl, MVT::i32));
7864 Ops.push_back(
T.Target);
7867 Ops.push_back(
DAG.getRoot());
7868 SDValue N(
DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl,
7877 case Intrinsic::wasm_landingpad_index:
7883 case Intrinsic::aarch64_settag:
7884 case Intrinsic::aarch64_settag_zero: {
7885 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
7886 bool ZeroMemory =
Intrinsic == Intrinsic::aarch64_settag_zero;
7889 getValue(
I.getArgOperand(1)), MachinePointerInfo(
I.getArgOperand(0)),
7895 case Intrinsic::amdgcn_cs_chain: {
7900 Type *RetTy =
I.getType();
7910 for (
unsigned Idx : {2, 3, 1}) {
7911 TargetLowering::ArgListEntry Arg(
getValue(
I.getOperand(Idx)),
7913 Arg.setAttributes(&
I, Idx);
7914 Args.push_back(Arg);
7917 assert(Args[0].IsInReg &&
"SGPR args should be marked inreg");
7918 assert(!Args[1].IsInReg &&
"VGPR args should not be marked inreg");
7919 Args[2].IsInReg =
true;
7922 for (
unsigned Idx = 4; Idx <
I.arg_size(); ++Idx) {
7923 TargetLowering::ArgListEntry Arg(
getValue(
I.getOperand(Idx)),
7925 Arg.setAttributes(&
I, Idx);
7926 Args.push_back(Arg);
7929 TargetLowering::CallLoweringInfo CLI(
DAG);
7932 .setCallee(CC, RetTy, Callee, std::move(Args))
7935 .setConvergent(
I.isConvergent());
7937 std::pair<SDValue, SDValue>
Result =
7941 "Should've lowered as tail call");
7946 case Intrinsic::amdgcn_call_whole_wave: {
7948 bool isTailCall =
I.isTailCall();
7951 for (
unsigned Idx = 1; Idx <
I.arg_size(); ++Idx) {
7952 TargetLowering::ArgListEntry Arg(
getValue(
I.getArgOperand(Idx)),
7953 I.getArgOperand(Idx)->getType());
7954 Arg.setAttributes(&
I, Idx);
7961 Args.push_back(Arg);
7966 auto *Token = Bundle->Inputs[0].get();
7967 ConvControlToken =
getValue(Token);
7970 TargetLowering::CallLoweringInfo CLI(
DAG);
7974 getValue(
I.getArgOperand(0)), std::move(Args))
7978 .setConvergent(
I.isConvergent())
7979 .setConvergenceControlToken(ConvControlToken);
7982 std::pair<SDValue, SDValue>
Result =
7985 if (
Result.first.getNode())
7989 case Intrinsic::ptrmask: {
8005 auto HighOnes =
DAG.getNode(
8006 ISD::SHL, sdl, PtrVT,
DAG.getAllOnesConstant(sdl, PtrVT),
8007 DAG.getShiftAmountConstant(
Mask.getValueType().getFixedSizeInBits(),
8010 DAG.getZExtOrTrunc(Mask, sdl, PtrVT), HighOnes);
8011 }
else if (
Mask.getValueType() != PtrVT)
8012 Mask =
DAG.getPtrExtOrTrunc(Mask, sdl, PtrVT);
8018 case Intrinsic::threadlocal_address: {
8022 case Intrinsic::get_active_lane_mask: {
8026 EVT ElementVT =
Index.getValueType();
8029 setValue(&
I,
DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, sdl, CCVT, Index,
8037 SDValue VectorIndex =
DAG.getSplat(VecTy, sdl, Index);
8038 SDValue VectorTripCount =
DAG.getSplat(VecTy, sdl, TripCount);
8039 SDValue VectorStep =
DAG.getStepVector(sdl, VecTy);
8042 SDValue SetCC =
DAG.getSetCC(sdl, CCVT, VectorInduction,
8047 case Intrinsic::experimental_get_vector_length: {
8049 "Expected positive VF");
8054 EVT CountVT =
Count.getValueType();
8057 visitTargetIntrinsic(
I, Intrinsic);
8066 if (CountVT.
bitsLT(VT)) {
8071 SDValue MaxEVL =
DAG.getElementCount(sdl, CountVT,
8081 case Intrinsic::vector_partial_reduce_add: {
8089 case Intrinsic::experimental_cttz_elts: {
8092 EVT OpVT =
Op.getValueType();
8095 visitTargetIntrinsic(
I, Intrinsic);
8111 ConstantRange VScaleRange(1,
true);
8140 case Intrinsic::vector_insert: {
8148 if (
Index.getValueType() != VectorIdxTy)
8149 Index =
DAG.getVectorIdxConstant(
Index->getAsZExtVal(), sdl);
8156 case Intrinsic::vector_extract: {
8164 if (
Index.getValueType() != VectorIdxTy)
8165 Index =
DAG.getVectorIdxConstant(
Index->getAsZExtVal(), sdl);
8171 case Intrinsic::experimental_vector_match: {
8177 EVT ResVT =
Mask.getValueType();
8183 visitTargetIntrinsic(
I, Intrinsic);
8189 for (
unsigned i = 0; i < SearchSize; ++i) {
8192 DAG.getVectorIdxConstant(i, sdl));
8201 case Intrinsic::vector_reverse:
8202 visitVectorReverse(
I);
8204 case Intrinsic::vector_splice:
8205 visitVectorSplice(
I);
8207 case Intrinsic::callbr_landingpad:
8208 visitCallBrLandingPad(
I);
8210 case Intrinsic::vector_interleave2:
8211 visitVectorInterleave(
I, 2);
8213 case Intrinsic::vector_interleave3:
8214 visitVectorInterleave(
I, 3);
8216 case Intrinsic::vector_interleave4:
8217 visitVectorInterleave(
I, 4);
8219 case Intrinsic::vector_interleave5:
8220 visitVectorInterleave(
I, 5);
8222 case Intrinsic::vector_interleave6:
8223 visitVectorInterleave(
I, 6);
8225 case Intrinsic::vector_interleave7:
8226 visitVectorInterleave(
I, 7);
8228 case Intrinsic::vector_interleave8:
8229 visitVectorInterleave(
I, 8);
8231 case Intrinsic::vector_deinterleave2:
8232 visitVectorDeinterleave(
I, 2);
8234 case Intrinsic::vector_deinterleave3:
8235 visitVectorDeinterleave(
I, 3);
8237 case Intrinsic::vector_deinterleave4:
8238 visitVectorDeinterleave(
I, 4);
8240 case Intrinsic::vector_deinterleave5:
8241 visitVectorDeinterleave(
I, 5);
8243 case Intrinsic::vector_deinterleave6:
8244 visitVectorDeinterleave(
I, 6);
8246 case Intrinsic::vector_deinterleave7:
8247 visitVectorDeinterleave(
I, 7);
8249 case Intrinsic::vector_deinterleave8:
8250 visitVectorDeinterleave(
I, 8);
8252 case Intrinsic::experimental_vector_compress:
8254 getValue(
I.getArgOperand(0)).getValueType(),
8259 case Intrinsic::experimental_convergence_anchor:
8260 case Intrinsic::experimental_convergence_entry:
8261 case Intrinsic::experimental_convergence_loop:
8262 visitConvergenceControl(
I, Intrinsic);
8264 case Intrinsic::experimental_vector_histogram_add: {
8265 visitVectorHistogram(
I, Intrinsic);
8268 case Intrinsic::experimental_vector_extract_last_active: {
8269 visitVectorExtractLastActive(
I, Intrinsic);
8272 case Intrinsic::loop_dependence_war_mask:
8278 case Intrinsic::loop_dependence_raw_mask:
8287void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
8315 PendingConstrainedFP.push_back(OutChain);
8321 PendingConstrainedFPStrict.push_back(OutChain);
8326 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8328 SDVTList VTs =
DAG.getVTList(VT, MVT::Other);
8333 Flags.setNoFPExcept(
true);
8336 Flags.copyFMF(*FPOp);
8341#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
8342 case Intrinsic::INTRINSIC: \
8343 Opcode = ISD::STRICT_##DAGN; \
8345#include "llvm/IR/ConstrainedOps.def"
8346 case Intrinsic::experimental_constrained_fmuladd: {
8353 pushOutChain(
Mul, EB);
8376 if (TM.Options.NoNaNsFPMath)
8384 pushOutChain(Result, EB);
8391 std::optional<unsigned> ResOPC;
8393 case Intrinsic::vp_ctlz: {
8395 ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
8398 case Intrinsic::vp_cttz: {
8400 ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
8403 case Intrinsic::vp_cttz_elts: {
8405 ResOPC = IsZeroPoison ? ISD::VP_CTTZ_ELTS_ZERO_UNDEF : ISD::VP_CTTZ_ELTS;
8408#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \
8409 case Intrinsic::VPID: \
8410 ResOPC = ISD::VPSD; \
8412#include "llvm/IR/VPIntrinsics.def"
8417 "Inconsistency: no SDNode available for this VPIntrinsic!");
8419 if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
8420 *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
8422 return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
8423 : ISD::VP_REDUCE_FMUL;
8429void SelectionDAGBuilder::visitVPLoad(
8441 Alignment =
DAG.getEVTAlign(VT);
8444 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8445 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8448 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8449 MachinePointerInfo(PtrOperand), MMOFlags,
8451 LD =
DAG.getLoadVP(VT,
DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8458void SelectionDAGBuilder::visitVPLoadFF(
8461 assert(OpValues.
size() == 3 &&
"Unexpected number of operands");
8471 Alignment =
DAG.getEVTAlign(VT);
8474 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8475 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8478 LD =
DAG.getLoadFFVP(VT,
DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8483 setValue(&VPIntrin,
DAG.getMergeValues({LD.getValue(0), Trunc},
DL));
8486void SelectionDAGBuilder::visitVPGather(
8490 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8502 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8504 *Alignment, AAInfo, Ranges);
8514 EVT IdxVT =
Index.getValueType();
8520 LD =
DAG.getGatherVP(
8521 DAG.getVTList(VT, MVT::Other), VT,
DL,
8522 {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
8528void SelectionDAGBuilder::visitVPStore(
8532 EVT VT = OpValues[0].getValueType();
8537 Alignment =
DAG.getEVTAlign(VT);
8540 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8543 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8544 MachinePointerInfo(PtrOperand), MMOFlags,
8553void SelectionDAGBuilder::visitVPScatter(
8556 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8558 EVT VT = OpValues[0].getValueType();
8568 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8570 *Alignment, AAInfo);
8580 EVT IdxVT =
Index.getValueType();
8586 ST =
DAG.getScatterVP(
DAG.getVTList(MVT::Other), VT,
DL,
8587 {getMemoryRoot(), OpValues[0], Base, Index, Scale,
8588 OpValues[2], OpValues[3]},
8594void SelectionDAGBuilder::visitVPStridedLoad(
8606 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8608 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8611 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8613 *Alignment, AAInfo, Ranges);
8615 SDValue LD =
DAG.getStridedLoadVP(VT,
DL, InChain, OpValues[0], OpValues[1],
8616 OpValues[2], OpValues[3], MMO,
8624void SelectionDAGBuilder::visitVPStridedStore(
8628 EVT VT = OpValues[0].getValueType();
8634 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8637 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8639 *Alignment, AAInfo);
8643 DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8651void SelectionDAGBuilder::visitVPCmp(
const VPCmpIntrinsic &VPIntrin) {
8652 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8663 if (TM.Options.NoNaNsFPMath)
8676 "Unexpected target EVL type");
8679 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
8682 DAG.getSetCCVP(
DL, DestVT, Op1, Op2, Condition, MaskOp, EVL));
8685void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8693 return visitVPCmp(*CmpI);
8696 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8698 SDVTList VTs =
DAG.getVTList(ValueVTs);
8704 "Unexpected target EVL type");
8708 for (
unsigned I = 0;
I < VPIntrin.
arg_size(); ++
I) {
8710 if (
I == EVLParamPos)
8717 SDNodeFlags SDFlags;
8725 visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8727 case ISD::VP_LOAD_FF:
8728 visitVPLoadFF(VPIntrin, ValueVTs[0], ValueVTs[1], OpValues);
8730 case ISD::VP_GATHER:
8731 visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8733 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8734 visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8737 visitVPStore(VPIntrin, OpValues);
8739 case ISD::VP_SCATTER:
8740 visitVPScatter(VPIntrin, OpValues);
8742 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8743 visitVPStridedStore(VPIntrin, OpValues);
8745 case ISD::VP_FMULADD: {
8746 assert(OpValues.
size() == 5 &&
"Unexpected number of operands");
8747 SDNodeFlags SDFlags;
8752 setValue(&VPIntrin,
DAG.getNode(ISD::VP_FMA,
DL, VTs, OpValues, SDFlags));
8755 ISD::VP_FMUL,
DL, VTs,
8756 {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8758 DAG.getNode(ISD::VP_FADD,
DL, VTs,
8759 {
Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8764 case ISD::VP_IS_FPCLASS: {
8765 const DataLayout DLayout =
DAG.getDataLayout();
8767 auto Constant = OpValues[1]->getAsZExtVal();
8770 {OpValues[0],
Check, OpValues[2], OpValues[3]});
8774 case ISD::VP_INTTOPTR: {
8785 case ISD::VP_PTRTOINT: {
8787 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
8800 case ISD::VP_CTLZ_ZERO_UNDEF:
8802 case ISD::VP_CTTZ_ZERO_UNDEF:
8803 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
8804 case ISD::VP_CTTZ_ELTS: {
8806 DAG.getNode(Opcode,
DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8816 MachineFunction &MF =
DAG.getMachineFunction();
8824 unsigned CallSiteIndex =
FuncInfo.getCurrentCallSite();
8825 if (CallSiteIndex) {
8839 assert(BeginLabel &&
"BeginLabel should've been set");
8841 MachineFunction &MF =
DAG.getMachineFunction();
8853 assert(
II &&
"II should've been set");
8864std::pair<SDValue, SDValue>
8878 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
8881 "Non-null chain expected with non-tail call!");
8882 assert((Result.second.getNode() || !Result.first.getNode()) &&
8883 "Null value expected with tail call!");
8885 if (!Result.second.getNode()) {
8892 PendingExports.clear();
8894 DAG.setRoot(Result.second);
8912 if (Caller->getFnAttribute(
"disable-tail-calls").getValueAsString() ==
8920 if (
DAG.getTargetLoweringInfo().supportSwiftError() &&
8921 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
8930 bool isTailCall,
bool isMustTailCall,
8933 auto &
DL =
DAG.getDataLayout();
8940 const Value *SwiftErrorVal =
nullptr;
8947 const Value *V = *
I;
8950 if (V->getType()->isEmptyTy())
8955 Entry.setAttributes(&CB,
I - CB.
arg_begin());
8967 Args.push_back(Entry);
8978 Value *V = Bundle->Inputs[0];
8980 Entry.IsCFGuardTarget =
true;
8981 Args.push_back(Entry);
8994 "Target doesn't support calls with kcfi operand bundles.");
9002 auto *Token = Bundle->Inputs[0].get();
9003 ConvControlToken =
getValue(Token);
9009 .
setCallee(RetTy, FTy, Callee, std::move(Args), CB)
9021 "This target doesn't support calls with ptrauth operand bundles.");
9025 std::pair<SDValue, SDValue> Result =
lowerInvokable(CLI, EHPadBB);
9027 if (Result.first.getNode()) {
9041 DAG.setRoot(CopyNode);
9057 LoadTy, Builder.DAG.getDataLayout()))
9058 return Builder.getValue(LoadCst);
9064 bool ConstantMemory =
false;
9067 if (Builder.BatchAA && Builder.BatchAA->pointsToConstantMemory(PtrVal)) {
9068 Root = Builder.DAG.getEntryNode();
9069 ConstantMemory =
true;
9072 Root = Builder.DAG.getRoot();
9077 Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root,
Ptr,
9080 if (!ConstantMemory)
9081 Builder.PendingLoads.push_back(LoadVal.
getValue(1));
9087void SelectionDAGBuilder::processIntegerCallValue(
const Instruction &
I,
9090 EVT VT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9101bool SelectionDAGBuilder::visitMemCmpBCmpCall(
const CallInst &
I) {
9102 const Value *
LHS =
I.getArgOperand(0), *
RHS =
I.getArgOperand(1);
9103 const Value *
Size =
I.getArgOperand(2);
9106 EVT CallVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9112 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9116 if (Res.first.getNode()) {
9117 processIntegerCallValue(
I, Res.first,
true);
9131 auto hasFastLoadsAndCompare = [&](
unsigned NumBits) {
9132 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
9154 switch (NumBitsToCompare) {
9166 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
9179 LoadL =
DAG.getBitcast(CmpVT, LoadL);
9180 LoadR =
DAG.getBitcast(CmpVT, LoadR);
9184 processIntegerCallValue(
I, Cmp,
false);
9193bool SelectionDAGBuilder::visitMemChrCall(
const CallInst &
I) {
9194 const Value *Src =
I.getArgOperand(0);
9195 const Value *
Char =
I.getArgOperand(1);
9196 const Value *
Length =
I.getArgOperand(2);
9198 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9199 std::pair<SDValue, SDValue> Res =
9202 MachinePointerInfo(Src));
9203 if (Res.first.getNode()) {
9217bool SelectionDAGBuilder::visitMemPCpyCall(
const CallInst &
I) {
9222 Align DstAlign =
DAG.InferPtrAlign(Dst).valueOrOne();
9223 Align SrcAlign =
DAG.InferPtrAlign(Src).valueOrOne();
9225 Align Alignment = std::min(DstAlign, SrcAlign);
9234 Root, sdl, Dst, Src,
Size, Alignment,
false,
false,
nullptr,
9235 std::nullopt, MachinePointerInfo(
I.getArgOperand(0)),
9236 MachinePointerInfo(
I.getArgOperand(1)),
I.getAAMetadata());
9238 "** memcpy should not be lowered as TailCall in mempcpy context **");
9242 Size =
DAG.getSExtOrTrunc(
Size, sdl, Dst.getValueType());
9255bool SelectionDAGBuilder::visitStrCpyCall(
const CallInst &
I,
bool isStpcpy) {
9256 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9258 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9259 std::pair<SDValue, SDValue> Res =
9262 MachinePointerInfo(Arg0),
9263 MachinePointerInfo(Arg1), isStpcpy);
9264 if (Res.first.getNode()) {
9266 DAG.setRoot(Res.second);
9278bool SelectionDAGBuilder::visitStrCmpCall(
const CallInst &
I) {
9279 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9281 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9282 std::pair<SDValue, SDValue> Res =
9285 MachinePointerInfo(Arg0),
9286 MachinePointerInfo(Arg1));
9287 if (Res.first.getNode()) {
9288 processIntegerCallValue(
I, Res.first,
true);
9301bool SelectionDAGBuilder::visitStrLenCall(
const CallInst &
I) {
9302 const Value *Arg0 =
I.getArgOperand(0);
9304 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9307 if (Res.first.getNode()) {
9308 processIntegerCallValue(
I, Res.first,
false);
9321bool SelectionDAGBuilder::visitStrNLenCall(
const CallInst &
I) {
9322 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9324 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9325 std::pair<SDValue, SDValue> Res =
9328 MachinePointerInfo(Arg0));
9329 if (Res.first.getNode()) {
9330 processIntegerCallValue(
I, Res.first,
false);
9343bool SelectionDAGBuilder::visitUnaryFloatCall(
const CallInst &
I,
9346 if (!
I.onlyReadsMemory())
9363bool SelectionDAGBuilder::visitBinaryFloatCall(
const CallInst &
I,
9366 if (!
I.onlyReadsMemory())
9379void SelectionDAGBuilder::visitCall(
const CallInst &
I) {
9381 if (
I.isInlineAsm()) {
9388 if (Function *
F =
I.getCalledFunction()) {
9389 if (
F->isDeclaration()) {
9391 if (
unsigned IID =
F->getIntrinsicID()) {
9392 visitIntrinsicCall(
I, IID);
9401 if (!
I.isNoBuiltin() && !
I.isStrictFP() && !
F->hasLocalLinkage() &&
9402 F->hasName() &&
LibInfo->getLibFunc(*
F, Func) &&
9403 LibInfo->hasOptimizedCodeGen(Func)) {
9407 if (visitMemCmpBCmpCall(
I))
9410 case LibFunc_copysign:
9411 case LibFunc_copysignf:
9412 case LibFunc_copysignl:
9415 if (
I.onlyReadsMemory()) {
9426 if (visitUnaryFloatCall(
I, ISD::FABS))
9432 if (visitBinaryFloatCall(
I, ISD::FMINNUM))
9438 if (visitBinaryFloatCall(
I, ISD::FMAXNUM))
9441 case LibFunc_fminimum_num:
9442 case LibFunc_fminimum_numf:
9443 case LibFunc_fminimum_numl:
9444 if (visitBinaryFloatCall(
I, ISD::FMINIMUMNUM))
9447 case LibFunc_fmaximum_num:
9448 case LibFunc_fmaximum_numf:
9449 case LibFunc_fmaximum_numl:
9450 if (visitBinaryFloatCall(
I, ISD::FMAXIMUMNUM))
9456 if (visitUnaryFloatCall(
I, ISD::FSIN))
9462 if (visitUnaryFloatCall(
I, ISD::FCOS))
9468 if (visitUnaryFloatCall(
I, ISD::FTAN))
9474 if (visitUnaryFloatCall(
I, ISD::FASIN))
9480 if (visitUnaryFloatCall(
I, ISD::FACOS))
9486 if (visitUnaryFloatCall(
I, ISD::FATAN))
9490 case LibFunc_atan2f:
9491 case LibFunc_atan2l:
9492 if (visitBinaryFloatCall(
I, ISD::FATAN2))
9498 if (visitUnaryFloatCall(
I, ISD::FSINH))
9504 if (visitUnaryFloatCall(
I, ISD::FCOSH))
9510 if (visitUnaryFloatCall(
I, ISD::FTANH))
9516 case LibFunc_sqrt_finite:
9517 case LibFunc_sqrtf_finite:
9518 case LibFunc_sqrtl_finite:
9519 if (visitUnaryFloatCall(
I, ISD::FSQRT))
9523 case LibFunc_floorf:
9524 case LibFunc_floorl:
9525 if (visitUnaryFloatCall(
I, ISD::FFLOOR))
9528 case LibFunc_nearbyint:
9529 case LibFunc_nearbyintf:
9530 case LibFunc_nearbyintl:
9531 if (visitUnaryFloatCall(
I, ISD::FNEARBYINT))
9537 if (visitUnaryFloatCall(
I, ISD::FCEIL))
9543 if (visitUnaryFloatCall(
I, ISD::FRINT))
9547 case LibFunc_roundf:
9548 case LibFunc_roundl:
9549 if (visitUnaryFloatCall(
I, ISD::FROUND))
9553 case LibFunc_truncf:
9554 case LibFunc_truncl:
9555 if (visitUnaryFloatCall(
I, ISD::FTRUNC))
9561 if (visitUnaryFloatCall(
I, ISD::FLOG2))
9567 if (visitUnaryFloatCall(
I, ISD::FEXP2))
9571 case LibFunc_exp10f:
9572 case LibFunc_exp10l:
9573 if (visitUnaryFloatCall(
I, ISD::FEXP10))
9577 case LibFunc_ldexpf:
9578 case LibFunc_ldexpl:
9579 if (visitBinaryFloatCall(
I, ISD::FLDEXP))
9582 case LibFunc_memcmp:
9583 if (visitMemCmpBCmpCall(
I))
9586 case LibFunc_mempcpy:
9587 if (visitMemPCpyCall(
I))
9590 case LibFunc_memchr:
9591 if (visitMemChrCall(
I))
9594 case LibFunc_strcpy:
9595 if (visitStrCpyCall(
I,
false))
9598 case LibFunc_stpcpy:
9599 if (visitStrCpyCall(
I,
true))
9602 case LibFunc_strcmp:
9603 if (visitStrCmpCall(
I))
9606 case LibFunc_strlen:
9607 if (visitStrLenCall(
I))
9610 case LibFunc_strnlen:
9611 if (visitStrNLenCall(
I))
9635 if (
I.hasDeoptState())
9652 const Value *Discriminator = PAB->Inputs[1];
9654 assert(
Key->getType()->isIntegerTy(32) &&
"Invalid ptrauth key");
9655 assert(Discriminator->getType()->isIntegerTy(64) &&
9656 "Invalid ptrauth discriminator");
9661 if (CalleeCPA->isKnownCompatibleWith(
Key, Discriminator,
9662 DAG.getDataLayout()))
9702 for (
const auto &Code : Codes)
9717 SDISelAsmOperandInfo &MatchingOpInfo,
9719 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
9725 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9727 OpInfo.ConstraintVT);
9728 std::pair<unsigned, const TargetRegisterClass *> InputRC =
9730 MatchingOpInfo.ConstraintVT);
9731 const bool OutOpIsIntOrFP =
9732 OpInfo.ConstraintVT.isInteger() || OpInfo.ConstraintVT.isFloatingPoint();
9733 const bool InOpIsIntOrFP = MatchingOpInfo.ConstraintVT.isInteger() ||
9734 MatchingOpInfo.ConstraintVT.isFloatingPoint();
9735 if ((OutOpIsIntOrFP != InOpIsIntOrFP) || (MatchRC.second != InputRC.second)) {
9738 " with a matching output constraint of"
9739 " incompatible type!");
9741 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9748 SDISelAsmOperandInfo &OpInfo,
9761 const Value *OpVal = OpInfo.CallOperandVal;
9779 DL.getPrefTypeAlign(Ty),
false,
9782 Chain = DAG.
getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9785 OpInfo.CallOperand = StackSlot;
9798static std::optional<unsigned>
9800 SDISelAsmOperandInfo &OpInfo,
9801 SDISelAsmOperandInfo &RefOpInfo) {
9812 return std::nullopt;
9816 unsigned AssignedReg;
9819 &
TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
9822 return std::nullopt;
9827 const MVT RegVT = *
TRI.legalclasstypes_begin(*RC);
9829 if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
9838 !
TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
9843 if (RegVT.
getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
9848 OpInfo.CallOperand =
9849 DAG.
getNode(ISD::BITCAST,
DL, RegVT, OpInfo.CallOperand);
9850 OpInfo.ConstraintVT = RegVT;
9854 }
else if (RegVT.
isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
9857 OpInfo.CallOperand =
9858 DAG.
getNode(ISD::BITCAST,
DL, VT, OpInfo.CallOperand);
9859 OpInfo.ConstraintVT = VT;
9866 if (OpInfo.isMatchingInputConstraint())
9867 return std::nullopt;
9869 EVT ValueVT = OpInfo.ConstraintVT;
9870 if (OpInfo.ConstraintVT == MVT::Other)
9874 unsigned NumRegs = 1;
9875 if (OpInfo.ConstraintVT != MVT::Other)
9890 I = std::find(
I, RC->
end(), AssignedReg);
9891 if (
I == RC->
end()) {
9894 return {AssignedReg};
9898 for (; NumRegs; --NumRegs, ++
I) {
9899 assert(
I != RC->
end() &&
"Ran out of registers to allocate!");
9904 OpInfo.AssignedRegs =
RegsForValue(Regs, RegVT, ValueVT);
9905 return std::nullopt;
9910 const std::vector<SDValue> &AsmNodeOperands) {
9913 for (; OperandNo; --OperandNo) {
9915 unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
9918 (
F.isRegDefKind() ||
F.isRegDefEarlyClobberKind() ||
F.isMemKind()) &&
9919 "Skipped past definitions?");
9920 CurOp +=
F.getNumOperandRegisters() + 1;
9931 explicit ExtraFlags(
const CallBase &
Call) {
9933 if (
IA->hasSideEffects())
9935 if (
IA->isAlignStack())
9942 void update(
const TargetLowering::AsmOperandInfo &OpInfo) {
9958 unsigned get()
const {
return Flags; }
9981void SelectionDAGBuilder::visitInlineAsm(
const CallBase &
Call,
9988 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
9990 DAG.getDataLayout(),
DAG.getSubtarget().getRegisterInfo(),
Call);
9994 bool HasSideEffect =
IA->hasSideEffects();
9995 ExtraFlags ExtraInfo(
Call);
9997 for (
auto &
T : TargetConstraints) {
9998 ConstraintOperands.
push_back(SDISelAsmOperandInfo(
T));
9999 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.
back();
10001 if (OpInfo.CallOperandVal)
10002 OpInfo.CallOperand =
getValue(OpInfo.CallOperandVal);
10004 if (!HasSideEffect)
10005 HasSideEffect = OpInfo.hasMemory(TLI);
10017 return emitInlineAsmError(
Call,
"constraint '" + Twine(
T.ConstraintCode) +
10018 "' expects an integer constant "
10021 ExtraInfo.update(
T);
10029 if (EmitEHLabels) {
10030 assert(EHPadBB &&
"InvokeInst must have an EHPadBB");
10034 if (IsCallBr || EmitEHLabels) {
10042 if (EmitEHLabels) {
10043 Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
10048 IA->collectAsmStrs(AsmStrs);
10051 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10059 if (OpInfo.hasMatchingInput()) {
10060 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
10091 if (OpInfo.isIndirect &&
isFunction(OpInfo.CallOperand) &&
10094 OpInfo.isIndirect =
false;
10101 !OpInfo.isIndirect) {
10102 assert((OpInfo.isMultipleAlternative ||
10104 "Can only indirectify direct input operands!");
10110 OpInfo.CallOperandVal =
nullptr;
10113 OpInfo.isIndirect =
true;
10119 std::vector<SDValue> AsmNodeOperands;
10120 AsmNodeOperands.push_back(
SDValue());
10121 AsmNodeOperands.push_back(
DAG.getTargetExternalSymbol(
10128 AsmNodeOperands.push_back(
DAG.getMDNode(SrcLoc));
10132 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10137 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10139 SDISelAsmOperandInfo &RefOpInfo =
10140 OpInfo.isMatchingInputConstraint()
10141 ? ConstraintOperands[OpInfo.getMatchedOperand()]
10143 const auto RegError =
10146 const MachineFunction &MF =
DAG.getMachineFunction();
10148 const char *
RegName =
TRI.getName(*RegError);
10149 emitInlineAsmError(
Call,
"register '" + Twine(
RegName) +
10150 "' allocated for constraint '" +
10151 Twine(OpInfo.ConstraintCode) +
10152 "' does not match required type");
10156 auto DetectWriteToReservedRegister = [&]() {
10157 const MachineFunction &MF =
DAG.getMachineFunction();
10162 emitInlineAsmError(
Call,
"write to reserved register '" +
10171 !OpInfo.isMatchingInputConstraint())) &&
10172 "Only address as input operand is allowed.");
10174 switch (OpInfo.Type) {
10180 "Failed to convert memory constraint code to constraint id.");
10184 OpFlags.setMemConstraint(ConstraintID);
10185 AsmNodeOperands.push_back(
DAG.getTargetConstant(OpFlags,
getCurSDLoc(),
10187 AsmNodeOperands.push_back(OpInfo.CallOperand);
10192 if (OpInfo.AssignedRegs.
Regs.empty()) {
10193 emitInlineAsmError(
10194 Call,
"couldn't allocate output register for constraint '" +
10195 Twine(OpInfo.ConstraintCode) +
"'");
10199 if (DetectWriteToReservedRegister())
10213 SDValue InOperandVal = OpInfo.CallOperand;
10215 if (OpInfo.isMatchingInputConstraint()) {
10220 InlineAsm::Flag
Flag(AsmNodeOperands[CurOp]->getAsZExtVal());
10221 if (
Flag.isRegDefKind() ||
Flag.isRegDefEarlyClobberKind()) {
10222 if (OpInfo.isIndirect) {
10224 emitInlineAsmError(
Call,
"inline asm not supported yet: "
10225 "don't know how to handle tied "
10226 "indirect register inputs");
10231 MachineFunction &MF =
DAG.getMachineFunction();
10236 MVT RegVT =
R->getSimpleValueType(0);
10237 const TargetRegisterClass *RC =
10240 :
TRI.getMinimalPhysRegClass(TiedReg);
10241 for (
unsigned i = 0, e =
Flag.getNumOperandRegisters(); i != e; ++i)
10244 RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.
getValueType());
10248 MatchedRegs.getCopyToRegs(InOperandVal,
DAG, dl, Chain, &Glue, &
Call);
10250 OpInfo.getMatchedOperand(), dl,
DAG,
10255 assert(
Flag.isMemKind() &&
"Unknown matching constraint!");
10256 assert(
Flag.getNumOperandRegisters() == 1 &&
10257 "Unexpected number of operands");
10260 Flag.clearMemConstraint();
10261 Flag.setMatchingOp(OpInfo.getMatchedOperand());
10262 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10264 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
10275 std::vector<SDValue>
Ops;
10281 emitInlineAsmError(
Call,
"value out of range for constraint '" +
10282 Twine(OpInfo.ConstraintCode) +
"'");
10286 emitInlineAsmError(
Call,
10287 "invalid operand for inline asm constraint '" +
10288 Twine(OpInfo.ConstraintCode) +
"'");
10294 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10301 assert((OpInfo.isIndirect ||
10303 "Operand must be indirect to be a mem!");
10306 "Memory operands expect pointer values");
10311 "Failed to convert memory constraint code to constraint id.");
10315 ResOpType.setMemConstraint(ConstraintID);
10316 AsmNodeOperands.push_back(
DAG.getTargetConstant(ResOpType,
10319 AsmNodeOperands.push_back(InOperandVal);
10327 "Failed to convert memory constraint code to constraint id.");
10331 SDValue AsmOp = InOperandVal;
10335 AsmOp =
DAG.getTargetGlobalAddress(GA->getGlobal(),
getCurSDLoc(),
10341 ResOpType.setMemConstraint(ConstraintID);
10343 AsmNodeOperands.push_back(
10346 AsmNodeOperands.push_back(AsmOp);
10352 emitInlineAsmError(
Call,
"unknown asm constraint '" +
10353 Twine(OpInfo.ConstraintCode) +
"'");
10358 if (OpInfo.isIndirect) {
10359 emitInlineAsmError(
10360 Call,
"Don't know how to handle indirect register inputs yet "
10361 "for constraint '" +
10362 Twine(OpInfo.ConstraintCode) +
"'");
10367 if (OpInfo.AssignedRegs.
Regs.empty()) {
10368 emitInlineAsmError(
Call,
10369 "couldn't allocate input reg for constraint '" +
10370 Twine(OpInfo.ConstraintCode) +
"'");
10374 if (DetectWriteToReservedRegister())
10383 0, dl,
DAG, AsmNodeOperands);
10389 if (!OpInfo.AssignedRegs.
Regs.empty())
10399 if (Glue.
getNode()) AsmNodeOperands.push_back(Glue);
10401 unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
10403 DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
10415 ResultTypes = StructResult->elements();
10416 else if (!CallResultType->
isVoidTy())
10417 ResultTypes =
ArrayRef(CallResultType);
10419 auto CurResultType = ResultTypes.
begin();
10420 auto handleRegAssign = [&](
SDValue V) {
10421 assert(CurResultType != ResultTypes.
end() &&
"Unexpected value");
10422 assert((*CurResultType)->isSized() &&
"Unexpected unsized type");
10423 EVT ResultVT = TLI.
getValueType(
DAG.getDataLayout(), *CurResultType);
10435 if (ResultVT !=
V.getValueType() &&
10438 else if (ResultVT !=
V.getValueType() && ResultVT.
isInteger() &&
10439 V.getValueType().isInteger()) {
10445 assert(ResultVT ==
V.getValueType() &&
"Asm result value mismatch!");
10451 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10455 if (OpInfo.AssignedRegs.
Regs.empty())
10458 switch (OpInfo.ConstraintType) {
10462 Chain, &Glue, &
Call);
10474 assert(
false &&
"Unexpected unknown constraint");
10478 if (OpInfo.isIndirect) {
10479 const Value *
Ptr = OpInfo.CallOperandVal;
10480 assert(
Ptr &&
"Expected value CallOperandVal for indirect asm operand");
10482 MachinePointerInfo(
Ptr));
10489 handleRegAssign(V);
10491 handleRegAssign(Val);
10497 if (!ResultValues.
empty()) {
10498 assert(CurResultType == ResultTypes.
end() &&
10499 "Mismatch in number of ResultTypes");
10501 "Mismatch in number of output operands in asm result");
10504 DAG.getVTList(ResultVTs), ResultValues);
10509 if (!OutChains.
empty())
10512 if (EmitEHLabels) {
10517 if (ResultValues.
empty() || HasSideEffect || !OutChains.
empty() || IsCallBr ||
10519 DAG.setRoot(Chain);
10522void SelectionDAGBuilder::emitInlineAsmError(
const CallBase &
Call,
10523 const Twine &Message) {
10524 LLVMContext &Ctx = *
DAG.getContext();
10528 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10532 if (ValueVTs.
empty())
10536 for (
const EVT &VT : ValueVTs)
10537 Ops.push_back(
DAG.getUNDEF(VT));
10542void SelectionDAGBuilder::visitVAStart(
const CallInst &
I) {
10546 DAG.getSrcValue(
I.getArgOperand(0))));
10549void SelectionDAGBuilder::visitVAArg(
const VAArgInst &
I) {
10550 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10551 const DataLayout &
DL =
DAG.getDataLayout();
10555 DL.getABITypeAlign(
I.getType()).value());
10556 DAG.setRoot(
V.getValue(1));
10558 if (
I.getType()->isPointerTy())
10559 V =
DAG.getPtrExtOrTrunc(
10564void SelectionDAGBuilder::visitVAEnd(
const CallInst &
I) {
10568 DAG.getSrcValue(
I.getArgOperand(0))));
10571void SelectionDAGBuilder::visitVACopy(
const CallInst &
I) {
10576 DAG.getSrcValue(
I.getArgOperand(0)),
10577 DAG.getSrcValue(
I.getArgOperand(1))));
10583 std::optional<ConstantRange> CR =
getRange(
I);
10585 if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped())
10588 APInt Lo = CR->getUnsignedMin();
10589 if (!
Lo.isMinValue())
10592 APInt Hi = CR->getUnsignedMax();
10593 unsigned Bits = std::max(
Hi.getActiveBits(),
10601 DAG.getValueType(SmallVT));
10602 unsigned NumVals =
Op.getNode()->getNumValues();
10608 Ops.push_back(ZExt);
10609 for (
unsigned I = 1;
I != NumVals; ++
I)
10610 Ops.push_back(
Op.getValue(
I));
10612 return DAG.getMergeValues(
Ops,
SL);
10623 unsigned ArgIdx,
unsigned NumArgs,
SDValue Callee,
Type *ReturnTy,
10626 Args.reserve(NumArgs);
10630 for (
unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
10631 ArgI != ArgE; ++ArgI) {
10632 const Value *V =
Call->getOperand(ArgI);
10634 assert(!V->getType()->isEmptyTy() &&
"Empty type passed to intrinsic.");
10637 Entry.setAttributes(
Call, ArgI);
10638 Args.push_back(Entry);
10643 .
setCallee(
Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
10672 for (
unsigned I = StartIdx;
I <
Call.arg_size();
I++) {
10681 Ops.push_back(Builder.getValue(
Call.getArgOperand(
I)));
10687void SelectionDAGBuilder::visitStackmap(
const CallInst &CI) {
10713 Ops.push_back(Chain);
10714 Ops.push_back(InGlue);
10721 assert(
ID.getValueType() == MVT::i64);
10723 DAG.getTargetConstant(
ID->getAsZExtVal(),
DL,
ID.getValueType());
10724 Ops.push_back(IDConst);
10730 Ops.push_back(ShadConst);
10736 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
10737 Chain =
DAG.getNode(ISD::STACKMAP,
DL, NodeTys,
Ops);
10740 Chain =
DAG.getCALLSEQ_END(Chain, 0, 0, InGlue,
DL);
10745 DAG.setRoot(Chain);
10748 FuncInfo.MF->getFrameInfo().setHasStackMap();
10752void SelectionDAGBuilder::visitPatchpoint(
const CallBase &CB,
10769 Callee =
DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
10772 Callee =
DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
10773 SDLoc(SymbolicCallee),
10774 SymbolicCallee->getValueType(0));
10784 "Not enough arguments provided to the patchpoint intrinsic");
10787 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
10791 TargetLowering::CallLoweringInfo CLI(
DAG);
10796 SDNode *CallEnd =
Result.second.getNode();
10797 if (CallEnd->
getOpcode() == ISD::EH_LABEL)
10805 "Expected a callseq node.");
10807 bool HasGlue =
Call->getGluedNode();
10832 Ops.push_back(Callee);
10838 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
10839 Ops.push_back(
DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
10842 Ops.push_back(
DAG.getTargetConstant((
unsigned)CC, dl, MVT::i32));
10847 for (
unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i !=
e; ++i)
10858 if (IsAnyRegCC && HasDef) {
10860 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10863 assert(ValueVTs.
size() == 1 &&
"Expected only one return value type.");
10868 NodeTys =
DAG.getVTList(ValueVTs);
10870 NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
10873 SDValue PPV =
DAG.getNode(ISD::PATCHPOINT, dl, NodeTys,
Ops);
10887 if (IsAnyRegCC && HasDef) {
10890 DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
10896 FuncInfo.MF->getFrameInfo().setHasPatchPoint();
10899void SelectionDAGBuilder::visitVectorReduce(
const CallInst &
I,
10901 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10904 if (
I.arg_size() > 1)
10909 SDNodeFlags SDFlags;
10913 switch (Intrinsic) {
10914 case Intrinsic::vector_reduce_fadd:
10917 DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
10920 Res =
DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
10922 case Intrinsic::vector_reduce_fmul:
10925 DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
10928 Res =
DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
10930 case Intrinsic::vector_reduce_add:
10931 Res =
DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
10933 case Intrinsic::vector_reduce_mul:
10934 Res =
DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
10936 case Intrinsic::vector_reduce_and:
10937 Res =
DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
10939 case Intrinsic::vector_reduce_or:
10940 Res =
DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
10942 case Intrinsic::vector_reduce_xor:
10943 Res =
DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
10945 case Intrinsic::vector_reduce_smax:
10946 Res =
DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
10948 case Intrinsic::vector_reduce_smin:
10949 Res =
DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
10951 case Intrinsic::vector_reduce_umax:
10952 Res =
DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
10954 case Intrinsic::vector_reduce_umin:
10955 Res =
DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
10957 case Intrinsic::vector_reduce_fmax:
10958 Res =
DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
10960 case Intrinsic::vector_reduce_fmin:
10961 Res =
DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
10963 case Intrinsic::vector_reduce_fmaximum:
10964 Res =
DAG.getNode(ISD::VECREDUCE_FMAXIMUM, dl, VT, Op1, SDFlags);
10966 case Intrinsic::vector_reduce_fminimum:
10967 Res =
DAG.getNode(ISD::VECREDUCE_FMINIMUM, dl, VT, Op1, SDFlags);
10980 Attrs.push_back(Attribute::SExt);
10982 Attrs.push_back(Attribute::ZExt);
10984 Attrs.push_back(Attribute::InReg);
10986 return AttributeList::get(CLI.
RetTy->
getContext(), AttributeList::ReturnIndex,
10994std::pair<SDValue, SDValue>
11008 "Only supported for non-aggregate returns");
11011 for (
Type *Ty : RetOrigTys)
11020 RetOrigTys.
swap(OldRetOrigTys);
11021 RetVTs.
swap(OldRetVTs);
11022 Offsets.swap(OldOffsets);
11024 for (
size_t i = 0, e = OldRetVTs.
size(); i != e; ++i) {
11025 EVT RetVT = OldRetVTs[i];
11029 unsigned RegisterVTByteSZ = RegisterVT.
getSizeInBits() / 8;
11030 RetOrigTys.
append(NumRegs, OldRetOrigTys[i]);
11031 RetVTs.
append(NumRegs, RegisterVT);
11032 for (
unsigned j = 0; j != NumRegs; ++j)
11045 int DemoteStackIdx = -100;
11058 ArgListEntry Entry(DemoteStackSlot, StackSlotPtrType);
11059 Entry.IsSRet =
true;
11060 Entry.Alignment = Alignment;
11072 for (
unsigned I = 0, E = RetVTs.
size();
I != E; ++
I) {
11074 if (NeedsRegBlock) {
11075 Flags.setInConsecutiveRegs();
11076 if (
I == RetVTs.
size() - 1)
11077 Flags.setInConsecutiveRegsLast();
11079 EVT VT = RetVTs[
I];
11083 for (
unsigned i = 0; i != NumRegs; ++i) {
11087 Ret.Flags.setPointer();
11088 Ret.Flags.setPointerAddrSpace(
11092 Ret.Flags.setSExt();
11094 Ret.Flags.setZExt();
11096 Ret.Flags.setInReg();
11097 CLI.
Ins.push_back(Ret);
11106 if (Arg.IsSwiftError) {
11112 CLI.
Ins.push_back(Ret);
11120 for (
unsigned i = 0, e = Args.size(); i != e; ++i) {
11124 Type *FinalType = Args[i].Ty;
11125 if (Args[i].IsByVal)
11126 FinalType = Args[i].IndirectType;
11129 for (
unsigned Value = 0, NumValues = OrigArgTys.
size();
Value != NumValues;
11132 Type *ArgTy = OrigArgTy;
11133 if (Args[i].Ty != Args[i].OrigTy) {
11134 assert(
Value == 0 &&
"Only supported for non-aggregate arguments");
11135 ArgTy = Args[i].Ty;
11140 Args[i].Node.getResNo() +
Value);
11147 Flags.setOrigAlign(OriginalAlignment);
11152 Flags.setPointer();
11155 if (Args[i].IsZExt)
11157 if (Args[i].IsSExt)
11159 if (Args[i].IsNoExt)
11161 if (Args[i].IsInReg) {
11168 Flags.setHvaStart();
11174 if (Args[i].IsSRet)
11176 if (Args[i].IsSwiftSelf)
11177 Flags.setSwiftSelf();
11178 if (Args[i].IsSwiftAsync)
11179 Flags.setSwiftAsync();
11180 if (Args[i].IsSwiftError)
11181 Flags.setSwiftError();
11182 if (Args[i].IsCFGuardTarget)
11183 Flags.setCFGuardTarget();
11184 if (Args[i].IsByVal)
11186 if (Args[i].IsByRef)
11188 if (Args[i].IsPreallocated) {
11189 Flags.setPreallocated();
11197 if (Args[i].IsInAlloca) {
11198 Flags.setInAlloca();
11207 if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
11208 unsigned FrameSize =
DL.getTypeAllocSize(Args[i].IndirectType);
11209 Flags.setByValSize(FrameSize);
11212 if (
auto MA = Args[i].Alignment)
11216 }
else if (
auto MA = Args[i].Alignment) {
11219 MemAlign = OriginalAlignment;
11221 Flags.setMemAlign(MemAlign);
11222 if (Args[i].IsNest)
11225 Flags.setInConsecutiveRegs();
11228 unsigned NumParts =
11233 if (Args[i].IsSExt)
11235 else if (Args[i].IsZExt)
11240 if (Args[i].IsReturned && !
Op.getValueType().isVector() &&
11245 Args[i].Ty->getPointerAddressSpace())) &&
11246 RetVTs.
size() == NumValues &&
"unexpected use of 'returned'");
11259 CLI.
RetZExt == Args[i].IsZExt))
11260 Flags.setReturned();
11266 for (
unsigned j = 0; j != NumParts; ++j) {
11272 j * Parts[j].
getValueType().getStoreSize().getKnownMinValue());
11273 if (NumParts > 1 && j == 0)
11277 if (j == NumParts - 1)
11281 CLI.
Outs.push_back(MyFlags);
11282 CLI.
OutVals.push_back(Parts[j]);
11285 if (NeedsRegBlock &&
Value == NumValues - 1)
11286 CLI.
Outs[CLI.
Outs.size() - 1].Flags.setInConsecutiveRegsLast();
11298 "LowerCall didn't return a valid chain!");
11300 "LowerCall emitted a return value for a tail call!");
11302 "LowerCall didn't emit the correct number of values!");
11314 for (
unsigned i = 0, e = CLI.
Ins.size(); i != e; ++i) {
11315 assert(InVals[i].
getNode() &&
"LowerCall emitted a null value!");
11316 assert(
EVT(CLI.
Ins[i].VT) == InVals[i].getValueType() &&
11317 "LowerCall emitted a value with the wrong type!");
11327 unsigned NumValues = RetVTs.
size();
11328 ReturnValues.
resize(NumValues);
11335 for (
unsigned i = 0; i < NumValues; ++i) {
11342 DemoteStackIdx, Offsets[i]),
11344 ReturnValues[i] = L;
11345 Chains[i] = L.getValue(1);
11352 std::optional<ISD::NodeType> AssertOp;
11357 unsigned CurReg = 0;
11358 for (
EVT VT : RetVTs) {
11364 CLI.
DAG, CLI.
DL, &InVals[CurReg], NumRegs, RegisterVT, VT,
nullptr,
11372 if (ReturnValues.
empty())
11378 return std::make_pair(Res, CLI.
Chain);
11395 if (
N->getNumValues() == 1) {
11403 "Lowering returned the wrong number of results!");
11406 for (
unsigned I = 0, E =
N->getNumValues();
I != E; ++
I)
11420 "Copy from a reg to the same reg!");
11421 assert(!Reg.isPhysical() &&
"Is a physreg");
11427 RegsForValue RFV(V->getContext(), TLI,
DAG.getDataLayout(), Reg, V->getType(),
11432 auto PreferredExtendIt =
FuncInfo.PreferredExtendType.find(V);
11433 if (PreferredExtendIt !=
FuncInfo.PreferredExtendType.end())
11434 ExtendType = PreferredExtendIt->second;
11437 PendingExports.push_back(Chain);
11449 return A->use_empty();
11451 const BasicBlock &Entry =
A->getParent()->front();
11452 for (
const User *U :
A->users())
11461 std::pair<const AllocaInst *, const StoreInst *>>;
11473 enum StaticAllocaInfo {
Unknown, Clobbered, Elidable };
11475 unsigned NumArgs = FuncInfo->
Fn->
arg_size();
11476 StaticAllocas.
reserve(NumArgs * 2);
11478 auto GetInfoIfStaticAlloca = [&](
const Value *V) -> StaticAllocaInfo * {
11481 V = V->stripPointerCasts();
11483 if (!AI || !AI->isStaticAlloca() || !FuncInfo->
StaticAllocaMap.count(AI))
11486 return &Iter.first->second;
11503 if (
I.isDebugOrPseudoInst())
11507 for (
const Use &U :
I.operands()) {
11508 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(U))
11509 *
Info = StaticAllocaInfo::Clobbered;
11515 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(
SI->getValueOperand()))
11516 *
Info = StaticAllocaInfo::Clobbered;
11519 const Value *Dst =
SI->getPointerOperand()->stripPointerCasts();
11520 StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(Dst);
11526 if (*
Info != StaticAllocaInfo::Unknown)
11534 const Value *Val =
SI->getValueOperand()->stripPointerCasts();
11536 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
11540 !
DL.typeSizeEqualsStoreSize(Arg->
getType()) ||
11541 ArgCopyElisionCandidates.count(Arg)) {
11542 *
Info = StaticAllocaInfo::Clobbered;
11546 LLVM_DEBUG(
dbgs() <<
"Found argument copy elision candidate: " << *AI
11550 *
Info = StaticAllocaInfo::Elidable;
11551 ArgCopyElisionCandidates.insert({Arg, {AI,
SI}});
11556 if (ArgCopyElisionCandidates.size() == NumArgs)
11580 auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
11581 assert(ArgCopyIter != ArgCopyElisionCandidates.end());
11582 const AllocaInst *AI = ArgCopyIter->second.first;
11583 int FixedIndex = FINode->getIndex();
11585 int OldIndex = AllocaIndex;
11589 dbgs() <<
" argument copy elision failed due to bad fixed stack "
11595 LLVM_DEBUG(
dbgs() <<
" argument copy elision failed: alignment of alloca "
11596 "greater than stack argument alignment ("
11597 <<
DebugStr(RequiredAlignment) <<
" vs "
11605 dbgs() <<
"Eliding argument copy from " << Arg <<
" to " << *AI <<
'\n'
11606 <<
" Replacing frame index " << OldIndex <<
" with " << FixedIndex
11612 AllocaIndex = FixedIndex;
11613 ArgCopyElisionFrameIndexMap.
insert({OldIndex, FixedIndex});
11614 for (
SDValue ArgVal : ArgVals)
11618 const StoreInst *
SI = ArgCopyIter->second.second;
11631void SelectionDAGISel::LowerArguments(
const Function &
F) {
11632 SelectionDAG &DAG =
SDB->DAG;
11633 SDLoc dl =
SDB->getCurSDLoc();
11638 if (
F.hasFnAttribute(Attribute::Naked))
11643 MVT ValueVT =
TLI->getPointerTy(
DL,
DL.getAllocaAddrSpace());
11645 ISD::ArgFlagsTy
Flags;
11647 MVT RegisterVT =
TLI->getRegisterType(*DAG.
getContext(), ValueVT);
11648 ISD::InputArg RetArg(Flags, RegisterVT, ValueVT,
F.getReturnType(),
true,
11650 Ins.push_back(RetArg);
11658 ArgCopyElisionCandidates);
11661 for (
const Argument &Arg :
F.args()) {
11662 unsigned ArgNo = Arg.getArgNo();
11665 bool isArgValueUsed = !Arg.
use_empty();
11666 unsigned PartBase = 0;
11668 if (Arg.hasAttribute(Attribute::ByVal))
11669 FinalType = Arg.getParamByValType();
11670 bool NeedsRegBlock =
TLI->functionArgumentNeedsConsecutiveRegisters(
11671 FinalType,
F.getCallingConv(),
F.isVarArg(),
DL);
11672 for (
unsigned Value = 0, NumValues =
Types.size();
Value != NumValues;
11675 EVT VT =
TLI->getValueType(
DL, ArgTy);
11676 ISD::ArgFlagsTy
Flags;
11679 Flags.setPointer();
11682 if (Arg.hasAttribute(Attribute::ZExt))
11684 if (Arg.hasAttribute(Attribute::SExt))
11686 if (Arg.hasAttribute(Attribute::InReg)) {
11693 Flags.setHvaStart();
11699 if (Arg.hasAttribute(Attribute::StructRet))
11701 if (Arg.hasAttribute(Attribute::SwiftSelf))
11702 Flags.setSwiftSelf();
11703 if (Arg.hasAttribute(Attribute::SwiftAsync))
11704 Flags.setSwiftAsync();
11705 if (Arg.hasAttribute(Attribute::SwiftError))
11706 Flags.setSwiftError();
11707 if (Arg.hasAttribute(Attribute::ByVal))
11709 if (Arg.hasAttribute(Attribute::ByRef))
11711 if (Arg.hasAttribute(Attribute::InAlloca)) {
11712 Flags.setInAlloca();
11720 if (Arg.hasAttribute(Attribute::Preallocated)) {
11721 Flags.setPreallocated();
11733 const Align OriginalAlignment(
11734 TLI->getABIAlignmentForCallingConv(ArgTy,
DL));
11735 Flags.setOrigAlign(OriginalAlignment);
11738 Type *ArgMemTy =
nullptr;
11739 if (
Flags.isByVal() ||
Flags.isInAlloca() ||
Flags.isPreallocated() ||
11742 ArgMemTy = Arg.getPointeeInMemoryValueType();
11744 uint64_t MemSize =
DL.getTypeAllocSize(ArgMemTy);
11749 if (
auto ParamAlign = Arg.getParamStackAlign())
11750 MemAlign = *ParamAlign;
11751 else if ((ParamAlign = Arg.getParamAlign()))
11752 MemAlign = *ParamAlign;
11754 MemAlign =
TLI->getByValTypeAlignment(ArgMemTy,
DL);
11755 if (
Flags.isByRef())
11756 Flags.setByRefSize(MemSize);
11758 Flags.setByValSize(MemSize);
11759 }
else if (
auto ParamAlign = Arg.getParamStackAlign()) {
11760 MemAlign = *ParamAlign;
11762 MemAlign = OriginalAlignment;
11764 Flags.setMemAlign(MemAlign);
11766 if (Arg.hasAttribute(Attribute::Nest))
11769 Flags.setInConsecutiveRegs();
11770 if (ArgCopyElisionCandidates.count(&Arg))
11771 Flags.setCopyElisionCandidate();
11772 if (Arg.hasAttribute(Attribute::Returned))
11773 Flags.setReturned();
11775 MVT RegisterVT =
TLI->getRegisterTypeForCallingConv(
11776 *
CurDAG->getContext(),
F.getCallingConv(), VT);
11777 unsigned NumRegs =
TLI->getNumRegistersForCallingConv(
11778 *
CurDAG->getContext(),
F.getCallingConv(), VT);
11779 for (
unsigned i = 0; i != NumRegs; ++i) {
11783 ISD::InputArg MyFlags(
11784 Flags, RegisterVT, VT, ArgTy, isArgValueUsed, ArgNo,
11786 if (NumRegs > 1 && i == 0)
11787 MyFlags.Flags.setSplit();
11790 MyFlags.Flags.setOrigAlign(
Align(1));
11791 if (i == NumRegs - 1)
11792 MyFlags.Flags.setSplitEnd();
11794 Ins.push_back(MyFlags);
11796 if (NeedsRegBlock &&
Value == NumValues - 1)
11797 Ins[
Ins.size() - 1].Flags.setInConsecutiveRegsLast();
11804 SDValue NewRoot =
TLI->LowerFormalArguments(
11805 DAG.
getRoot(),
F.getCallingConv(),
F.isVarArg(), Ins, dl, DAG, InVals);
11809 "LowerFormalArguments didn't return a valid chain!");
11811 "LowerFormalArguments didn't emit the correct number of values!");
11813 for (
unsigned i = 0, e =
Ins.size(); i != e; ++i) {
11815 "LowerFormalArguments emitted a null value!");
11817 "LowerFormalArguments emitted a value with the wrong type!");
11829 MVT VT =
TLI->getPointerTy(
DL,
DL.getAllocaAddrSpace());
11830 MVT RegVT =
TLI->getRegisterType(*
CurDAG->getContext(), VT);
11831 std::optional<ISD::NodeType> AssertOp;
11834 F.getCallingConv(), AssertOp);
11836 MachineFunction&
MF =
SDB->DAG.getMachineFunction();
11837 MachineRegisterInfo&
RegInfo =
MF.getRegInfo();
11839 RegInfo.createVirtualRegister(
TLI->getRegClassFor(RegVT));
11840 FuncInfo->DemoteRegister = SRetReg;
11842 SDB->DAG.getCopyToReg(NewRoot,
SDB->getCurSDLoc(), SRetReg, ArgValue);
11850 DenseMap<int, int> ArgCopyElisionFrameIndexMap;
11851 for (
const Argument &Arg :
F.args()) {
11855 unsigned NumValues = ValueVTs.
size();
11856 if (NumValues == 0)
11863 if (Ins[i].
Flags.isCopyElisionCandidate()) {
11864 unsigned NumParts = 0;
11865 for (EVT VT : ValueVTs)
11866 NumParts +=
TLI->getNumRegistersForCallingConv(*
CurDAG->getContext(),
11867 F.getCallingConv(), VT);
11871 ArrayRef(&InVals[i], NumParts), ArgHasUses);
11876 bool isSwiftErrorArg =
11877 TLI->supportSwiftError() &&
11878 Arg.hasAttribute(Attribute::SwiftError);
11879 if (!ArgHasUses && !isSwiftErrorArg) {
11880 SDB->setUnusedArgValue(&Arg, InVals[i]);
11883 if (FrameIndexSDNode *FI =
11885 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11888 for (
unsigned Val = 0; Val != NumValues; ++Val) {
11889 EVT VT = ValueVTs[Val];
11890 MVT PartVT =
TLI->getRegisterTypeForCallingConv(*
CurDAG->getContext(),
11891 F.getCallingConv(), VT);
11892 unsigned NumParts =
TLI->getNumRegistersForCallingConv(
11893 *
CurDAG->getContext(),
F.getCallingConv(), VT);
11898 if (ArgHasUses || isSwiftErrorArg) {
11899 std::optional<ISD::NodeType> AssertOp;
11900 if (Arg.hasAttribute(Attribute::SExt))
11902 else if (Arg.hasAttribute(Attribute::ZExt))
11907 NewRoot,
F.getCallingConv(), AssertOp);
11910 if (NoFPClass !=
fcNone) {
11912 static_cast<uint64_t
>(NoFPClass), dl, MVT::i32);
11914 OutVal, SDNoFPClass);
11923 if (ArgValues.
empty())
11927 if (FrameIndexSDNode *FI =
11929 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11932 SDB->getCurSDLoc());
11934 SDB->setValue(&Arg, Res);
11944 if (LoadSDNode *LNode =
11946 if (FrameIndexSDNode *FI =
11948 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11976 FuncInfo->InitializeRegForValue(&Arg);
11977 SDB->CopyToExportRegsIfNeeded(&Arg);
11981 if (!Chains.
empty()) {
11988 assert(i == InVals.
size() &&
"Argument register count mismatch!");
11992 if (!ArgCopyElisionFrameIndexMap.
empty()) {
11993 for (MachineFunction::VariableDbgInfo &VI :
11994 MF->getInStackSlotVariableDbgInfo()) {
11995 auto I = ArgCopyElisionFrameIndexMap.
find(
VI.getStackSlot());
11996 if (
I != ArgCopyElisionFrameIndexMap.
end())
11997 VI.updateStackSlot(
I->second);
12012SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(
const BasicBlock *LLVMBB) {
12013 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12015 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
12021 MachineBasicBlock *SuccMBB =
FuncInfo.getMBB(SuccBB);
12025 if (!SuccsHandled.
insert(SuccMBB).second)
12033 for (
const PHINode &PN : SuccBB->phis()) {
12035 if (PN.use_empty())
12039 if (PN.getType()->isEmptyTy())
12043 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
12048 RegOut =
FuncInfo.CreateRegs(&PN);
12066 "Didn't codegen value into a register!??");
12076 for (EVT VT : ValueVTs) {
12078 for (
unsigned i = 0; i != NumRegisters; ++i)
12080 Reg += NumRegisters;
12100void SelectionDAGBuilder::updateDAGForMaybeTailCall(
SDValue MaybeTC) {
12102 if (MaybeTC.
getNode() !=
nullptr)
12103 DAG.setRoot(MaybeTC);
12108void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W,
Value *
Cond,
12111 MachineFunction *CurMF =
FuncInfo.MF;
12112 MachineBasicBlock *NextMBB =
nullptr;
12117 unsigned Size =
W.LastCluster -
W.FirstCluster + 1;
12119 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
12121 if (
Size == 2 &&
W.MBB == SwitchMBB) {
12129 CaseCluster &
Small = *
W.FirstCluster;
12130 CaseCluster &
Big = *
W.LastCluster;
12134 const APInt &SmallValue =
Small.Low->getValue();
12135 const APInt &BigValue =
Big.Low->getValue();
12138 APInt CommonBit = BigValue ^ SmallValue;
12145 DAG.getConstant(CommonBit,
DL, VT));
12147 DL, MVT::i1,
Or,
DAG.getConstant(BigValue | SmallValue,
DL, VT),
12153 addSuccessorWithProb(SwitchMBB,
Small.MBB,
Small.Prob +
Big.Prob);
12155 addSuccessorWithProb(
12156 SwitchMBB, DefaultMBB,
12160 addSuccessorWithProb(SwitchMBB, DefaultMBB);
12167 BrCond =
DAG.getNode(ISD::BR,
DL, MVT::Other, BrCond,
12168 DAG.getBasicBlock(DefaultMBB));
12170 DAG.setRoot(BrCond);
12182 [](
const CaseCluster &a,
const CaseCluster &b) {
12183 return a.Prob != b.Prob ?
12185 a.Low->getValue().slt(b.Low->getValue());
12192 if (
I->Prob >
W.LastCluster->Prob)
12194 if (
I->Kind ==
CC_Range &&
I->MBB == NextMBB) {
12202 BranchProbability DefaultProb =
W.DefaultProb;
12203 BranchProbability UnhandledProbs = DefaultProb;
12205 UnhandledProbs +=
I->Prob;
12207 MachineBasicBlock *CurMBB =
W.MBB;
12209 bool FallthroughUnreachable =
false;
12210 MachineBasicBlock *Fallthrough;
12211 if (
I ==
W.LastCluster) {
12213 Fallthrough = DefaultMBB;
12218 CurMF->
insert(BBI, Fallthrough);
12222 UnhandledProbs -=
I->Prob;
12227 JumpTableHeader *JTH = &
SL->JTCases[
I->JTCasesIndex].first;
12228 SwitchCG::JumpTable *
JT = &
SL->JTCases[
I->JTCasesIndex].second;
12231 MachineBasicBlock *JumpMBB =
JT->MBB;
12232 CurMF->
insert(BBI, JumpMBB);
12234 auto JumpProb =
I->Prob;
12235 auto FallthroughProb = UnhandledProbs;
12243 if (*SI == DefaultMBB) {
12244 JumpProb += DefaultProb / 2;
12245 FallthroughProb -= DefaultProb / 2;
12263 if (FallthroughUnreachable) {
12270 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
12271 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
12277 JT->Default = Fallthrough;
12280 if (CurMBB == SwitchMBB) {
12288 BitTestBlock *BTB = &
SL->BitTestCases[
I->BTCasesIndex];
12291 for (BitTestCase &BTC : BTB->
Cases)
12303 BTB->
Prob += DefaultProb / 2;
12307 if (FallthroughUnreachable)
12311 if (CurMBB == SwitchMBB) {
12318 const Value *
RHS, *
LHS, *MHS;
12320 if (
I->Low ==
I->High) {
12335 if (FallthroughUnreachable)
12339 CaseBlock CB(CC,
LHS,
RHS, MHS,
I->MBB, Fallthrough, CurMBB,
12342 if (CurMBB == SwitchMBB)
12345 SL->SwitchCases.push_back(CB);
12350 CurMBB = Fallthrough;
12354void SelectionDAGBuilder::splitWorkItem(
SwitchWorkList &WorkList,
12355 const SwitchWorkListItem &W,
12358 assert(
W.FirstCluster->Low->getValue().slt(
W.LastCluster->Low->getValue()) &&
12359 "Clusters not sorted?");
12360 assert(
W.LastCluster -
W.FirstCluster + 1 >= 2 &&
"Too small to split!");
12362 auto [LastLeft, FirstRight, LeftProb, RightProb] =
12363 SL->computeSplitWorkItemInfo(W);
12368 assert(PivotCluster >
W.FirstCluster);
12369 assert(PivotCluster <=
W.LastCluster);
12374 const ConstantInt *Pivot = PivotCluster->Low;
12383 MachineBasicBlock *LeftMBB;
12384 if (FirstLeft == LastLeft && FirstLeft->Kind ==
CC_Range &&
12385 FirstLeft->Low ==
W.GE &&
12386 (FirstLeft->High->getValue() + 1LL) == Pivot->
getValue()) {
12387 LeftMBB = FirstLeft->MBB;
12389 LeftMBB =
FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
12390 FuncInfo.MF->insert(BBI, LeftMBB);
12392 {LeftMBB, FirstLeft, LastLeft,
W.GE, Pivot,
W.DefaultProb / 2});
12400 MachineBasicBlock *RightMBB;
12401 if (FirstRight == LastRight && FirstRight->Kind ==
CC_Range &&
12402 W.LT && (FirstRight->High->getValue() + 1ULL) ==
W.LT->getValue()) {
12403 RightMBB = FirstRight->MBB;
12405 RightMBB =
FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
12406 FuncInfo.MF->insert(BBI, RightMBB);
12408 {RightMBB, FirstRight, LastRight, Pivot,
W.LT,
W.DefaultProb / 2});
12414 CaseBlock CB(
ISD::SETLT,
Cond, Pivot,
nullptr, LeftMBB, RightMBB,
W.MBB,
12417 if (
W.MBB == SwitchMBB)
12420 SL->SwitchCases.push_back(CB);
12445 MachineBasicBlock *SwitchMBB =
FuncInfo.MBB;
12453 unsigned PeeledCaseIndex = 0;
12454 bool SwitchPeeled =
false;
12455 for (
unsigned Index = 0;
Index < Clusters.size(); ++
Index) {
12456 CaseCluster &CC = Clusters[
Index];
12457 if (CC.
Prob < TopCaseProb)
12459 TopCaseProb = CC.
Prob;
12460 PeeledCaseIndex =
Index;
12461 SwitchPeeled =
true;
12466 LLVM_DEBUG(
dbgs() <<
"Peeled one top case in switch stmt, prob: "
12467 << TopCaseProb <<
"\n");
12472 MachineBasicBlock *PeeledSwitchMBB =
12474 FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
12477 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
12478 SwitchWorkListItem
W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
12479 nullptr,
nullptr, TopCaseProb.
getCompl()};
12480 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
12482 Clusters.erase(PeeledCaseIt);
12483 for (CaseCluster &CC : Clusters) {
12485 dbgs() <<
"Scale the probablity for one cluster, before scaling: "
12486 << CC.
Prob <<
"\n");
12490 PeeledCaseProb = TopCaseProb;
12491 return PeeledSwitchMBB;
12494void SelectionDAGBuilder::visitSwitch(
const SwitchInst &
SI) {
12496 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
12498 Clusters.reserve(
SI.getNumCases());
12499 for (
auto I :
SI.cases()) {
12500 MachineBasicBlock *Succ =
FuncInfo.getMBB(
I.getCaseSuccessor());
12501 const ConstantInt *CaseVal =
I.getCaseValue();
12502 BranchProbability Prob =
12504 : BranchProbability(1,
SI.getNumCases() + 1);
12508 MachineBasicBlock *DefaultMBB =
FuncInfo.getMBB(
SI.getDefaultDest());
12517 MachineBasicBlock *PeeledSwitchMBB =
12518 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
12521 MachineBasicBlock *SwitchMBB =
FuncInfo.MBB;
12522 if (Clusters.empty()) {
12523 assert(PeeledSwitchMBB == SwitchMBB);
12525 if (DefaultMBB != NextBlock(SwitchMBB)) {
12532 SL->findJumpTables(Clusters, &SI,
getCurSDLoc(), DefaultMBB,
DAG.getPSI(),
12534 SL->findBitTestClusters(Clusters, &SI);
12537 dbgs() <<
"Case clusters: ";
12538 for (
const CaseCluster &
C : Clusters) {
12544 C.Low->getValue().print(
dbgs(),
true);
12545 if (
C.Low !=
C.High) {
12547 C.High->getValue().print(
dbgs(),
true);
12554 assert(!Clusters.empty());
12558 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
12562 DefaultMBB ==
FuncInfo.getMBB(
SI.getDefaultDest()))
12565 {PeeledSwitchMBB,
First,
Last,
nullptr,
nullptr, DefaultProb});
12567 while (!WorkList.
empty()) {
12569 unsigned NumClusters =
W.LastCluster -
W.FirstCluster + 1;
12574 splitWorkItem(WorkList, W,
SI.getCondition(), SwitchMBB);
12578 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, DefaultMBB);
12582void SelectionDAGBuilder::visitStepVector(
const CallInst &
I) {
12583 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12589void SelectionDAGBuilder::visitVectorReverse(
const CallInst &
I) {
12590 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12595 assert(VT ==
V.getValueType() &&
"Malformed vector.reverse!");
12604 SmallVector<int, 8>
Mask;
12606 for (
unsigned i = 0; i != NumElts; ++i)
12607 Mask.push_back(NumElts - 1 - i);
12612void SelectionDAGBuilder::visitVectorDeinterleave(
const CallInst &
I,
12621 EVT OutVT = ValueVTs[0];
12625 for (
unsigned i = 0; i != Factor; ++i) {
12626 assert(ValueVTs[i] == OutVT &&
"Expected VTs to be the same");
12628 DAG.getVectorIdxConstant(OutNumElts * i,
DL));
12634 SDValue Even =
DAG.getVectorShuffle(OutVT,
DL, SubVecs[0], SubVecs[1],
12636 SDValue Odd =
DAG.getVectorShuffle(OutVT,
DL, SubVecs[0], SubVecs[1],
12644 DAG.getVTList(ValueVTs), SubVecs);
12648void SelectionDAGBuilder::visitVectorInterleave(
const CallInst &
I,
12651 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12656 for (
unsigned i = 0; i < Factor; ++i) {
12659 "Expected VTs to be the same");
12677 for (
unsigned i = 0; i < Factor; ++i)
12684void SelectionDAGBuilder::visitFreeze(
const FreezeInst &
I) {
12688 unsigned NumValues = ValueVTs.
size();
12689 if (NumValues == 0)
return;
12694 for (
unsigned i = 0; i != NumValues; ++i)
12699 DAG.getVTList(ValueVTs), Values));
12702void SelectionDAGBuilder::visitVectorSplice(
const CallInst &
I) {
12703 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12715 DAG.getSignedConstant(
12722 uint64_t Idx = (NumElts +
Imm) % NumElts;
12725 SmallVector<int, 8>
Mask;
12726 for (
unsigned i = 0; i < NumElts; ++i)
12727 Mask.push_back(Idx + i);
12755 assert(
MI->getOpcode() == TargetOpcode::COPY &&
12756 "start of copy chain MUST be COPY");
12757 Reg =
MI->getOperand(1).getReg();
12760 assert(
Reg.isVirtual() &&
"expected COPY of virtual register");
12761 MI =
MRI.def_begin(
Reg)->getParent();
12764 if (
MI->getOpcode() == TargetOpcode::COPY) {
12765 assert(
Reg.isVirtual() &&
"expected COPY of virtual register");
12766 Reg =
MI->getOperand(1).getReg();
12767 assert(
Reg.isPhysical() &&
"expected COPY of physical register");
12770 assert(
MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
12771 "end of copy chain MUST be INLINEASM_BR");
12781void SelectionDAGBuilder::visitCallBrLandingPad(
const CallInst &
I) {
12787 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12788 const TargetRegisterInfo *
TRI =
DAG.getSubtarget().getRegisterInfo();
12789 MachineRegisterInfo &
MRI =
DAG.getMachineFunction().getRegInfo();
12797 for (
auto &
T : TargetConstraints) {
12798 SDISelAsmOperandInfo OpInfo(
T);
12806 switch (OpInfo.ConstraintType) {
12817 FuncInfo.MBB->addLiveIn(OriginalDef);
12825 ResultVTs.
push_back(OpInfo.ConstraintVT);
12834 ResultVTs.
push_back(OpInfo.ConstraintVT);
12842 DAG.getVTList(ResultVTs), ResultValues);
unsigned const MachineRegisterInfo * MRI
static unsigned getIntrinsicID(const SDNode *N)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
static Value * getCondition(Instruction *I)
const HexagonInstrInfo * TII
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
static void getRegistersForValue(MachineFunction &MF, MachineIRBuilder &MIRBuilder, GISelAsmOperandInfo &OpInfo, GISelAsmOperandInfo &RefOpInfo)
Assign virtual/physical registers for the specified register operand.
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Machine Check Debug Module
static bool isUndef(const MachineInstr &MI)
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static const Function * getCalledFunction(const Value *V)
This file provides utility analysis objects describing memory locations.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
MachineInstr unsigned OpIdx
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
OptimizedStructLayoutField Field
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static Type * getValueType(Value *V)
Returns the type of the given value/instruction V.
static bool hasOnlySelectUsers(const Value *Cond)
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL, SDValue &Chain)
Create a LOAD_STACK_GUARD node, and let it carry the target specific global variable if there exists ...
static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index, SDValue &Scale, SelectionDAGBuilder *SDB, const BasicBlock *CurBB, uint64_t ElemSize)
static void failForInvalidBundles(const CallBase &I, StringRef Name, ArrayRef< uint32_t > AllowedBundles)
static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx, const SDLoc &DL, SmallVectorImpl< SDValue > &Ops, SelectionDAGBuilder &Builder)
Add a stack map intrinsic call's live variable operands to a stackmap or patchpoint target node's ope...
static const unsigned MaxParallelChains
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
visitPow - Lower a pow intrinsic.
static const CallBase * FindPreallocatedCall(const Value *PreallocatedSetup)
Given a @llvm.call.preallocated.setup, return the corresponding preallocated call.
static cl::opt< unsigned > SwitchPeelThreshold("switch-peel-threshold", cl::Hidden, cl::init(66), cl::desc("Set the case probability threshold for peeling the case from a " "switch statement. A value greater than 100 will void this " "optimization"))
static cl::opt< bool > InsertAssertAlign("insert-assert-align", cl::init(true), cl::desc("Insert the experimental `assertalign` node."), cl::ReallyHidden)
static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin)
static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG, DILocalVariable *Variable, DebugLoc DL, unsigned Order, SmallVectorImpl< Value * > &Values, DIExpression *Expression)
static unsigned findMatchingInlineAsmOperand(unsigned OperandNo, const std::vector< SDValue > &AsmNodeOperands)
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo, SDISelAsmOperandInfo &MatchingOpInfo, SelectionDAG &DAG)
Make sure that the output operand OpInfo and its corresponding input operand MatchingOpInfo have comp...
static void findUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
When an invoke or a cleanupret unwinds to the next EH pad, there are many places it could ultimately ...
static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic)
static BranchProbability scaleCaseProbality(BranchProbability CaseProb, BranchProbability PeeledCaseProb)
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp2 - Lower an exp2 intrinsic.
static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue Scale, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt, const SDLoc &dl)
getF32Constant - Get 32-bit floating point constant.
static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val, const SDLoc &DL, EVT PartVT)
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog10 - Lower a log10 intrinsic.
DenseMap< const Argument *, std::pair< const AllocaInst *, const StoreInst * > > ArgCopyElisionMapTy
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv)
getCopyToPartsVector - Create a series of nodes that contain the specified value split into legal par...
static void getUnderlyingArgRegs(SmallVectorImpl< std::pair< Register, TypeSize > > &Regs, const SDValue &N)
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv=std::nullopt, ISD::NodeType ExtendKind=ISD::ANY_EXTEND)
getCopyToParts - Create a series of nodes that contain the specified value split into legal parts.
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, SelectionDAGBuilder &Builder)
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog2 - Lower a log2 intrinsic.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location, SDISelAsmOperandInfo &OpInfo, SelectionDAG &DAG)
Get a direct memory input to behave well as an indirect operand.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel)
isOnlyUsedInEntryBlock - If the specified argument is only used in the entry block,...
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V, const Twine &ErrMsg)
static bool collectInstructionDeps(SmallMapVector< const Instruction *, bool, 8 > *Deps, const Value *V, SmallMapVector< const Instruction *, bool, 8 > *Necessary=nullptr, unsigned Depth=0)
static void findArgumentCopyElisionCandidates(const DataLayout &DL, FunctionLoweringInfo *FuncInfo, ArgCopyElisionMapTy &ArgCopyElisionCandidates)
Scan the entry block of the function in FuncInfo for arguments that look like copies into a local all...
static bool isFunction(SDValue Op)
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI, const SDLoc &dl)
GetExponent - Get the exponent:
static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg)
static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, SelectionDAG &DAG)
ExpandPowI - Expand a llvm.powi intrinsic.
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog - Lower a log intrinsic.
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC=std::nullopt, std::optional< ISD::NodeType > AssertOp=std::nullopt)
getCopyFromParts - Create a value that contains the specified legal parts combined into the value the...
static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl, SelectionDAG &DAG)
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl)
GetSignificand - Get the significand and build it into a floating-point number with exponent of 1:
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp - Lower an exp intrinsic.
static const MDNode * getRangeMetadata(const Instruction &I)
static cl::opt< unsigned, true > LimitFPPrecision("limit-float-precision", cl::desc("Generate low-precision inline sequences " "for some float libcalls"), cl::location(LimitFloatPrecision), cl::Hidden, cl::init(0))
static void tryToElideArgumentCopy(FunctionLoweringInfo &FuncInfo, SmallVectorImpl< SDValue > &Chains, DenseMap< int, int > &ArgCopyElisionFrameIndexMap, SmallPtrSetImpl< const Instruction * > &ElidedArgCopyInstrs, ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg, ArrayRef< SDValue > ArgVals, bool &ArgHasUses)
Try to elide argument copies from memory into a local alloca.
static unsigned LimitFloatPrecision
LimitFloatPrecision - Generate low-precision inline sequences for some float libcalls (6,...
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC)
getCopyFromPartsVector - Create a value that contains the specified legal parts combined into the val...
static bool InBlock(const Value *V, const BasicBlock *BB)
static LLVM_ATTRIBUTE_ALWAYS_INLINE MVT::SimpleValueType getSimpleVT(const unsigned char *MatcherTable, unsigned &MatcherIndex)
getSimpleVT - Decode a value in MatcherTable, if it's a VBR encoded value, use GetVBR to decode it.
This file defines the SmallPtrSet class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
static const fltSemantics & IEEEsingle()
Class for arbitrary precision integers.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
an instruction to allocate memory on the stack
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
This class represents an incoming formal argument to a Function.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Check if an argument has a given attribute.
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
This class holds the attributes for a particular argument, parameter, function, or return value.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI InstListType::const_iterator getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns an iterator to the first instruction in this block that is not a PHINode or a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
This class represents a no-op cast from one type to another.
The address of a basic block.
Conditional or Unconditional Branch instruction.
Analysis providing branch probability information.
LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
LLVM_ABI bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const
Test if an edge is hot relative to other out-edges of the Src.
static uint32_t getDenominator()
static BranchProbability getOne()
static BranchProbability getUnknown()
uint32_t getNumerator() const
LLVM_ABI uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
CallingConv::ID getCallingConv() const
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI bool isTailCall() const
Tests if this call site is marked as a tail call.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
A constant value that is initialized with an expression using other constant values.
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
A signed pointer, in the ptrauth sense.
uint64_t getZExtValue() const
Constant Vector Declarations.
This is an important base class in LLVM.
This is the common base class for constrained floating point intrinsics.
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI unsigned getNonMetadataArgCount() const
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static bool fragmentsOverlap(const FragmentInfo &A, const FragmentInfo &B)
Check if fragments overlap between a pair of FragmentInfos.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
LLVM_ABI uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static LLVM_ABI std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
static LLVM_ABI const DIExpression * convertToUndefExpression(const DIExpression *Expr)
Removes all elements from Expr that do not apply to an undef debug value, which includes every operat...
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static LLVM_ABI DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
Base class for variables.
LLVM_ABI std::optional< uint64_t > getSizeInBits() const
Determines the size of the variable's type.
A parsed version of the target data layout string in and methods for querying it.
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LocationType getType() const
DIExpression * getExpression() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
LLVM_ABI iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
LLVM_ABI DILocation * getInlinedAt() const
iterator find(const_arg_type_t< KeyT > Val)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Diagnostic information for inline asm reporting.
static constexpr ElementCount getFixed(ScalarTy MinVal)
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
constexpr bool isScalar() const
Exactly one element.
Lightweight error class with error context and mandatory checking.
Class representing an expression and its matching format.
This instruction compares its operands according to the predicate given to the constructor.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
bool allowReassoc() const
Flag queries.
An instruction for ordering other memory operations.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
BranchProbabilityInfo * BPI
MachineBasicBlock * getMBB(const BasicBlock *BB) const
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block.
const LiveOutInfo * GetLiveOutRegInfo(Register Reg)
GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the register is a PHI destinat...
MachineBasicBlock * MBB
MBB - The current block.
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
Type * getReturnType() const
Data structure describing the variable locations in a function.
const BasicBlock & getEntryBlock() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const
check if an attributes is in the list of attributes.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Constant * getPersonalityFn() const
Get the personality function associated with this function.
AttributeList getAttributes() const
Return the attribute list for this Function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Garbage collection metadata for a single function.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
This instruction inserts a struct field of array element value into an aggregate value.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
@ MIN_INT_BITS
Minimum number of bits that can be specified.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
@ OB_clang_arc_attachedcall
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
A helper class to return the specified delimiter string after the first invocation of operator String...
An instruction for reading from memory.
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
LLVM_ABI MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
@ INVALID_SIMPLE_VALUE_TYPE
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool bitsGE(MVT VT) const
Return true if this has no less bits than VT.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
succ_iterator succ_begin()
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void setIsEHContTarget(bool V=true)
Indicates if this is a target of Windows EH Continuation Guard.
void setIsEHFuncletEntry(bool V=true)
Indicates if this is the entry block of an EH funclet.
MachineInstrBundleIterator< MachineInstr > iterator
void setIsEHScopeEntry(bool V=true)
Indicates if this is the entry block of an EH scope, i.e., the block that used to have a catchpa...
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable)
Marks the immutability of an object.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
void setIsAliasedObjectIndex(int ObjectIdx, bool IsAliased)
Set "maybe pointed to by an LLVM IR value" for an object.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
void RemoveStackObject(int ObjectIdx)
Remove or mark dead a statically sized stack object.
void setFunctionContextIndex(int I)
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site)
Map the begin label for a call site.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD)
Record annotations associated with a particular label.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
bool hasEHFunclets() const
void setHasEHContTarget(bool V)
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
static MachineOperand CreateFI(int Idx)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MCRegister getLiveInPhysReg(Register VReg) const
getLiveInPhysReg - If VReg is a live-in virtual register, return the corresponding live-in physical r...
An SDNode that represents everything that will be needed to construct a MachineInstr.
bool contains(const KeyT &Key) const
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
static MemoryLocation getAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location after Ptr, while remaining within the underlying objec...
A Module instance is used to store all the information related to an LLVM module.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
Holds the information from a dbg_label node through SDISel.
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(Register VReg)
static SDDbgOperand fromConst(const Value *Const)
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
SelectionDAGBuilder - This is the common target-independent lowering implementation that is parameter...
SDValue getValue(const Value *V)
getValue - Return an SDValue for the given Value.
DenseMap< const Constant *, Register > ConstantsOut
void addDanglingDebugInfo(SmallVectorImpl< Value * > &Values, DILocalVariable *Var, DIExpression *Expr, bool IsVariadic, DebugLoc DL, unsigned Order)
Register a dbg_value which relies on a Value which we have not yet seen.
void visitDbgInfo(const Instruction &I)
void clearDanglingDebugInfo()
Clear the dangling debug information map.
void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall, bool IsMustTailCall, const BasicBlock *EHPadBB=nullptr, const TargetLowering::PtrAuthInfo *PAI=nullptr)
void clear()
Clear out the current SelectionDAG and the associated state and prepare this SelectionDAGBuilder obje...
void visitBitTestHeader(SwitchCG::BitTestBlock &B, MachineBasicBlock *SwitchBB)
visitBitTestHeader - This function emits necessary code to produce value suitable for "bit tests"
void LowerStatepoint(const GCStatepointInst &I, const BasicBlock *EHPadBB=nullptr)
std::unique_ptr< SDAGSwitchLowering > SL
SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I, SDValue Op)
bool HasTailCall
This is set to true if a call in the current block has been translated as a tail call.
bool ShouldEmitAsBranches(const std::vector< SwitchCG::CaseBlock > &Cases)
If the set of cases should be emitted as a series of branches, return true.
void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
EmitBranchForMergedCondition - Helper method for FindMergedConditions.
void LowerDeoptimizeCall(const CallInst *CI)
void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee, const BasicBlock *EHPadBB)
SwiftErrorValueTracking & SwiftError
Information about the swifterror values used throughout the function.
SDValue getNonRegisterValue(const Value *V)
getNonRegisterValue - Return an SDValue for the given Value, but don't look in FuncInfo....
DenseMap< MachineBasicBlock *, SmallVector< unsigned, 4 > > LPadToCallSiteMap
Map a landing pad to the call site indexes.
void handleDebugDeclare(Value *Address, DILocalVariable *Variable, DIExpression *Expression, DebugLoc DL)
bool shouldKeepJumpConditionsTogether(const FunctionLoweringInfo &FuncInfo, const BranchInst &I, Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs, TargetLoweringBase::CondMergingParams Params) const
StatepointLoweringState StatepointLowering
State used while lowering a statepoint sequence (gc_statepoint, gc_relocate, and gc_result).
void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB, BranchProbability BranchProbToNext, Register Reg, SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB)
visitBitTestCase - this function produces one "bit test"
bool canTailCall(const CallBase &CB) const
void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI, const CallBase *Call, unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy, AttributeSet RetAttrs, bool IsPatchPoint)
Populate a CallLoweringInfo (into CLI) based on the properties of the call being lowered.
void CopyValueToVirtualRegister(const Value *V, Register Reg, ISD::NodeType ExtendType=ISD::ANY_EXTEND)
void salvageUnresolvedDbgValue(const Value *V, DanglingDebugInfo &DDI)
For the given dangling debuginfo record, perform last-ditch efforts to resolve the debuginfo to somet...
SmallVector< SDValue, 8 > PendingLoads
Loads are not emitted to the program immediately.
GCFunctionInfo * GFI
Garbage collection metadata for the function.
SDValue getRoot()
Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict) items.
void ExportFromCurrentBlock(const Value *V)
ExportFromCurrentBlock - If this condition isn't known to be exported from the current basic block,...
void init(GCFunctionInfo *gfi, BatchAAResults *BatchAA, AssumptionCache *AC, const TargetLibraryInfo *li)
DebugLoc getCurDebugLoc() const
void resolveOrClearDbgInfo()
Evict any dangling debug information, attempting to salvage it first.
std::pair< SDValue, SDValue > lowerInvokable(TargetLowering::CallLoweringInfo &CLI, const BasicBlock *EHPadBB=nullptr)
SDValue getMemoryRoot()
Return the current virtual root of the Selection DAG, flushing any PendingLoad items.
void resolveDanglingDebugInfo(const Value *V, SDValue Val)
If we saw an earlier dbg_value referring to V, generate the debug data structures now that we've seen...
SDLoc getCurSDLoc() const
void visit(const Instruction &I)
void dropDanglingDebugInfo(const DILocalVariable *Variable, const DIExpression *Expr)
If we have dangling debug info that describes Variable, or an overlapping part of variable considerin...
SDValue getCopyFromRegs(const Value *V, Type *Ty)
If there was virtual register allocated for the value V emit CopyFromReg of the specified type Ty.
void CopyToExportRegsIfNeeded(const Value *V)
CopyToExportRegsIfNeeded - If the given value has virtual registers created for it,...
void handleKillDebugValue(DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order)
Create a record for a kill location debug intrinsic.
void visitJumpTable(SwitchCG::JumpTable &JT)
visitJumpTable - Emit JumpTable node in the current MBB
void visitJumpTableHeader(SwitchCG::JumpTable &JT, SwitchCG::JumpTableHeader &JTH, MachineBasicBlock *SwitchBB)
visitJumpTableHeader - This function emits necessary code to produce index in the JumpTable from swit...
void LowerCallSiteWithPtrAuthBundle(const CallBase &CB, const BasicBlock *EHPadBB)
static const unsigned LowestSDNodeOrder
Lowest valid SDNodeOrder.
void LowerDeoptimizingReturn()
FunctionLoweringInfo & FuncInfo
Information about the function as a whole.
void setValue(const Value *V, SDValue NewN)
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, Instruction::BinaryOps Opc, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
const TargetLibraryInfo * LibInfo
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB)
void visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineBasicBlock *ParentBB)
Codegen a new tail for a stack protector check ParentMBB which has had its tail spliced into a stack ...
bool handleDebugValue(ArrayRef< const Value * > Values, DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order, bool IsVariadic)
For a given list of Values, attempt to create and record a SDDbgValue in the SelectionDAG.
SDValue getControlRoot()
Similar to getRoot, but instead of flushing all the PendingLoad items, flush all the PendingExports (...
void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last)
When an MBB was split during scheduling, update the references that need to refer to the last resulti...
SDValue getValueImpl(const Value *V)
getValueImpl - Helper function for getValue and getNonRegisterValue.
void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB)
visitSwitchCase - Emits the necessary code to represent a single node in the binary search tree resul...
void visitSPDescriptorFailure(StackProtectorDescriptor &SPD)
Codegen the failure basic block for a stack protector check.
std::unique_ptr< FunctionLoweringInfo > FuncInfo
SmallPtrSet< const Instruction *, 4 > ElidedArgCopyInstrs
const TargetLowering * TLI
MachineRegisterInfo * RegInfo
std::unique_ptr< SwiftErrorValueTracking > SwiftError
virtual void emitFunctionEntryCode()
std::unique_ptr< SelectionDAGBuilder > SDB
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, SDValue MaxLength, MachinePointerInfo SrcPtrInfo) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, const CallInst *CI) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dest, SDValue Src, MachinePointerInfo DestPtrInfo, MachinePointerInfo SrcPtrInfo, bool isStpcpy) const
Emit target-specific code that performs a strcpy or stpcpy, in cases where that is faster than a libc...
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Src, SDValue Char, SDValue Length, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memchr, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, const CallInst *CI) const
Emit target-specific code that performs a memcmp/bcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a strcmp, in cases where that is faster than a libcall.
virtual SDValue EmitTargetCodeForSetTag(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Addr, SDValue Size, MachinePointerInfo DstPtrInfo, bool ZeroData) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
const TargetSubtargetInfo & getSubtarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI bool shouldOptForSize() const
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
LLVM_ABI void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
LLVM_ABI SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
LLVM_ABI SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void swap(SmallVectorImpl &RHS)
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
MachineBasicBlock * getSuccessMBB()
MachineBasicBlock * getFailureMBB()
MachineBasicBlock * getParentMBB()
bool shouldEmitFunctionBasedCheckStackProtector() const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Provides information about what library functions are available for the current target.
virtual Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Returns the desired alignment for ByVal or InAlloca aggregate function arguments in the caller parame...
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual bool isLegalScaleForGatherScatter(uint64_t Scale, uint64_t ElemSize) const
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vecto...
virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const
Returns true if the index type for a masked gather/scatter requires extending.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
virtual Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const
Certain targets have context sensitive alignment requirements, where one type has the alignment requi...
MachineMemOperand::Flags getVPIntrinsicMemOperandFlags(const VPIntrinsic &VPIntrin) const
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ ...
virtual MVT getFenceOperandTy(const DataLayout &DL) const
Return the type for operands of fence.
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF, bool IsScalable) const
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual MVT hasFastEqualityCompare(unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the give...
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
virtual bool shouldExpandCttzElements(EVT VT) const
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
virtual bool signExtendConstant(const ConstantInt *C) const
Return true if this constant should be sign extended when promoting to a larger type.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
bool supportsUnalignedAtomics() const
Whether the target supports unaligned atomic operations.
std::vector< ArgListEntry > ArgListTy
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool supportKCFIBundles() const
Return true if the target supports kcfi operand bundles.
virtual bool supportPtrAuthBundles() const
Return true if the target supports ptrauth operand bundles.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
virtual SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue &NewLoad, SDValue Ptr, SDValue PassThru, SDValue Mask) const
virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const
Target-specific splitting of values into parts that fit a register storing a legal type.
virtual SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const
Target-specific combining of register parts into its original value.
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to u...
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
virtual SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue Ptr, SDValue Val, SDValue Mask) const
virtual bool useLoadStackGuardNode(const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
virtual bool isInlineAsmTargetBranch(const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use eithe...
virtual MVT getJumpTableRegTy(const DataLayout &DL) const
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &, const Type *RetTy) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
virtual TargetTransformInfo getTargetTransformInfo(const Function &F) const
Return a TargetTransformInfo for a given function.
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
unsigned getID() const
Return the register class ID number.
const MCPhysReg * iterator
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
bool isVoidTy() const
Return true if this is 'void'.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
LLVM_ABI CmpInst::Predicate getPredicate() const
This is the common base class for vector predication intrinsics.
static LLVM_ABI std::optional< unsigned > getVectorLengthParamPos(Intrinsic::ID IntrinsicID)
LLVM_ABI MaybeAlign getPointerAlignment() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Base class of all SIMD vector types.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
const ParentTy * getParent() const
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ X86_VectorCall
MSVC calling convention that passes vectors and vector aggregates in SSE registers.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ LOOP_DEPENDENCE_RAW_MASK
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADD
Simple integer binary arithmetic operators.
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ EH_SJLJ_SETUP_DISPATCH
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ FMULADD
FMULADD - Performs a * b + c, with, or without, intermediate rounding.
@ FPTRUNC_ROUND
FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
@ SIGN_EXTEND
Conversion operators.
@ ADDROFRETURNADDR
ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ SSUBO
Same for subtraction.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor to...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ BasicBlock
Various leaf nodes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ SHL
Shift and rotation operations.
@ AssertNoFPClass
AssertNoFPClass - These nodes record if a register contains a float value that is known to be not som...
@ PtrAuthGlobalAddress
A ptrauth constant.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ LOCAL_RECOVER
LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
@ SPONENTRY
SPONENTRY - Represents the llvm.sponentry intrinsic.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor ...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
@ LOOP_DEPENDENCE_WAR_MASK
Set rounding mode.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
IntrinsicID_match m_VScale()
Matches a call to llvm.vscale().
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
std::pair< JumpTableHeader, JumpTable > JumpTableBlock
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
std::vector< CaseCluster > CaseClusterVector
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
SmallVector< SwitchWorkListItem, 4 > SwitchWorkList
CaseClusterVector::iterator CaseClusterIt
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebMayTrap
This corresponds to "fpexcept.maytrap".
@ ebIgnore
This corresponds to "fpexcept.ignore".
NodeAddr< FuncNode * > Func
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
FunctionAddr VTableAddr Value
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
LLVM_ABI void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
static ConstantRange getRange(Value *Op, SCCPSolver &Solver, const SmallPtrSetImpl< Value * > &InsertedValues)
Helper for getting ranges from Solver.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
auto cast_or_null(const Y &Val)
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
gep_type_iterator gep_type_end(const User *GEP)
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
void ComputeValueTypes(const DataLayout &DL, Type *Ty, SmallVectorImpl< Type * > &Types, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
Given an LLVM IR type, compute non-aggregate subtypes.
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
@ SPF_ABS
Floating point maxnum.
@ SPF_NABS
Absolute value.
@ SPF_FMAXNUM
Floating point minnum.
@ SPF_UMIN
Signed minimum.
@ SPF_UMAX
Signed maximum.
@ SPF_SMAX
Unsigned minimum.
@ SPF_FMINNUM
Unsigned maximum.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
detail::zippy< detail::zip_first, T, U, Args... > zip_first(T &&t, U &&u, Args &&...args)
zip iterator that, for the sake of efficiency, assumes the first iteratee to be the shortest.
void sort(IteratorTy Start, IteratorTy End)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
generic_gep_type_iterator<> gep_type_iterator
FunctionAddr VTableAddr Count
auto succ_size(const MachineBasicBlock *BB)
bool hasSingleElement(ContainerTy &&C)
Returns true if the given container only contains a single element.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Global
Append to llvm.global_dtors.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
FunctionAddr VTableAddr uintptr_t uintptr_t Data
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
LLVM_ABI llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ And
Bitwise or logical AND of integers.
@ Sub
Subtraction of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
@ SPNB_RETURNS_NAN
NaN behavior not applicable.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, returns the non-NaN.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
DWARFExpression::Operation Op
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if w...
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
gep_type_iterator gep_type_begin(const User *GEP)
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list.
LLVM_ABI Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
@ Default
The result values are uniform if and only if all operands are uniform.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
uint64_t getScalarStoreSize() const
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
bool isRISCVVectorTuple() const
Return true if this is a vector value type.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
void setOrigAlign(Align A)
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
A lightweight accessor for an operand bundle meant to be passed around by value.
This struct represents the registers (physical or virtual) that a particular set of values is assigne...
SmallVector< std::pair< Register, TypeSize >, 4 > getRegsAndSizes() const
Return a list of registers and their sizes.
SmallVector< unsigned, 4 > RegCount
This list holds the number of registers for each value.
bool isABIMangled() const
SmallVector< EVT, 4 > ValueVTs
The value types of the values, which may not be legal, and may need be promoted or synthesized from o...
SmallVector< Register, 4 > Regs
This list holds the registers assigned to the values.
void AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching, unsigned MatchingIdx, const SDLoc &dl, SelectionDAG &DAG, std::vector< SDValue > &Ops) const
Add this value to the specified inlineasm node operand list.
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr) const
Emit a series of CopyFromReg nodes that copies from this value and returns the result as a ValueVTs v...
SmallVector< MVT, 4 > RegVTs
The value types of the registers.
void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr, ISD::NodeType PreferredExtendType=ISD::ANY_EXTEND) const
Emit a series of CopyToReg nodes that copies the specified value into the registers specified by this...
std::optional< CallingConv::ID > CallConv
Records if this value needs to be treated in an ABI dependant manner, different to normal type legali...
bool occupiesMultipleRegs() const
Check if the total RegCount is greater than one.
These are IR-level optimization flags that may be propagated to SDNodes.
void copyFMF(const FPMathOperator &FPMO)
Propagate the fast-math-flags from an IR FPMathOperator.
void setUnpredictable(bool b)
bool hasAllowReassociation() const
void setNoUnsignedWrap(bool b)
void setNoSignedWrap(bool b)
A MapVector that performs no allocations if smaller than a certain size.
MachineBasicBlock * Default
BranchProbability DefaultProb
MachineBasicBlock * Parent
bool FallthroughUnreachable
MachineBasicBlock * ThisBB
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...
BranchProbability TrueProb
BranchProbability FalseProb
MachineBasicBlock * TrueBB
MachineBasicBlock * FalseBB
SDLoc DL
The debug location of the instruction this CaseBlock was produced from.
static CaseCluster range(const ConstantInt *Low, const ConstantInt *High, MachineBasicBlock *MBB, BranchProbability Prob)
This contains information for each constraint that we are lowering.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setConvergent(bool Value=true)
CallLoweringInfo & setCFIType(const ConstantInt *Type)
SmallVector< ISD::InputArg, 32 > Ins
bool IsPostTypeLegalization
SmallVector< SDValue, 4 > InVals
Type * OrigRetTy
Original unlegalized return type.
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setIsPreallocated(bool Value=true)
CallLoweringInfo & setConvergenceControlToken(SDValue Token)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setPtrAuth(PtrAuthInfo Value)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setDiscardResult(bool Value=true)
This structure contains the information necessary for lowering pointer-authenticating indirect calls.
void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin, MCSymbol *InvokeEnd)