79#include "llvm/IR/IntrinsicsAArch64.h"
80#include "llvm/IR/IntrinsicsAMDGPU.h"
81#include "llvm/IR/IntrinsicsWebAssembly.h"
114#define DEBUG_TYPE "isel"
122 cl::desc(
"Insert the experimental `assertalign` node."),
127 cl::desc(
"Generate low-precision inline sequences "
128 "for some float libcalls"),
134 cl::desc(
"Set the case probability threshold for peeling the case from a "
135 "switch statement. A value greater than 100 will void this "
155 const SDValue *Parts,
unsigned NumParts,
158 std::optional<CallingConv::ID> CC);
167 unsigned NumParts,
MVT PartVT,
EVT ValueVT,
const Value *V,
169 std::optional<CallingConv::ID> CC = std::nullopt,
170 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
174 PartVT, ValueVT, CC))
181 assert(NumParts > 0 &&
"No parts to assemble!");
192 unsigned RoundBits = PartBits * RoundParts;
193 EVT RoundVT = RoundBits == ValueBits ?
199 if (RoundParts > 2) {
203 PartVT, HalfVT, V, InChain);
205 Lo = DAG.
getNode(ISD::BITCAST,
DL, HalfVT, Parts[0]);
206 Hi = DAG.
getNode(ISD::BITCAST,
DL, HalfVT, Parts[1]);
214 if (RoundParts < NumParts) {
216 unsigned OddParts = NumParts - RoundParts;
219 OddVT, V, InChain, CC);
235 assert(ValueVT ==
EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
246 !PartVT.
isVector() &&
"Unexpected split");
258 if (PartEVT == ValueVT)
262 ValueVT.
bitsLT(PartEVT)) {
271 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
275 if (ValueVT.
bitsLT(PartEVT)) {
280 Val = DAG.
getNode(*AssertOp,
DL, PartEVT, Val,
295 llvm::Attribute::StrictFP)) {
297 DAG.
getVTList(ValueVT, MVT::Other), InChain, Val,
304 return DAG.
getNode(ISD::FP_EXTEND,
DL, ValueVT, Val);
309 if (PartEVT == MVT::x86mmx && ValueVT.
isInteger() &&
310 ValueVT.
bitsLT(PartEVT)) {
311 Val = DAG.
getNode(ISD::BITCAST,
DL, MVT::i64, Val);
319 const Twine &ErrMsg) {
322 return Ctx.emitError(ErrMsg);
325 if (CI->isInlineAsm()) {
327 *CI, ErrMsg +
", possible invalid constraint for vector type"));
330 return Ctx.emitError(
I, ErrMsg);
339 const SDValue *Parts,
unsigned NumParts,
342 std::optional<CallingConv::ID> CallConv) {
344 assert(NumParts > 0 &&
"No parts to assemble!");
345 const bool IsABIRegCopy = CallConv.has_value();
354 unsigned NumIntermediates;
359 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT,
360 NumIntermediates, RegisterVT);
364 NumIntermediates, RegisterVT);
367 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
369 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
372 "Part type sizes don't match!");
376 if (NumIntermediates == NumParts) {
379 for (
unsigned i = 0; i != NumParts; ++i)
381 V, InChain, CallConv);
382 }
else if (NumParts > 0) {
385 assert(NumParts % NumIntermediates == 0 &&
386 "Must expand into a divisible number of parts!");
387 unsigned Factor = NumParts / NumIntermediates;
388 for (
unsigned i = 0; i != NumIntermediates; ++i)
390 IntermediateVT, V, InChain, CallConv);
405 DL, BuiltVectorTy,
Ops);
411 if (PartEVT == ValueVT)
417 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
427 "Cannot narrow, it would be a lossy transformation");
433 if (PartEVT == ValueVT)
436 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
440 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
451 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
457 return DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
458 }
else if (ValueVT.
bitsLT(PartEVT)) {
467 *DAG.
getContext(), V,
"non-trivial scalar-to-vector conversion");
476 Val = DAG.
getNode(ISD::BITCAST,
DL, ValueSVT, Val);
498 std::optional<CallingConv::ID> CallConv);
505 unsigned NumParts,
MVT PartVT,
const Value *V,
506 std::optional<CallingConv::ID> CallConv = std::nullopt,
520 unsigned OrigNumParts = NumParts;
522 "Copying to an illegal type!");
528 EVT PartEVT = PartVT;
529 if (PartEVT == ValueVT) {
530 assert(NumParts == 1 &&
"No-op copy with multiple parts!");
539 assert(NumParts == 1 &&
"Do not know what to promote to!");
540 Val = DAG.
getNode(ISD::FP_EXTEND,
DL, PartVT, Val);
546 Val = DAG.
getNode(ISD::BITCAST,
DL, ValueVT, Val);
550 "Unknown mismatch!");
552 Val = DAG.
getNode(ExtendKind,
DL, ValueVT, Val);
553 if (PartVT == MVT::x86mmx)
554 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
558 assert(NumParts == 1 && PartEVT != ValueVT);
559 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
564 "Unknown mismatch!");
567 if (PartVT == MVT::x86mmx)
568 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
574 "Failed to tile the value with PartVT!");
577 if (PartEVT != ValueVT) {
579 "scalar-to-vector conversion failed");
580 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
588 if (NumParts & (NumParts - 1)) {
591 "Do not know what to expand to!");
593 unsigned RoundBits = RoundParts * PartBits;
594 unsigned OddParts = NumParts - RoundParts;
603 std::reverse(Parts + RoundParts, Parts + NumParts);
605 NumParts = RoundParts;
617 for (
unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
618 for (
unsigned i = 0; i < NumParts; i += StepSize) {
619 unsigned ThisBits = StepSize * PartBits / 2;
622 SDValue &Part1 = Parts[i+StepSize/2];
629 if (ThisBits == PartBits && ThisVT != PartVT) {
630 Part0 = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Part0);
631 Part1 = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Part1);
637 std::reverse(Parts, Parts + OrigNumParts);
659 if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
661 "Cannot widen to illegal type");
664 }
else if (PartEVT != ValueEVT) {
679 Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
690 std::optional<CallingConv::ID> CallConv) {
694 const bool IsABIRegCopy = CallConv.has_value();
697 EVT PartEVT = PartVT;
698 if (PartEVT == ValueVT) {
702 Val = DAG.
getNode(ISD::BITCAST,
DL, PartVT, Val);
737 Val = DAG.
getNode(ISD::FP_EXTEND,
DL, PartVT, Val);
744 "lossy conversion of vector to scalar type");
759 unsigned NumIntermediates;
763 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
768 NumIntermediates, RegisterVT);
771 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
773 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
776 "Mixing scalable and fixed vectors when copying in parts");
778 std::optional<ElementCount> DestEltCnt;
788 if (ValueVT == BuiltVectorTy) {
792 Val = DAG.
getNode(ISD::BITCAST,
DL, BuiltVectorTy, Val);
812 for (
unsigned i = 0; i != NumIntermediates; ++i) {
827 if (NumParts == NumIntermediates) {
830 for (
unsigned i = 0; i != NumParts; ++i)
832 }
else if (NumParts > 0) {
835 assert(NumIntermediates != 0 &&
"division by zero");
836 assert(NumParts % NumIntermediates == 0 &&
837 "Must expand into a divisible number of parts!");
838 unsigned Factor = NumParts / NumIntermediates;
839 for (
unsigned i = 0; i != NumIntermediates; ++i)
847 if (
I.hasOperandBundlesOtherThan(AllowedBundles)) {
851 for (
unsigned i = 0, e =
I.getNumOperandBundles(); i != e; ++i) {
854 OS << LS << U.getTagName();
857 Twine(
"cannot lower ", Name)
863 EVT valuevt, std::optional<CallingConv::ID> CC)
869 std::optional<CallingConv::ID> CC) {
883 for (
unsigned i = 0; i != NumRegs; ++i)
884 Regs.push_back(Reg + i);
885 RegVTs.push_back(RegisterVT);
887 Reg = Reg.id() + NumRegs;
914 for (
unsigned i = 0; i != NumRegs; ++i) {
920 *Glue =
P.getValue(2);
923 Chain =
P.getValue(1);
951 EVT FromVT(MVT::Other);
955 }
else if (NumSignBits > 1) {
963 assert(FromVT != MVT::Other);
969 RegisterVT, ValueVT, V, Chain,
CallConv);
985 unsigned NumRegs =
Regs.size();
999 NumParts, RegisterVT, V,
CallConv, ExtendKind);
1005 for (
unsigned i = 0; i != NumRegs; ++i) {
1017 if (NumRegs == 1 || Glue)
1028 Chain = Chains[NumRegs-1];
1034 unsigned MatchingIdx,
const SDLoc &dl,
1036 std::vector<SDValue> &
Ops)
const {
1041 Flag.setMatchingOp(MatchingIdx);
1042 else if (!
Regs.empty() &&
Regs.front().isVirtual()) {
1050 Flag.setRegClass(RC->
getID());
1061 "No 1:1 mapping from clobbers to regs?");
1064 for (
unsigned I = 0, E =
ValueVTs.size();
I != E; ++
I) {
1069 "If we clobbered the stack pointer, MFI should know about it.");
1078 for (
unsigned i = 0; i != NumRegs; ++i) {
1079 assert(Reg <
Regs.size() &&
"Mismatch in # registers expected");
1091 unsigned RegCount = std::get<0>(CountAndVT);
1092 MVT RegisterVT = std::get<1>(CountAndVT);
1110 SL->init(
DAG.getTargetLoweringInfo(), TM,
DAG.getDataLayout());
1112 *
DAG.getMachineFunction().getFunction().getParent());
1117 UnusedArgNodeMap.clear();
1119 PendingExports.clear();
1120 PendingConstrainedFP.clear();
1121 PendingConstrainedFPStrict.clear();
1129 DanglingDebugInfoMap.clear();
1136 if (Pending.
empty())
1142 unsigned i = 0, e = Pending.
size();
1143 for (; i != e; ++i) {
1145 if (Pending[i].
getNode()->getOperand(0) == Root)
1153 if (Pending.
size() == 1)
1180 if (!PendingConstrainedFPStrict.empty()) {
1181 assert(PendingConstrainedFP.empty());
1182 updateRoot(PendingConstrainedFPStrict);
1195 if (!PendingConstrainedFP.empty()) {
1196 assert(PendingConstrainedFPStrict.empty());
1197 updateRoot(PendingConstrainedFP);
1201 return DAG.getRoot();
1209 PendingConstrainedFP.size() +
1210 PendingConstrainedFPStrict.size());
1212 PendingConstrainedFP.end());
1213 PendingLoads.append(PendingConstrainedFPStrict.begin(),
1214 PendingConstrainedFPStrict.end());
1215 PendingConstrainedFP.clear();
1216 PendingConstrainedFPStrict.clear();
1223 PendingExports.append(PendingConstrainedFPStrict.begin(),
1224 PendingConstrainedFPStrict.end());
1225 PendingConstrainedFPStrict.clear();
1226 return updateRoot(PendingExports);
1233 assert(Variable &&
"Missing variable");
1240 <<
"dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
1256 if (IsParameter && FINode) {
1258 SDV =
DAG.getFrameIndexDbgValue(Variable,
Expression, FINode->getIndex(),
1259 true,
DL, SDNodeOrder);
1264 FuncArgumentDbgValueKind::Declare,
N);
1267 SDV =
DAG.getDbgValue(Variable,
Expression,
N.getNode(),
N.getResNo(),
1268 true,
DL, SDNodeOrder);
1270 DAG.AddDbgValue(SDV, IsParameter);
1275 FuncArgumentDbgValueKind::Declare,
N)) {
1277 <<
" (could not emit func-arg dbg_value)\n");
1288 for (
auto It = FnVarLocs->locs_begin(&
I), End = FnVarLocs->locs_end(&
I);
1290 auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1292 if (It->Values.isKillLocation(It->Expr)) {
1298 It->Values.hasArgList())) {
1301 FnVarLocs->getDILocalVariable(It->VariableID),
1302 It->Expr, Vals.
size() > 1, It->DL, SDNodeOrder);
1315 bool SkipDbgVariableRecords =
DAG.getFunctionVarLocs();
1318 for (
DbgRecord &DR :
I.getDbgRecordRange()) {
1320 assert(DLR->getLabel() &&
"Missing label");
1322 DAG.getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);
1323 DAG.AddDbgLabel(SDV);
1327 if (SkipDbgVariableRecords)
1335 if (
FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
1337 LLVM_DEBUG(
dbgs() <<
"SelectionDAG visiting dbg_declare: " << DVR
1346 if (Values.
empty()) {
1363 SDNodeOrder, IsVariadic)) {
1374 if (
I.isTerminator()) {
1375 HandlePHINodesInSuccessorBlocks(
I.getParent());
1382 bool NodeInserted =
false;
1383 std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1384 MDNode *PCSectionsMD =
I.getMetadata(LLVMContext::MD_pcsections);
1385 MDNode *MMRA =
I.getMetadata(LLVMContext::MD_mmra);
1386 if (PCSectionsMD || MMRA) {
1387 InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1388 DAG, [&](
SDNode *) { NodeInserted =
true; });
1398 if (PCSectionsMD || MMRA) {
1399 auto It = NodeMap.find(&
I);
1400 if (It != NodeMap.end()) {
1402 DAG.addPCSections(It->second.getNode(), PCSectionsMD);
1404 DAG.addMMRAMetadata(It->second.getNode(), MMRA);
1405 }
else if (NodeInserted) {
1408 errs() <<
"warning: loosing !pcsections and/or !mmra metadata ["
1409 <<
I.getModule()->getName() <<
"]\n";
1418void SelectionDAGBuilder::visitPHI(
const PHINode &) {
1428#define HANDLE_INST(NUM, OPCODE, CLASS) \
1429 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1430#include "llvm/IR/Instruction.def"
1442 for (
const Value *V : Values) {
1467 DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr,
DL, Order);
1472 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1473 DIVariable *DanglingVariable = DDI.getVariable();
1475 if (DanglingVariable == Variable && Expr->
fragmentsOverlap(DanglingExpr)) {
1477 << printDDI(
nullptr, DDI) <<
"\n");
1483 for (
auto &DDIMI : DanglingDebugInfoMap) {
1484 DanglingDebugInfoVector &DDIV = DDIMI.second;
1488 for (
auto &DDI : DDIV)
1489 if (isMatchingDbgValue(DDI))
1492 erase_if(DDIV, isMatchingDbgValue);
1500 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1501 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1504 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1505 for (
auto &DDI : DDIV) {
1508 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1511 assert(Variable->isValidLocationForIntrinsic(
DL) &&
1512 "Expected inlined-at fields to agree");
1521 if (!EmitFuncArgumentDbgValue(V, Variable, Expr,
DL,
1522 FuncArgumentDbgValueKind::Value, Val)) {
1524 << printDDI(V, DDI) <<
"\n");
1531 <<
"changing SDNodeOrder from " << DbgSDNodeOrder <<
" to "
1532 << ValSDNodeOrder <<
"\n");
1533 SDV = getDbgValue(Val, Variable, Expr,
DL,
1534 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1535 DAG.AddDbgValue(SDV,
false);
1539 <<
" in EmitFuncArgumentDbgValue\n");
1541 LLVM_DEBUG(
dbgs() <<
"Dropping debug info for " << printDDI(V, DDI)
1545 DAG.getConstantDbgValue(Variable, Expr,
Poison,
DL, DbgSDNodeOrder);
1546 DAG.AddDbgValue(SDV,
false);
1553 DanglingDebugInfo &DDI) {
1558 const Value *OrigV = V;
1562 unsigned SDOrder = DDI.getSDNodeOrder();
1566 bool StackValue =
true;
1591 if (!AdditionalValues.
empty())
1601 dbgs() <<
"Salvaged debug location info for:\n " << *Var <<
"\n"
1602 << *OrigV <<
"\nBy stripping back to:\n " << *V <<
"\n");
1610 assert(OrigV &&
"V shouldn't be null");
1612 auto *SDV =
DAG.getConstantDbgValue(Var, Expr,
Poison,
DL, SDNodeOrder);
1613 DAG.AddDbgValue(SDV,
false);
1615 << printDDI(OrigV, DDI) <<
"\n");
1632 unsigned Order,
bool IsVariadic) {
1637 if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
1642 for (
const Value *V : Values) {
1652 if (CE->getOpcode() == Instruction::IntToPtr) {
1671 N = UnusedArgNodeMap[V];
1676 EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1677 FuncArgumentDbgValueKind::Value,
N))
1704 bool IsParamOfFunc =
1712 auto VMI =
FuncInfo.ValueMap.find(V);
1713 if (VMI !=
FuncInfo.ValueMap.end()) {
1718 V->getType(), std::nullopt);
1724 unsigned BitsToDescribe = 0;
1726 BitsToDescribe = *VarSize;
1728 BitsToDescribe = Fragment->SizeInBits;
1731 if (
Offset >= BitsToDescribe)
1734 unsigned RegisterSize = RegAndSize.second;
1735 unsigned FragmentSize = (
Offset + RegisterSize > BitsToDescribe)
1736 ? BitsToDescribe -
Offset
1739 Expr,
Offset, FragmentSize);
1743 Var, *FragmentExpr, RegAndSize.first,
false, DbgLoc, Order);
1744 DAG.AddDbgValue(SDV,
false);
1760 DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1761 false, DbgLoc, Order, IsVariadic);
1762 DAG.AddDbgValue(SDV,
false);
1768 for (
auto &Pair : DanglingDebugInfoMap)
1769 for (
auto &DDI : Pair.second)
1780 if (It !=
FuncInfo.ValueMap.end()) {
1784 DAG.getDataLayout(), InReg, Ty,
1801 if (
N.getNode())
return N;
1861 return DAG.getSplatBuildVector(
1864 return DAG.getConstant(*CI,
DL, VT);
1873 getValue(CPA->getAddrDiscriminator()),
1874 getValue(CPA->getDiscriminator()));
1890 visit(CE->getOpcode(), *CE);
1892 assert(N1.
getNode() &&
"visit didn't populate the NodeMap!");
1898 for (
const Use &U :
C->operands()) {
1904 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1905 Constants.push_back(
SDValue(Val, i));
1914 for (
uint64_t i = 0, e = CDS->getNumElements(); i != e; ++i) {
1918 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1927 if (
C->getType()->isStructTy() ||
C->getType()->isArrayTy()) {
1929 "Unknown struct or array constant!");
1933 unsigned NumElts = ValueVTs.
size();
1937 for (
unsigned i = 0; i != NumElts; ++i) {
1938 EVT EltVT = ValueVTs[i];
1940 Constants[i] =
DAG.getUNDEF(EltVT);
1951 return DAG.getBlockAddress(BA, VT);
1954 return getValue(Equiv->getGlobalValue());
1959 if (VT == MVT::aarch64svcount) {
1960 assert(
C->isNullValue() &&
"Can only zero this target type!");
1966 assert(
C->isNullValue() &&
"Can only zero this target type!");
1983 for (
unsigned i = 0; i != NumElements; ++i)
2011 return DAG.getFrameIndex(
2019 std::optional<CallingConv::ID> CallConv;
2021 if (CB && !CB->isInlineAsm())
2022 CallConv = CB->getCallingConv();
2025 Inst->getType(), CallConv);
2039void SelectionDAGBuilder::visitCatchPad(
const CatchPadInst &
I) {
2052 if (IsMSVCCXX || IsCoreCLR)
2058 MachineBasicBlock *TargetMBB =
FuncInfo.getMBB(
I.getSuccessor());
2059 FuncInfo.MBB->addSuccessor(TargetMBB);
2066 if (TargetMBB != NextBlock(
FuncInfo.MBB) ||
2075 DAG.getMachineFunction().setHasEHContTarget(
true);
2081 Value *ParentPad =
I.getCatchSwitchParentPad();
2084 SuccessorColor = &
FuncInfo.Fn->getEntryBlock();
2087 assert(SuccessorColor &&
"No parent funclet for catchret!");
2088 MachineBasicBlock *SuccessorColorMBB =
FuncInfo.getMBB(SuccessorColor);
2089 assert(SuccessorColorMBB &&
"No MBB for SuccessorColor!");
2094 DAG.getBasicBlock(SuccessorColorMBB));
2098void SelectionDAGBuilder::visitCleanupPad(
const CleanupPadInst &CPI) {
2104 FuncInfo.MBB->setIsEHFuncletEntry();
2105 FuncInfo.MBB->setIsCleanupFuncletEntry();
2134 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2140 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2141 UnwindDests.back().first->setIsEHScopeEntry();
2144 UnwindDests.back().first->setIsEHFuncletEntry();
2148 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2149 UnwindDests.emplace_back(FuncInfo.
getMBB(CatchPadBB), Prob);
2151 if (IsMSVCCXX || IsCoreCLR)
2152 UnwindDests.back().first->setIsEHFuncletEntry();
2154 UnwindDests.back().first->setIsEHScopeEntry();
2156 NewEHPadBB = CatchSwitch->getUnwindDest();
2162 if (BPI && NewEHPadBB)
2164 EHPadBB = NewEHPadBB;
2171 auto UnwindDest =
I.getUnwindDest();
2172 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
2173 BranchProbability UnwindDestProb =
2178 for (
auto &UnwindDest : UnwindDests) {
2179 UnwindDest.first->setIsEHPad();
2180 addSuccessorWithProb(
FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
2182 FuncInfo.MBB->normalizeSuccProbs();
2185 MachineBasicBlock *CleanupPadMBB =
2186 FuncInfo.getMBB(
I.getCleanupPad()->getParent());
2192void SelectionDAGBuilder::visitCatchSwitch(
const CatchSwitchInst &CSI) {
2196void SelectionDAGBuilder::visitRet(
const ReturnInst &
I) {
2197 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
2198 auto &
DL =
DAG.getDataLayout();
2210 if (
I.getParent()->getTerminatingDeoptimizeCall()) {
2227 SmallVector<uint64_t, 4>
Offsets;
2230 unsigned NumValues = ValueVTs.
size();
2233 Align BaseAlign =
DL.getPrefTypeAlign(
I.getOperand(0)->getType());
2234 for (
unsigned i = 0; i != NumValues; ++i) {
2241 if (MemVTs[i] != ValueVTs[i])
2243 Chains[i] =
DAG.getStore(
2251 MVT::Other, Chains);
2252 }
else if (
I.getNumOperands() != 0) {
2255 unsigned NumValues =
Types.size();
2259 const Function *
F =
I.getParent()->getParent();
2262 I.getOperand(0)->getType(),
F->getCallingConv(),
2266 if (
F->getAttributes().hasRetAttr(Attribute::SExt))
2268 else if (
F->getAttributes().hasRetAttr(Attribute::ZExt))
2271 LLVMContext &
Context =
F->getContext();
2272 bool RetInReg =
F->getAttributes().hasRetAttr(Attribute::InReg);
2274 for (
unsigned j = 0;
j != NumValues; ++
j) {
2287 &Parts[0], NumParts, PartVT, &
I, CC, ExtendKind);
2290 ISD::ArgFlagsTy
Flags = ISD::ArgFlagsTy();
2294 if (
I.getOperand(0)->getType()->isPointerTy()) {
2296 Flags.setPointerAddrSpace(
2300 if (NeedsRegBlock) {
2301 Flags.setInConsecutiveRegs();
2302 if (j == NumValues - 1)
2303 Flags.setInConsecutiveRegsLast();
2311 else if (
F->getAttributes().hasRetAttr(Attribute::NoExt))
2314 for (
unsigned i = 0; i < NumParts; ++i) {
2317 VT, Types[j], 0, 0));
2327 const Function *
F =
I.getParent()->getParent();
2329 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2331 ISD::ArgFlagsTy
Flags = ISD::ArgFlagsTy();
2332 Flags.setSwiftError();
2344 bool isVarArg =
DAG.getMachineFunction().getFunction().isVarArg();
2346 DAG.getMachineFunction().getFunction().getCallingConv();
2347 Chain =
DAG.getTargetLoweringInfo().LowerReturn(
2352 "LowerReturn didn't return a valid chain!");
2363 if (V->getType()->isEmptyTy())
2367 if (VMI !=
FuncInfo.ValueMap.end()) {
2369 "Unused value assigned virtual registers!");
2382 if (
FuncInfo.isExportedInst(V))
return;
2394 if (VI->getParent() == FromBB)
2420 const BasicBlock *SrcBB = Src->getBasicBlock();
2421 const BasicBlock *DstBB = Dst->getBasicBlock();
2425 auto SuccSize = std::max<uint32_t>(
succ_size(SrcBB), 1);
2435 Src->addSuccessorWithoutProb(Dst);
2438 Prob = getEdgeProbability(Src, Dst);
2439 Src->addSuccessor(Dst, Prob);
2445 return I->getParent() == BB;
2469 if (CurBB == SwitchBB ||
2475 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2480 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2482 if (TM.Options.NoNaNsFPMath)
2486 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1),
nullptr,
2488 SL->SwitchCases.push_back(CB);
2497 SL->SwitchCases.push_back(CB);
2505 unsigned Depth = 0) {
2514 if (Necessary !=
nullptr) {
2517 if (Necessary->contains(
I))
2536 if (
I.getNumSuccessors() != 2)
2539 if (!
I.isConditional())
2551 if (BPI !=
nullptr) {
2557 std::optional<bool> Likely;
2560 else if (BPI->
isEdgeHot(
I.getParent(), IfFalse))
2564 if (
Opc == (*Likely ? Instruction::And : Instruction::Or))
2576 if (CostThresh <= 0)
2597 Value *BrCond =
I.getCondition();
2598 auto ShouldCountInsn = [&RhsDeps, &BrCond](
const Instruction *Ins) {
2599 for (
const auto *U : Ins->users()) {
2602 if (UIns != BrCond && !RhsDeps.
contains(UIns))
2615 for (
unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
2617 for (
const auto &InsPair : RhsDeps) {
2618 if (!ShouldCountInsn(InsPair.first)) {
2619 ToDrop = InsPair.first;
2623 if (ToDrop ==
nullptr)
2625 RhsDeps.erase(ToDrop);
2628 for (
const auto &InsPair : RhsDeps) {
2633 CostOfIncluding +=
TTI->getInstructionCost(
2636 if (CostOfIncluding > CostThresh)
2662 const Value *BOpOp0, *BOpOp1;
2676 if (BOpc == Instruction::And)
2677 BOpc = Instruction::Or;
2678 else if (BOpc == Instruction::Or)
2679 BOpc = Instruction::And;
2685 bool BOpIsInOrAndTree = BOpc && BOpc ==
Opc && BOp->
hasOneUse();
2690 TProb, FProb, InvertCond);
2700 if (
Opc == Instruction::Or) {
2721 auto NewTrueProb = TProb / 2;
2722 auto NewFalseProb = TProb / 2 + FProb;
2725 NewFalseProb, InvertCond);
2732 Probs[1], InvertCond);
2734 assert(
Opc == Instruction::And &&
"Unknown merge op!");
2754 auto NewTrueProb = TProb + FProb / 2;
2755 auto NewFalseProb = FProb / 2;
2758 NewFalseProb, InvertCond);
2765 Probs[1], InvertCond);
2774 if (Cases.size() != 2)
return true;
2778 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2779 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2780 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2781 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2787 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2788 Cases[0].CC == Cases[1].CC &&
2791 if (Cases[0].CC ==
ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2793 if (Cases[0].CC ==
ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2800void SelectionDAGBuilder::visitBr(
const BranchInst &
I) {
2806 if (
I.isUnconditional()) {
2812 if (Succ0MBB != NextBlock(BrMBB) ||
2825 const Value *CondVal =
I.getCondition();
2826 MachineBasicBlock *Succ1MBB =
FuncInfo.getMBB(
I.getSuccessor(1));
2845 bool IsUnpredictable =
I.hasMetadata(LLVMContext::MD_unpredictable);
2847 if (!
DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2850 const Value *BOp0, *BOp1;
2853 Opcode = Instruction::And;
2855 Opcode = Instruction::Or;
2862 DAG.getTargetLoweringInfo().getJumpConditionMergingParams(
2863 Opcode, BOp0, BOp1))) {
2865 getEdgeProbability(BrMBB, Succ0MBB),
2866 getEdgeProbability(BrMBB, Succ1MBB),
2871 assert(
SL->SwitchCases[0].ThisBB == BrMBB &&
"Unexpected lowering!");
2875 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i) {
2882 SL->SwitchCases.erase(
SL->SwitchCases.begin());
2888 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i)
2889 FuncInfo.MF->erase(
SL->SwitchCases[i].ThisBB);
2891 SL->SwitchCases.clear();
2897 nullptr, Succ0MBB, Succ1MBB, BrMBB,
getCurSDLoc(),
2918 if (CB.
TrueBB != NextBlock(SwitchBB)) {
2925 auto &TLI =
DAG.getTargetLoweringInfo();
2949 Cond =
DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.
CC);
2961 Cond =
DAG.getSetCC(dl, MVT::i1, CmpOp,
DAG.getConstant(
High, dl, VT),
2965 VT, CmpOp,
DAG.getConstant(
Low, dl, VT));
2966 Cond =
DAG.getSetCC(dl, MVT::i1, SUB,
2981 if (CB.
TrueBB == NextBlock(SwitchBB)) {
2997 BrCond =
DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
3000 DAG.setRoot(BrCond);
3006 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
3007 assert(JT.Reg &&
"Should lower JT Header first!");
3008 EVT PTy =
DAG.getTargetLoweringInfo().getJumpTableRegTy(
DAG.getDataLayout());
3010 SDValue Table =
DAG.getJumpTable(JT.JTI, PTy);
3011 SDValue BrJumpTable =
DAG.getNode(ISD::BR_JT, *JT.SL, MVT::Other,
3012 Index.getValue(1), Table, Index);
3013 DAG.setRoot(BrJumpTable);
3021 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
3022 const SDLoc &dl = *JT.SL;
3028 DAG.getConstant(JTH.
First, dl, VT));
3043 JT.Reg = JumpTableReg;
3051 Sub.getValueType()),
3054 SDValue BrCond =
DAG.getNode(ISD::BRCOND, dl,
3055 MVT::Other, CopyTo, CMP,
3056 DAG.getBasicBlock(JT.Default));
3059 if (JT.MBB != NextBlock(SwitchBB))
3060 BrCond =
DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
3061 DAG.getBasicBlock(JT.MBB));
3063 DAG.setRoot(BrCond);
3066 if (JT.MBB != NextBlock(SwitchBB))
3067 DAG.setRoot(
DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
3068 DAG.getBasicBlock(JT.MBB)));
3070 DAG.setRoot(CopyTo);
3093 if (PtrTy != PtrMemTy)
3109 auto &
DL =
DAG.getDataLayout();
3118 SDValue StackSlotPtr =
DAG.getFrameIndex(FI, PtrTy);
3125 PtrMemTy, dl,
DAG.getEntryNode(), StackSlotPtr,
3138 assert(GuardCheckFn &&
"Guard check function is null");
3149 Entry.IsInReg =
true;
3150 Args.push_back(Entry);
3156 getValue(GuardCheckFn), std::move(Args));
3158 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
3159 DAG.setRoot(Result.second);
3171 Guard =
DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
3177 Guard =
DAG.getPOISON(PtrMemTy);
3187 SDValue BrCond =
DAG.getNode(ISD::BRCOND, dl,
3220 auto &
DL =
DAG.getDataLayout();
3228 SDValue StackSlotPtr =
DAG.getFrameIndex(FI, PtrTy);
3234 PtrMemTy, dl,
DAG.getEntryNode(), StackSlotPtr,
3249 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
3250 Entry.IsInReg =
true;
3251 Args.push_back(Entry);
3257 getValue(GuardCheckFn), std::move(Args));
3263 Chain = TLI.
makeLibCall(
DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
3271 Chain =
DAG.getNode(ISD::TRAP,
getCurSDLoc(), MVT::Other, Chain);
3286 DAG.getNode(
ISD::SUB, dl, VT, SwitchOp,
DAG.getConstant(
B.First, dl, VT));
3290 bool UsePtrType =
false;
3314 if (!
B.FallthroughUnreachable)
3315 addSuccessorWithProb(SwitchBB,
B.Default,
B.DefaultProb);
3316 addSuccessorWithProb(SwitchBB,
MBB,
B.Prob);
3320 if (!
B.FallthroughUnreachable) {
3328 Root =
DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
3329 DAG.getBasicBlock(
B.Default));
3333 if (
MBB != NextBlock(SwitchBB))
3334 Root =
DAG.getNode(ISD::BR, dl, MVT::Other, Root,
DAG.getBasicBlock(
MBB));
3351 if (PopCount == 1) {
3358 }
else if (PopCount == BB.
Range) {
3366 DAG.getConstant(1, dl, VT), ShiftOp);
3370 VT, SwitchVal,
DAG.getConstant(
B.Mask, dl, VT));
3377 addSuccessorWithProb(SwitchBB,
B.TargetBB,
B.ExtraProb);
3379 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3387 Cmp,
DAG.getBasicBlock(
B.TargetBB));
3390 if (NextMBB != NextBlock(SwitchBB))
3391 BrAnd =
DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
3392 DAG.getBasicBlock(NextMBB));
3397void SelectionDAGBuilder::visitInvoke(
const InvokeInst &
I) {
3415 const Value *Callee(
I.getCalledOperand());
3418 visitInlineAsm(
I, EHPadBB);
3423 case Intrinsic::donothing:
3425 case Intrinsic::seh_try_begin:
3426 case Intrinsic::seh_scope_begin:
3427 case Intrinsic::seh_try_end:
3428 case Intrinsic::seh_scope_end:
3434 case Intrinsic::experimental_patchpoint_void:
3435 case Intrinsic::experimental_patchpoint:
3436 visitPatchpoint(
I, EHPadBB);
3438 case Intrinsic::experimental_gc_statepoint:
3444 case Intrinsic::wasm_throw: {
3446 std::array<SDValue, 4>
Ops = {
3457 case Intrinsic::wasm_rethrow: {
3458 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3459 std::array<SDValue, 2>
Ops = {
3468 }
else if (
I.hasDeoptState()) {
3489 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
3490 BranchProbability EHPadBBProb =
3496 addSuccessorWithProb(InvokeMBB, Return);
3497 for (
auto &UnwindDest : UnwindDests) {
3498 UnwindDest.first->setIsEHPad();
3499 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3505 DAG.getBasicBlock(Return)));
3508void SelectionDAGBuilder::visitCallBr(
const CallBrInst &
I) {
3509 MachineBasicBlock *CallBrMBB =
FuncInfo.MBB;
3516 assert(
I.isInlineAsm() &&
"Only know how to handle inlineasm callbr");
3521 SmallPtrSet<BasicBlock *, 8> Dests;
3522 Dests.
insert(
I.getDefaultDest());
3527 for (BasicBlock *Dest :
I.getIndirectDests()) {
3529 Target->setIsInlineAsmBrIndirectTarget();
3535 Target->setLabelMustBeEmitted();
3537 if (Dests.
insert(Dest).second)
3545 DAG.getBasicBlock(Return)));
3548void SelectionDAGBuilder::visitResume(
const ResumeInst &RI) {
3549 llvm_unreachable(
"SelectionDAGBuilder shouldn't visit resume instructions!");
3552void SelectionDAGBuilder::visitLandingPad(
const LandingPadInst &LP) {
3554 "Call to landingpad not in landing pad!");
3558 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3574 assert(ValueVTs.
size() == 2 &&
"Only two-valued landingpads are supported");
3579 if (
FuncInfo.ExceptionPointerVirtReg) {
3580 Ops[0] =
DAG.getZExtOrTrunc(
3581 DAG.getCopyFromReg(
DAG.getEntryNode(), dl,
3588 Ops[1] =
DAG.getZExtOrTrunc(
3589 DAG.getCopyFromReg(
DAG.getEntryNode(), dl,
3596 DAG.getVTList(ValueVTs),
Ops);
3604 if (JTB.first.HeaderBB ==
First)
3605 JTB.first.HeaderBB =
Last;
3618 for (
unsigned i = 0, e =
I.getNumSuccessors(); i != e; ++i) {
3620 bool Inserted =
Done.insert(BB).second;
3625 addSuccessorWithProb(IndirectBrMBB, Succ);
3635 if (!
I.shouldLowerToTrap(
DAG.getTarget().Options.TrapUnreachable,
3636 DAG.getTarget().Options.NoTrapAfterNoreturn))
3642void SelectionDAGBuilder::visitUnary(
const User &
I,
unsigned Opcode) {
3645 Flags.copyFMF(*FPOp);
3653void SelectionDAGBuilder::visitBinary(
const User &
I,
unsigned Opcode) {
3656 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3657 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3660 Flags.setExact(ExactOp->isExact());
3662 Flags.setDisjoint(DisjointOp->isDisjoint());
3664 Flags.copyFMF(*FPOp);
3673void SelectionDAGBuilder::visitShift(
const User &
I,
unsigned Opcode) {
3677 EVT ShiftTy =
DAG.getTargetLoweringInfo().getShiftAmountTy(
3682 if (!
I.getType()->isVectorTy() && Op2.
getValueType() != ShiftTy) {
3684 "Unexpected shift type");
3694 if (
const OverflowingBinaryOperator *OFBinOp =
3696 nuw = OFBinOp->hasNoUnsignedWrap();
3697 nsw = OFBinOp->hasNoSignedWrap();
3699 if (
const PossiblyExactOperator *ExactOp =
3701 exact = ExactOp->isExact();
3704 Flags.setExact(exact);
3705 Flags.setNoSignedWrap(nsw);
3706 Flags.setNoUnsignedWrap(nuw);
3712void SelectionDAGBuilder::visitSDiv(
const User &
I) {
3723void SelectionDAGBuilder::visitICmp(
const ICmpInst &
I) {
3729 auto &TLI =
DAG.getTargetLoweringInfo();
3742 Flags.setSameSign(
I.hasSameSign());
3743 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
3745 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3750void SelectionDAGBuilder::visitFCmp(
const FCmpInst &
I) {
3757 if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
3761 Flags.copyFMF(*FPMO);
3762 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
3764 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3773 return isa<SelectInst>(V);
3777void SelectionDAGBuilder::visitSelect(
const User &
I) {
3781 unsigned NumValues = ValueVTs.
size();
3782 if (NumValues == 0)
return;
3792 bool IsUnaryAbs =
false;
3793 bool Negate =
false;
3797 Flags.copyFMF(*FPOp);
3799 Flags.setUnpredictable(
3804 EVT VT = ValueVTs[0];
3805 LLVMContext &Ctx = *
DAG.getContext();
3806 auto &TLI =
DAG.getTargetLoweringInfo();
3816 bool UseScalarMinMax = VT.
isVector() &&
3825 switch (SPR.Flavor) {
3831 switch (SPR.NaNBehavior) {
3844 switch (SPR.NaNBehavior) {
3888 for (
unsigned i = 0; i != NumValues; ++i) {
3894 Values[i] =
DAG.getNegative(Values[i], dl, VT);
3897 for (
unsigned i = 0; i != NumValues; ++i) {
3901 Values[i] =
DAG.getNode(
3908 DAG.getVTList(ValueVTs), Values));
3911void SelectionDAGBuilder::visitTrunc(
const User &
I) {
3914 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3918 Flags.setNoSignedWrap(Trunc->hasNoSignedWrap());
3919 Flags.setNoUnsignedWrap(Trunc->hasNoUnsignedWrap());
3925void SelectionDAGBuilder::visitZExt(
const User &
I) {
3929 auto &TLI =
DAG.getTargetLoweringInfo();
3934 Flags.setNonNeg(PNI->hasNonNeg());
3939 if (
Flags.hasNonNeg() &&
3948void SelectionDAGBuilder::visitSExt(
const User &
I) {
3952 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3957void SelectionDAGBuilder::visitFPTrunc(
const User &
I) {
3963 Flags.copyFMF(*TruncInst);
3964 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3967 DAG.getTargetConstant(
3972void SelectionDAGBuilder::visitFPExt(
const User &
I) {
3975 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3979 Flags.copyFMF(*TruncInst);
3983void SelectionDAGBuilder::visitFPToUI(
const User &
I) {
3986 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3991void SelectionDAGBuilder::visitFPToSI(
const User &
I) {
3994 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3999void SelectionDAGBuilder::visitUIToFP(
const User &
I) {
4002 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4006 Flags.setNonNeg(PNI->hasNonNeg());
4011void SelectionDAGBuilder::visitSIToFP(
const User &
I) {
4014 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4019void SelectionDAGBuilder::visitPtrToAddr(
const User &
I) {
4022 const auto &TLI =
DAG.getTargetLoweringInfo();
4030void SelectionDAGBuilder::visitPtrToInt(
const User &
I) {
4034 auto &TLI =
DAG.getTargetLoweringInfo();
4035 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4044void SelectionDAGBuilder::visitIntToPtr(
const User &
I) {
4048 auto &TLI =
DAG.getTargetLoweringInfo();
4056void SelectionDAGBuilder::visitBitCast(
const User &
I) {
4059 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4064 if (DestVT !=
N.getValueType())
4072 setValue(&
I,
DAG.getConstant(
C->getValue(), dl, DestVT,
false,
4078void SelectionDAGBuilder::visitAddrSpaceCast(
const User &
I) {
4079 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4080 const Value *SV =
I.getOperand(0);
4085 unsigned DestAS =
I.getType()->getPointerAddressSpace();
4087 if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
4093void SelectionDAGBuilder::visitInsertElement(
const User &
I) {
4094 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4101 InVec, InVal, InIdx));
4104void SelectionDAGBuilder::visitExtractElement(
const User &
I) {
4105 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4114void SelectionDAGBuilder::visitShuffleVector(
const User &
I) {
4119 Mask = SVI->getShuffleMask();
4123 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4127 if (
all_of(Mask, [](
int Elem) {
return Elem == 0; }) &&
4132 DAG.getVectorIdxConstant(0,
DL));
4143 unsigned MaskNumElts =
Mask.size();
4145 if (SrcNumElts == MaskNumElts) {
4151 if (SrcNumElts < MaskNumElts) {
4155 if (MaskNumElts % SrcNumElts == 0) {
4159 unsigned NumConcat = MaskNumElts / SrcNumElts;
4160 bool IsConcat =
true;
4161 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
4162 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4168 if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
4169 (ConcatSrcs[i / SrcNumElts] >= 0 &&
4170 ConcatSrcs[i / SrcNumElts] != (
int)(Idx / SrcNumElts))) {
4175 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
4182 for (
auto Src : ConcatSrcs) {
4195 unsigned PaddedMaskNumElts =
alignTo(MaskNumElts, SrcNumElts);
4196 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
4212 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
4213 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4215 if (Idx >= (
int)SrcNumElts)
4216 Idx -= SrcNumElts - PaddedMaskNumElts;
4224 if (MaskNumElts != PaddedMaskNumElts)
4226 DAG.getVectorIdxConstant(0,
DL));
4232 assert(SrcNumElts > MaskNumElts);
4236 int StartIdx[2] = {-1, -1};
4237 bool CanExtract =
true;
4238 for (
int Idx : Mask) {
4243 if (Idx >= (
int)SrcNumElts) {
4251 int NewStartIdx =
alignDown(Idx, MaskNumElts);
4252 if (NewStartIdx + MaskNumElts > SrcNumElts ||
4253 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
4257 StartIdx[Input] = NewStartIdx;
4260 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
4266 for (
unsigned Input = 0; Input < 2; ++Input) {
4267 SDValue &Src = Input == 0 ? Src1 : Src2;
4268 if (StartIdx[Input] < 0)
4269 Src =
DAG.getUNDEF(VT);
4272 DAG.getVectorIdxConstant(StartIdx[Input],
DL));
4277 SmallVector<int, 8> MappedOps(Mask);
4278 for (
int &Idx : MappedOps) {
4279 if (Idx >= (
int)SrcNumElts)
4280 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4285 setValue(&
I,
DAG.getVectorShuffle(VT,
DL, Src1, Src2, MappedOps));
4294 for (
int Idx : Mask) {
4298 Res =
DAG.getUNDEF(EltVT);
4300 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
4301 if (Idx >= (
int)SrcNumElts) Idx -= SrcNumElts;
4304 DAG.getVectorIdxConstant(Idx,
DL));
4314 ArrayRef<unsigned> Indices =
I.getIndices();
4315 const Value *Op0 =
I.getOperand(0);
4317 Type *AggTy =
I.getType();
4324 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4330 unsigned NumAggValues = AggValueVTs.
size();
4331 unsigned NumValValues = ValValueVTs.
size();
4335 if (!NumAggValues) {
4343 for (; i != LinearIndex; ++i)
4344 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4349 for (; i != LinearIndex + NumValValues; ++i)
4350 Values[i] = FromUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4354 for (; i != NumAggValues; ++i)
4355 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4359 DAG.getVTList(AggValueVTs), Values));
4363 ArrayRef<unsigned> Indices =
I.getIndices();
4364 const Value *Op0 =
I.getOperand(0);
4366 Type *ValTy =
I.getType();
4371 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4375 unsigned NumValValues = ValValueVTs.
size();
4378 if (!NumValValues) {
4387 for (
unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4388 Values[i - LinearIndex] =
4394 DAG.getVTList(ValValueVTs), Values));
4397void SelectionDAGBuilder::visitGetElementPtr(
const User &
I) {
4398 Value *Op0 =
I.getOperand(0);
4404 auto &TLI =
DAG.getTargetLoweringInfo();
4409 bool IsVectorGEP =
I.getType()->isVectorTy();
4410 ElementCount VectorElementCount =
4416 const Value *Idx = GTI.getOperand();
4417 if (StructType *StTy = GTI.getStructTypeOrNull()) {
4422 DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(
Field);
4432 N =
DAG.getMemBasePlusOffset(
4433 N,
DAG.getConstant(
Offset, dl,
N.getValueType()), dl, Flags);
4439 unsigned IdxSize =
DAG.getDataLayout().getIndexSizeInBits(AS);
4441 TypeSize ElementSize =
4442 GTI.getSequentialElementStride(
DAG.getDataLayout());
4447 bool ElementScalable = ElementSize.
isScalable();
4453 C =
C->getSplatValue();
4456 if (CI && CI->isZero())
4458 if (CI && !ElementScalable) {
4459 APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4462 if (
N.getValueType().isVector())
4463 OffsVal =
DAG.getConstant(
4466 OffsVal =
DAG.getConstant(Offs, dl, IdxTy);
4473 Flags.setNoUnsignedWrap(
true);
4476 OffsVal =
DAG.getSExtOrTrunc(OffsVal, dl,
N.getValueType());
4478 N =
DAG.getMemBasePlusOffset(
N, OffsVal, dl, Flags);
4486 if (
N.getValueType().isVector()) {
4488 VectorElementCount);
4489 IdxN =
DAG.getSplat(VT, dl, IdxN);
4493 N =
DAG.getSplat(VT, dl,
N);
4499 IdxN =
DAG.getSExtOrTrunc(IdxN, dl,
N.getValueType());
4501 SDNodeFlags ScaleFlags;
4510 if (ElementScalable) {
4511 EVT VScaleTy =
N.getValueType().getScalarType();
4513 ISD::VSCALE, dl, VScaleTy,
4514 DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4515 if (
N.getValueType().isVector())
4516 VScale =
DAG.getSplatVector(
N.getValueType(), dl, VScale);
4517 IdxN =
DAG.getNode(
ISD::MUL, dl,
N.getValueType(), IdxN, VScale,
4522 if (ElementMul != 1) {
4523 if (ElementMul.isPowerOf2()) {
4524 unsigned Amt = ElementMul.logBase2();
4527 DAG.getShiftAmountConstant(Amt,
N.getValueType(), dl),
4530 SDValue Scale =
DAG.getConstant(ElementMul.getZExtValue(), dl,
4532 IdxN =
DAG.getNode(
ISD::MUL, dl,
N.getValueType(), IdxN, Scale,
4542 SDNodeFlags AddFlags;
4546 N =
DAG.getMemBasePlusOffset(
N, IdxN, dl, AddFlags);
4550 if (IsVectorGEP && !
N.getValueType().isVector()) {
4552 N =
DAG.getSplat(VT, dl,
N);
4563 N =
DAG.getPtrExtendInReg(
N, dl, PtrMemTy);
4568void SelectionDAGBuilder::visitAlloca(
const AllocaInst &
I) {
4575 Type *Ty =
I.getAllocatedType();
4576 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4577 auto &
DL =
DAG.getDataLayout();
4578 TypeSize TySize =
DL.getTypeAllocSize(Ty);
4579 MaybeAlign Alignment = std::max(
DL.getPrefTypeAlign(Ty),
I.getAlign());
4585 AllocSize =
DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4588 AllocSize =
DAG.getNode(
ISD::MUL, dl, IntPtr, AllocSize,
4589 DAG.getVScale(dl, IntPtr,
4595 AllocSize =
DAG.getNode(
ISD::MUL, dl, IntPtr, AllocSize,
4596 DAG.getZExtOrTrunc(TySizeValue, dl, IntPtr));
4602 Align StackAlign =
DAG.getSubtarget().getFrameLowering()->getStackAlign();
4603 if (*Alignment <= StackAlign)
4604 Alignment = std::nullopt;
4606 const uint64_t StackAlignMask = StackAlign.
value() - 1U;
4611 DAG.getConstant(StackAlignMask, dl, IntPtr),
4616 DAG.getSignedConstant(~StackAlignMask, dl, IntPtr));
4620 DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4622 SDValue DSA =
DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs,
Ops);
4630 return I.getMetadata(LLVMContext::MD_range);
4635 if (std::optional<ConstantRange> CR = CB->getRange())
4639 return std::nullopt;
4644 return CB->getRetNoFPClass();
4648void SelectionDAGBuilder::visitLoad(
const LoadInst &
I) {
4650 return visitAtomicLoad(
I);
4652 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4653 const Value *SV =
I.getOperand(0);
4658 if (Arg->hasSwiftErrorAttr())
4659 return visitLoadFromSwiftError(
I);
4663 if (Alloca->isSwiftError())
4664 return visitLoadFromSwiftError(
I);
4670 Type *Ty =
I.getType();
4674 unsigned NumValues = ValueVTs.
size();
4678 Align Alignment =
I.getAlign();
4679 AAMDNodes AAInfo =
I.getAAMetadata();
4681 bool isVolatile =
I.isVolatile();
4686 bool ConstantMemory =
false;
4693 BatchAA->pointsToConstantMemory(MemoryLocation(
4698 Root =
DAG.getEntryNode();
4699 ConstantMemory =
true;
4703 Root =
DAG.getRoot();
4714 unsigned ChainI = 0;
4715 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4731 MachinePointerInfo PtrInfo =
4733 ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue())
4734 : MachinePointerInfo();
4736 SDValue A =
DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4737 SDValue L =
DAG.getLoad(MemVTs[i], dl, Root,
A, PtrInfo, Alignment,
4738 MMOFlags, AAInfo, Ranges);
4739 Chains[ChainI] =
L.getValue(1);
4741 if (MemVTs[i] != ValueVTs[i])
4742 L =
DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]);
4747 if (!ConstantMemory) {
4757 DAG.getVTList(ValueVTs), Values));
4760void SelectionDAGBuilder::visitStoreToSwiftError(
const StoreInst &
I) {
4761 assert(
DAG.getTargetLoweringInfo().supportSwiftError() &&
4762 "call visitStoreToSwiftError when backend supports swifterror");
4765 SmallVector<uint64_t, 4>
Offsets;
4766 const Value *SrcV =
I.getOperand(0);
4768 SrcV->
getType(), ValueVTs,
nullptr, &Offsets, 0);
4769 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4770 "expect a single EVT for swifterror");
4779 SDValue(Src.getNode(), Src.getResNo()));
4780 DAG.setRoot(CopyNode);
4783void SelectionDAGBuilder::visitLoadFromSwiftError(
const LoadInst &
I) {
4784 assert(
DAG.getTargetLoweringInfo().supportSwiftError() &&
4785 "call visitLoadFromSwiftError when backend supports swifterror");
4788 !
I.hasMetadata(LLVMContext::MD_nontemporal) &&
4789 !
I.hasMetadata(LLVMContext::MD_invariant_load) &&
4790 "Support volatile, non temporal, invariant for load_from_swift_error");
4792 const Value *SV =
I.getOperand(0);
4793 Type *Ty =
I.getType();
4796 !
BatchAA->pointsToConstantMemory(MemoryLocation(
4798 I.getAAMetadata()))) &&
4799 "load_from_swift_error should not be constant memory");
4802 SmallVector<uint64_t, 4>
Offsets;
4804 ValueVTs,
nullptr, &Offsets, 0);
4805 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4806 "expect a single EVT for swifterror");
4816void SelectionDAGBuilder::visitStore(
const StoreInst &
I) {
4818 return visitAtomicStore(
I);
4820 const Value *SrcV =
I.getOperand(0);
4821 const Value *PtrV =
I.getOperand(1);
4823 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4828 if (Arg->hasSwiftErrorAttr())
4829 return visitStoreToSwiftError(
I);
4833 if (Alloca->isSwiftError())
4834 return visitStoreToSwiftError(
I);
4841 SrcV->
getType(), ValueVTs, &MemVTs, &Offsets);
4842 unsigned NumValues = ValueVTs.
size();
4855 Align Alignment =
I.getAlign();
4856 AAMDNodes AAInfo =
I.getAAMetadata();
4860 unsigned ChainI = 0;
4861 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4871 MachinePointerInfo PtrInfo =
4873 ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue())
4874 : MachinePointerInfo();
4878 if (MemVTs[i] != ValueVTs[i])
4879 Val =
DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4881 DAG.getStore(Root, dl, Val,
Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4882 Chains[ChainI] = St;
4888 DAG.setRoot(StoreNode);
4891void SelectionDAGBuilder::visitMaskedStore(
const CallInst &
I,
4892 bool IsCompressing) {
4895 Value *Src0Operand =
I.getArgOperand(0);
4896 Value *PtrOperand =
I.getArgOperand(1);
4897 Value *MaskOperand =
I.getArgOperand(2);
4898 Align Alignment =
I.getParamAlign(1).valueOrOne();
4908 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
4911 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
4912 MachinePointerInfo(PtrOperand), MMOFlags,
4915 const auto &TLI =
DAG.getTargetLoweringInfo();
4918 !IsCompressing &&
TTI->hasConditionalLoadStoreForType(
4919 I.getArgOperand(0)->getType(),
true)
4925 DAG.setRoot(StoreNode);
4955 C =
C->getSplatValue();
4969 if (!
GEP ||
GEP->getParent() != CurBB)
4972 if (
GEP->getNumOperands() != 2)
4975 const Value *BasePtr =
GEP->getPointerOperand();
4976 const Value *IndexVal =
GEP->getOperand(
GEP->getNumOperands() - 1);
4982 TypeSize ScaleVal =
DL.getTypeAllocSize(
GEP->getResultElementType());
4987 if (ScaleVal != 1 &&
4999void SelectionDAGBuilder::visitMaskedScatter(
const CallInst &
I) {
5003 const Value *Ptr =
I.getArgOperand(1);
5007 Align Alignment =
I.getParamAlign(1).valueOrOne();
5008 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5017 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5027 EVT IdxVT =
Index.getValueType();
5035 SDValue Scatter =
DAG.getMaskedScatter(
DAG.getVTList(MVT::Other), VT, sdl,
5037 DAG.setRoot(Scatter);
5041void SelectionDAGBuilder::visitMaskedLoad(
const CallInst &
I,
bool IsExpanding) {
5044 Value *PtrOperand =
I.getArgOperand(0);
5045 Value *MaskOperand =
I.getArgOperand(1);
5046 Value *Src0Operand =
I.getArgOperand(2);
5047 Align Alignment =
I.getParamAlign(0).valueOrOne();
5055 AAMDNodes AAInfo =
I.getAAMetadata();
5062 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
5065 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
5067 if (
I.hasMetadata(LLVMContext::MD_invariant_load))
5070 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5071 MachinePointerInfo(PtrOperand), MMOFlags,
5074 const auto &TLI =
DAG.getTargetLoweringInfo();
5081 TTI->hasConditionalLoadStoreForType(Src0Operand->
getType(),
5086 DAG.getMaskedLoad(VT, sdl, InChain, Ptr,
Offset, Mask, Src0, VT, MMO,
5093void SelectionDAGBuilder::visitMaskedGather(
const CallInst &
I) {
5097 const Value *Ptr =
I.getArgOperand(0);
5101 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5103 Align Alignment =
I.getParamAlign(0).valueOrOne();
5114 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5126 EVT IdxVT =
Index.getValueType();
5135 DAG.getMaskedGather(
DAG.getVTList(VT, MVT::Other), VT, sdl,
Ops, MMO,
5151 SDVTList VTs =
DAG.getVTList(MemVT, MVT::i1, MVT::Other);
5153 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5156 MachineFunction &MF =
DAG.getMachineFunction();
5158 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5159 DAG.getEVTAlign(MemVT), AAMDNodes(),
nullptr, SSID, SuccessOrdering,
5162 SDValue L =
DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
5163 dl, MemVT, VTs, InChain,
5171 DAG.setRoot(OutChain);
5174void SelectionDAGBuilder::visitAtomicRMW(
const AtomicRMWInst &
I) {
5177 switch (
I.getOperation()) {
5195 NT = ISD::ATOMIC_LOAD_FMAXIMUM;
5198 NT = ISD::ATOMIC_LOAD_FMINIMUM;
5201 NT = ISD::ATOMIC_LOAD_UINC_WRAP;
5204 NT = ISD::ATOMIC_LOAD_UDEC_WRAP;
5207 NT = ISD::ATOMIC_LOAD_USUB_COND;
5210 NT = ISD::ATOMIC_LOAD_USUB_SAT;
5219 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5222 MachineFunction &MF =
DAG.getMachineFunction();
5224 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5225 DAG.getEVTAlign(MemVT), AAMDNodes(),
nullptr, SSID, Ordering);
5228 DAG.getAtomic(NT, dl, MemVT, InChain,
5235 DAG.setRoot(OutChain);
5238void SelectionDAGBuilder::visitFence(
const FenceInst &
I) {
5240 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5243 Ops[1] =
DAG.getTargetConstant((
unsigned)
I.getOrdering(), dl,
5245 Ops[2] =
DAG.getTargetConstant(
I.getSyncScopeID(), dl,
5252void SelectionDAGBuilder::visitAtomicLoad(
const LoadInst &
I) {
5259 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5270 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5271 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5272 I.getAlign(), AAMDNodes(), Ranges, SSID, Order);
5282 L =
DAG.getPtrExtOrTrunc(L, dl, VT);
5285 DAG.setRoot(OutChain);
5288void SelectionDAGBuilder::visitAtomicStore(
const StoreInst &
I) {
5296 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5306 MachineFunction &MF =
DAG.getMachineFunction();
5308 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5309 I.getAlign(), AAMDNodes(),
nullptr, SSID, Ordering);
5313 Val =
DAG.getPtrExtOrTrunc(Val, dl, MemVT);
5317 DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain, Val, Ptr, MMO);
5320 DAG.setRoot(OutChain);
5328std::pair<bool, bool>
5329SelectionDAGBuilder::getTargetIntrinsicCallProperties(
const CallBase &
I) {
5331 bool HasChain = !
F->doesNotAccessMemory();
5333 HasChain &&
F->onlyReadsMemory() &&
F->willReturn() &&
F->doesNotThrow();
5335 return {HasChain, OnlyLoad};
5339 const CallBase &
I,
bool HasChain,
bool OnlyLoad,
5341 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5348 Ops.push_back(
DAG.getRoot());
5361 for (
unsigned i = 0, e =
I.arg_size(); i != e; ++i) {
5362 const Value *Arg =
I.getArgOperand(i);
5363 if (!
I.paramHasAttr(i, Attribute::ImmArg)) {
5371 assert(CI->getBitWidth() <= 64 &&
5372 "large intrinsic immediates not handled");
5373 Ops.push_back(
DAG.getTargetConstant(*CI, SDLoc(), VT));
5380 if (std::optional<OperandBundleUse> Bundle =
5382 auto *Sym = Bundle->Inputs[0].get();
5385 Ops.push_back(SDSym);
5388 if (std::optional<OperandBundleUse> Bundle =
5390 Value *Token = Bundle->Inputs[0].get();
5392 assert(
Ops.back().getValueType() != MVT::Glue &&
5393 "Did not expect another glue node here.");
5395 DAG.getNode(ISD::CONVERGENCECTRL_GLUE, {}, MVT::Glue, ConvControlToken);
5396 Ops.push_back(ConvControlToken);
5404 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5412 return DAG.getVTList(ValueVTs);
5416SDValue SelectionDAGBuilder::getTargetNonMemIntrinsicNode(
5439 if (
I.getType()->isVoidTy())
5454void SelectionDAGBuilder::visitTargetIntrinsic(
const CallInst &
I,
5456 auto [HasChain, OnlyLoad] = getTargetIntrinsicCallProperties(
I);
5459 TargetLowering::IntrinsicInfo
Info;
5460 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5461 bool IsTgtMemIntrinsic =
5465 I, HasChain, OnlyLoad, IsTgtMemIntrinsic ? &
Info :
nullptr);
5466 SDVTList VTs = getTargetIntrinsicVTList(
I, HasChain);
5471 Flags.copyFMF(*FPMO);
5472 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
5479 if (IsTgtMemIntrinsic) {
5484 MachinePointerInfo MPI;
5486 MPI = MachinePointerInfo(
Info.ptrVal,
Info.offset);
5487 else if (
Info.fallbackAddressSpace)
5488 MPI = MachinePointerInfo(*
Info.fallbackAddressSpace);
5489 EVT MemVT =
Info.memVT;
5491 if (
Size.hasValue() && !
Size.getValue())
5493 Align Alignment =
Info.align.value_or(
DAG.getEVTAlign(MemVT));
5494 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5495 MPI,
Info.flags,
Size, Alignment,
I.getAAMetadata(),
nullptr,
5500 Result = getTargetNonMemIntrinsicNode(*
I.getType(), HasChain,
Ops, VTs);
5503 Result = handleTargetIntrinsicRet(
I, HasChain, OnlyLoad, Result);
5519 return DAG.
getNode(ISD::BITCAST, dl, MVT::f32, t2);
5560 SDValue TwoToFractionalPartOfX;
5628 SDValue t13 = DAG.
getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
5629 return DAG.
getNode(ISD::BITCAST, dl, MVT::f32,
5637 if (
Op.getValueType() == MVT::f32 &&
5652 return DAG.
getNode(ISD::FEXP, dl,
Op.getValueType(),
Op, Flags);
5661 if (
Op.getValueType() == MVT::f32 &&
5751 return DAG.
getNode(ISD::FLOG, dl,
Op.getValueType(),
Op, Flags);
5760 if (
Op.getValueType() == MVT::f32 &&
5844 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5848 return DAG.
getNode(ISD::FLOG2, dl,
Op.getValueType(),
Op, Flags);
5857 if (
Op.getValueType() == MVT::f32 &&
5934 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5938 return DAG.
getNode(ISD::FLOG10, dl,
Op.getValueType(),
Op, Flags);
5945 if (
Op.getValueType() == MVT::f32 &&
5950 return DAG.
getNode(ISD::FEXP2, dl,
Op.getValueType(),
Op, Flags);
5958 bool IsExp10 =
false;
5959 if (
LHS.getValueType() == MVT::f32 &&
RHS.getValueType() == MVT::f32 &&
5963 IsExp10 = LHSC->isExactlyValue(Ten);
5990 unsigned Val = RHSC->getSExtValue();
6019 CurSquare, CurSquare);
6024 if (RHSC->getSExtValue() < 0)
6038 EVT VT =
LHS.getValueType();
6061 if ((ScaleInt > 0 || (Saturating &&
Signed)) &&
6065 Opcode, VT, ScaleInt);
6100 switch (
N.getOpcode()) {
6104 Op.getValueType().getSizeInBits());
6129bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
6136 MachineFunction &MF =
DAG.getMachineFunction();
6137 const TargetInstrInfo *
TII =
DAG.getSubtarget().getInstrInfo();
6141 auto MakeVRegDbgValue = [&](
Register Reg, DIExpression *FragExpr,
6146 auto &Inst =
TII->get(TargetOpcode::DBG_INSTR_REF);
6153 auto *NewDIExpr = FragExpr;
6160 return BuildMI(MF,
DL, Inst,
false, MOs, Variable, NewDIExpr);
6163 auto &Inst =
TII->get(TargetOpcode::DBG_VALUE);
6164 return BuildMI(MF,
DL, Inst, Indirect,
Reg, Variable, FragExpr);
6168 if (Kind == FuncArgumentDbgValueKind::Value) {
6173 if (!IsInEntryBlock)
6189 bool VariableIsFunctionInputArg =
Variable->isParameter() &&
6190 !
DL->getInlinedAt();
6192 if (!IsInPrologue && !VariableIsFunctionInputArg)
6226 if (VariableIsFunctionInputArg) {
6228 if (ArgNo >=
FuncInfo.DescribedArgs.size())
6229 FuncInfo.DescribedArgs.resize(ArgNo + 1,
false);
6230 else if (!IsInPrologue &&
FuncInfo.DescribedArgs.test(ArgNo))
6231 return !NodeMap[
V].getNode();
6236 bool IsIndirect =
false;
6237 std::optional<MachineOperand>
Op;
6239 int FI =
FuncInfo.getArgumentFrameIndex(Arg);
6240 if (FI != std::numeric_limits<int>::max())
6244 if (!
Op &&
N.getNode()) {
6247 if (ArgRegsAndSizes.
size() == 1)
6248 Reg = ArgRegsAndSizes.
front().first;
6251 MachineRegisterInfo &RegInfo = MF.
getRegInfo();
6258 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6262 if (!
Op &&
N.getNode()) {
6266 if (FrameIndexSDNode *FINode =
6276 for (
const auto &RegAndSize : SplitRegs) {
6280 int RegFragmentSizeInBits = RegAndSize.second;
6282 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
6285 if (
Offset >= ExprFragmentSizeInBits)
6289 if (
Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
6290 RegFragmentSizeInBits = ExprFragmentSizeInBits -
Offset;
6295 Expr,
Offset, RegFragmentSizeInBits);
6296 Offset += RegAndSize.second;
6299 if (!FragmentExpr) {
6300 SDDbgValue *SDV =
DAG.getConstantDbgValue(
6302 DAG.AddDbgValue(SDV,
false);
6305 MachineInstr *NewMI =
6306 MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
6307 Kind != FuncArgumentDbgValueKind::Value);
6308 FuncInfo.ArgDbgValues.push_back(NewMI);
6315 if (VMI !=
FuncInfo.ValueMap.end()) {
6316 const auto &TLI =
DAG.getTargetLoweringInfo();
6317 RegsForValue RFV(
V->getContext(), TLI,
DAG.getDataLayout(), VMI->second,
6318 V->getType(), std::nullopt);
6319 if (RFV.occupiesMultipleRegs()) {
6320 splitMultiRegDbgValue(RFV.getRegsAndSizes());
6325 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6326 }
else if (ArgRegsAndSizes.
size() > 1) {
6329 splitMultiRegDbgValue(ArgRegsAndSizes);
6338 "Expected inlined-at fields to agree");
6339 MachineInstr *NewMI =
nullptr;
6342 NewMI = MakeVRegDbgValue(
Op->getReg(), Expr, IsIndirect);
6344 NewMI =
BuildMI(MF,
DL,
TII->get(TargetOpcode::DBG_VALUE),
true, *
Op,
6348 FuncInfo.ArgDbgValues.push_back(NewMI);
6357 unsigned DbgSDNodeOrder) {
6369 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
6370 false, dl, DbgSDNodeOrder);
6372 return DAG.getDbgValue(Variable, Expr,
N.getNode(),
N.getResNo(),
6373 false, dl, DbgSDNodeOrder);
6378 case Intrinsic::smul_fix:
6380 case Intrinsic::umul_fix:
6382 case Intrinsic::smul_fix_sat:
6384 case Intrinsic::umul_fix_sat:
6386 case Intrinsic::sdiv_fix:
6388 case Intrinsic::udiv_fix:
6390 case Intrinsic::sdiv_fix_sat:
6392 case Intrinsic::udiv_fix_sat:
6405 "expected call_preallocated_setup Value");
6406 for (
const auto *U : PreallocatedSetup->
users()) {
6408 const Function *Fn = UseCall->getCalledFunction();
6409 if (!Fn || Fn->
getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6419bool SelectionDAGBuilder::visitEntryValueDbgValue(
6429 auto ArgIt =
FuncInfo.ValueMap.find(Arg);
6430 if (ArgIt ==
FuncInfo.ValueMap.end()) {
6432 dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6433 "couldn't find an associated register for the Argument\n");
6436 Register ArgVReg = ArgIt->getSecond();
6438 for (
auto [PhysReg, VirtReg] :
FuncInfo.RegInfo->liveins())
6439 if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6440 SDDbgValue *SDV =
DAG.getVRegDbgValue(
6441 Variable, Expr, PhysReg,
false , DbgLoc, SDNodeOrder);
6442 DAG.AddDbgValue(SDV,
false );
6445 LLVM_DEBUG(
dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6446 "couldn't find a physical register\n");
6451void SelectionDAGBuilder::visitConvergenceControl(
const CallInst &
I,
6454 switch (Intrinsic) {
6455 case Intrinsic::experimental_convergence_anchor:
6456 setValue(&
I,
DAG.getNode(ISD::CONVERGENCECTRL_ANCHOR, sdl, MVT::Untyped));
6458 case Intrinsic::experimental_convergence_entry:
6459 setValue(&
I,
DAG.getNode(ISD::CONVERGENCECTRL_ENTRY, sdl, MVT::Untyped));
6461 case Intrinsic::experimental_convergence_loop: {
6463 auto *Token = Bundle->Inputs[0].get();
6464 setValue(&
I,
DAG.getNode(ISD::CONVERGENCECTRL_LOOP, sdl, MVT::Untyped,
6471void SelectionDAGBuilder::visitVectorHistogram(
const CallInst &
I,
6472 unsigned IntrinsicID) {
6475 assert(IntrinsicID == Intrinsic::experimental_vector_histogram_add &&
6476 "Tried to lower unsupported histogram type");
6482 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6483 DataLayout TargetDL =
DAG.getDataLayout();
6485 Align Alignment =
DAG.getEVTAlign(VT);
6498 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
6499 MachinePointerInfo(AS),
6510 EVT IdxVT =
Index.getValueType();
6517 SDValue ID =
DAG.getTargetConstant(IntrinsicID, sdl, MVT::i32);
6520 SDValue Histogram =
DAG.getMaskedHistogram(
DAG.getVTList(MVT::Other), VT, sdl,
6524 DAG.setRoot(Histogram);
6527void SelectionDAGBuilder::visitVectorExtractLastActive(
const CallInst &
I,
6529 assert(Intrinsic == Intrinsic::experimental_vector_extract_last_active &&
6530 "Tried lowering invalid vector extract last");
6532 const DataLayout &Layout =
DAG.getDataLayout();
6536 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6540 SDValue Idx =
DAG.getNode(ISD::VECTOR_FIND_LAST_ACTIVE, sdl, ExtVT, Mask);
6546 EVT BoolVT =
Mask.getValueType().getScalarType();
6547 SDValue AnyActive =
DAG.getNode(ISD::VECREDUCE_OR, sdl, BoolVT, Mask);
6548 Result =
DAG.getSelect(sdl, ResVT, AnyActive, Result, PassThru);
6555void SelectionDAGBuilder::visitIntrinsicCall(
const CallInst &
I,
6557 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6564 Flags.copyFMF(*FPOp);
6566 switch (Intrinsic) {
6569 visitTargetIntrinsic(
I, Intrinsic);
6571 case Intrinsic::vscale: {
6576 case Intrinsic::vastart: visitVAStart(
I);
return;
6577 case Intrinsic::vaend: visitVAEnd(
I);
return;
6578 case Intrinsic::vacopy: visitVACopy(
I);
return;
6579 case Intrinsic::returnaddress:
6584 case Intrinsic::addressofreturnaddress:
6589 case Intrinsic::sponentry:
6594 case Intrinsic::frameaddress:
6599 case Intrinsic::read_volatile_register:
6600 case Intrinsic::read_register: {
6601 Value *
Reg =
I.getArgOperand(0);
6607 DAG.getVTList(VT, MVT::Other), Chain,
RegName);
6612 case Intrinsic::write_register: {
6613 Value *
Reg =
I.getArgOperand(0);
6614 Value *RegValue =
I.getArgOperand(1);
6622 case Intrinsic::memcpy:
6623 case Intrinsic::memcpy_inline: {
6629 "memcpy_inline needs constant size");
6631 Align DstAlign = MCI.getDestAlign().valueOrOne();
6632 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6633 Align Alignment = std::min(DstAlign, SrcAlign);
6634 bool isVol = MCI.isVolatile();
6638 SDValue MC =
DAG.getMemcpy(Root, sdl, Dst, Src,
Size, Alignment, isVol,
6639 MCI.isForceInlined(), &
I, std::nullopt,
6640 MachinePointerInfo(
I.getArgOperand(0)),
6641 MachinePointerInfo(
I.getArgOperand(1)),
6643 updateDAGForMaybeTailCall(MC);
6646 case Intrinsic::memset:
6647 case Intrinsic::memset_inline: {
6653 "memset_inline needs constant size");
6655 Align DstAlign = MSII.getDestAlign().valueOrOne();
6656 bool isVol = MSII.isVolatile();
6659 Root, sdl, Dst, Value,
Size, DstAlign, isVol, MSII.isForceInlined(),
6660 &
I, MachinePointerInfo(
I.getArgOperand(0)),
I.getAAMetadata());
6661 updateDAGForMaybeTailCall(MC);
6664 case Intrinsic::memmove: {
6670 Align DstAlign = MMI.getDestAlign().valueOrOne();
6671 Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6672 Align Alignment = std::min(DstAlign, SrcAlign);
6673 bool isVol = MMI.isVolatile();
6677 SDValue MM =
DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol, &
I,
6679 MachinePointerInfo(
I.getArgOperand(0)),
6680 MachinePointerInfo(
I.getArgOperand(1)),
6682 updateDAGForMaybeTailCall(MM);
6685 case Intrinsic::memcpy_element_unordered_atomic: {
6691 Type *LengthTy =
MI.getLength()->getType();
6692 unsigned ElemSz =
MI.getElementSizeInBytes();
6696 isTC, MachinePointerInfo(
MI.getRawDest()),
6697 MachinePointerInfo(
MI.getRawSource()));
6698 updateDAGForMaybeTailCall(MC);
6701 case Intrinsic::memmove_element_unordered_atomic: {
6707 Type *LengthTy =
MI.getLength()->getType();
6708 unsigned ElemSz =
MI.getElementSizeInBytes();
6712 isTC, MachinePointerInfo(
MI.getRawDest()),
6713 MachinePointerInfo(
MI.getRawSource()));
6714 updateDAGForMaybeTailCall(MC);
6717 case Intrinsic::memset_element_unordered_atomic: {
6723 Type *LengthTy =
MI.getLength()->getType();
6724 unsigned ElemSz =
MI.getElementSizeInBytes();
6728 isTC, MachinePointerInfo(
MI.getRawDest()));
6729 updateDAGForMaybeTailCall(MC);
6732 case Intrinsic::call_preallocated_setup: {
6734 SDValue SrcValue =
DAG.getSrcValue(PreallocatedCall);
6735 SDValue Res =
DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
6741 case Intrinsic::call_preallocated_arg: {
6743 SDValue SrcValue =
DAG.getSrcValue(PreallocatedCall);
6750 ISD::PREALLOCATED_ARG, sdl,
6757 case Intrinsic::eh_typeid_for: {
6760 unsigned TypeID =
DAG.getMachineFunction().getTypeIDFor(GV);
6761 Res =
DAG.getConstant(
TypeID, sdl, MVT::i32);
6766 case Intrinsic::eh_return_i32:
6767 case Intrinsic::eh_return_i64:
6768 DAG.getMachineFunction().setCallsEHReturn(
true);
6775 case Intrinsic::eh_unwind_init:
6776 DAG.getMachineFunction().setCallsUnwindInit(
true);
6778 case Intrinsic::eh_dwarf_cfa:
6783 case Intrinsic::eh_sjlj_callsite: {
6785 assert(
FuncInfo.getCurrentCallSite() == 0 &&
"Overlapping call sites!");
6790 case Intrinsic::eh_sjlj_functioncontext: {
6792 MachineFrameInfo &MFI =
DAG.getMachineFunction().getFrameInfo();
6795 int FI =
FuncInfo.StaticAllocaMap[FnCtx];
6799 case Intrinsic::eh_sjlj_setjmp: {
6804 DAG.getVTList(MVT::i32, MVT::Other),
Ops);
6806 DAG.setRoot(
Op.getValue(1));
6809 case Intrinsic::eh_sjlj_longjmp:
6813 case Intrinsic::eh_sjlj_setup_dispatch:
6817 case Intrinsic::masked_gather:
6818 visitMaskedGather(
I);
6820 case Intrinsic::masked_load:
6823 case Intrinsic::masked_scatter:
6824 visitMaskedScatter(
I);
6826 case Intrinsic::masked_store:
6827 visitMaskedStore(
I);
6829 case Intrinsic::masked_expandload:
6830 visitMaskedLoad(
I,
true );
6832 case Intrinsic::masked_compressstore:
6833 visitMaskedStore(
I,
true );
6835 case Intrinsic::powi:
6839 case Intrinsic::log:
6842 case Intrinsic::log2:
6846 case Intrinsic::log10:
6850 case Intrinsic::exp:
6853 case Intrinsic::exp2:
6857 case Intrinsic::pow:
6861 case Intrinsic::sqrt:
6862 case Intrinsic::fabs:
6863 case Intrinsic::sin:
6864 case Intrinsic::cos:
6865 case Intrinsic::tan:
6866 case Intrinsic::asin:
6867 case Intrinsic::acos:
6868 case Intrinsic::atan:
6869 case Intrinsic::sinh:
6870 case Intrinsic::cosh:
6871 case Intrinsic::tanh:
6872 case Intrinsic::exp10:
6873 case Intrinsic::floor:
6874 case Intrinsic::ceil:
6875 case Intrinsic::trunc:
6876 case Intrinsic::rint:
6877 case Intrinsic::nearbyint:
6878 case Intrinsic::round:
6879 case Intrinsic::roundeven:
6880 case Intrinsic::canonicalize: {
6883 switch (Intrinsic) {
6885 case Intrinsic::sqrt: Opcode = ISD::FSQRT;
break;
6886 case Intrinsic::fabs: Opcode = ISD::FABS;
break;
6887 case Intrinsic::sin: Opcode = ISD::FSIN;
break;
6888 case Intrinsic::cos: Opcode = ISD::FCOS;
break;
6889 case Intrinsic::tan: Opcode = ISD::FTAN;
break;
6890 case Intrinsic::asin: Opcode = ISD::FASIN;
break;
6891 case Intrinsic::acos: Opcode = ISD::FACOS;
break;
6892 case Intrinsic::atan: Opcode = ISD::FATAN;
break;
6893 case Intrinsic::sinh: Opcode = ISD::FSINH;
break;
6894 case Intrinsic::cosh: Opcode = ISD::FCOSH;
break;
6895 case Intrinsic::tanh: Opcode = ISD::FTANH;
break;
6896 case Intrinsic::exp10: Opcode = ISD::FEXP10;
break;
6897 case Intrinsic::floor: Opcode = ISD::FFLOOR;
break;
6898 case Intrinsic::ceil: Opcode = ISD::FCEIL;
break;
6899 case Intrinsic::trunc: Opcode = ISD::FTRUNC;
break;
6900 case Intrinsic::rint: Opcode = ISD::FRINT;
break;
6901 case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT;
break;
6902 case Intrinsic::round: Opcode = ISD::FROUND;
break;
6903 case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN;
break;
6909 getValue(
I.getArgOperand(0)).getValueType(),
6913 case Intrinsic::atan2:
6915 getValue(
I.getArgOperand(0)).getValueType(),
6919 case Intrinsic::lround:
6920 case Intrinsic::llround:
6921 case Intrinsic::lrint:
6922 case Intrinsic::llrint: {
6925 switch (Intrinsic) {
6927 case Intrinsic::lround: Opcode = ISD::LROUND;
break;
6928 case Intrinsic::llround: Opcode = ISD::LLROUND;
break;
6929 case Intrinsic::lrint: Opcode = ISD::LRINT;
break;
6930 case Intrinsic::llrint: Opcode = ISD::LLRINT;
break;
6939 case Intrinsic::minnum:
6941 getValue(
I.getArgOperand(0)).getValueType(),
6945 case Intrinsic::maxnum:
6947 getValue(
I.getArgOperand(0)).getValueType(),
6951 case Intrinsic::minimum:
6953 getValue(
I.getArgOperand(0)).getValueType(),
6957 case Intrinsic::maximum:
6959 getValue(
I.getArgOperand(0)).getValueType(),
6963 case Intrinsic::minimumnum:
6965 getValue(
I.getArgOperand(0)).getValueType(),
6969 case Intrinsic::maximumnum:
6971 getValue(
I.getArgOperand(0)).getValueType(),
6975 case Intrinsic::copysign:
6977 getValue(
I.getArgOperand(0)).getValueType(),
6981 case Intrinsic::ldexp:
6983 getValue(
I.getArgOperand(0)).getValueType(),
6987 case Intrinsic::modf:
6988 case Intrinsic::sincos:
6989 case Intrinsic::sincospi:
6990 case Intrinsic::frexp: {
6992 switch (Intrinsic) {
6995 case Intrinsic::sincos:
6996 Opcode = ISD::FSINCOS;
6998 case Intrinsic::sincospi:
6999 Opcode = ISD::FSINCOSPI;
7001 case Intrinsic::modf:
7002 Opcode = ISD::FMODF;
7004 case Intrinsic::frexp:
7005 Opcode = ISD::FFREXP;
7010 SDVTList VTs =
DAG.getVTList(ValueVTs);
7012 &
I,
DAG.getNode(Opcode, sdl, VTs,
getValue(
I.getArgOperand(0)), Flags));
7015 case Intrinsic::arithmetic_fence: {
7017 getValue(
I.getArgOperand(0)).getValueType(),
7021 case Intrinsic::fma:
7027#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
7028 case Intrinsic::INTRINSIC:
7029#include "llvm/IR/ConstrainedOps.def"
7032#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
7033#include "llvm/IR/VPIntrinsics.def"
7036 case Intrinsic::fptrunc_round: {
7040 std::optional<RoundingMode> RoundMode =
7048 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
7053 DAG.getTargetConstant((
int)*RoundMode, sdl, MVT::i32));
7058 case Intrinsic::fmuladd: {
7063 getValue(
I.getArgOperand(0)).getValueType(),
7070 getValue(
I.getArgOperand(0)).getValueType(),
7086 case Intrinsic::convert_to_fp16:
7090 DAG.getTargetConstant(0, sdl,
7093 case Intrinsic::convert_from_fp16:
7096 DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
7099 case Intrinsic::fptosi_sat: {
7106 case Intrinsic::fptoui_sat: {
7113 case Intrinsic::set_rounding:
7114 Res =
DAG.getNode(ISD::SET_ROUNDING, sdl, MVT::Other,
7119 case Intrinsic::is_fpclass: {
7120 const DataLayout DLayout =
DAG.getDataLayout();
7122 EVT ArgVT = TLI.
getValueType(DLayout,
I.getArgOperand(0)->getType());
7125 MachineFunction &MF =
DAG.getMachineFunction();
7129 Flags.setNoFPExcept(
7130 !
F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
7146 case Intrinsic::get_fpenv: {
7147 const DataLayout DLayout =
DAG.getDataLayout();
7149 Align TempAlign =
DAG.getEVTAlign(EnvVT);
7155 ISD::GET_FPENV, sdl,
7164 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
7167 Chain =
DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7168 Res =
DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
7174 case Intrinsic::set_fpenv: {
7175 const DataLayout DLayout =
DAG.getDataLayout();
7178 Align TempAlign =
DAG.getEVTAlign(EnvVT);
7183 Chain =
DAG.getNode(ISD::SET_FPENV, sdl, MVT::Other, Chain, Env);
7191 Chain =
DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
7193 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
7196 Chain =
DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7201 case Intrinsic::reset_fpenv:
7202 DAG.setRoot(
DAG.getNode(ISD::RESET_FPENV, sdl, MVT::Other,
getRoot()));
7204 case Intrinsic::get_fpmode:
7206 ISD::GET_FPMODE, sdl,
7213 case Intrinsic::set_fpmode:
7214 Res =
DAG.getNode(ISD::SET_FPMODE, sdl, MVT::Other, {
DAG.getRoot()},
7218 case Intrinsic::reset_fpmode: {
7219 Res =
DAG.getNode(ISD::RESET_FPMODE, sdl, MVT::Other,
getRoot());
7223 case Intrinsic::pcmarker: {
7225 DAG.setRoot(
DAG.getNode(ISD::PCMARKER, sdl, MVT::Other,
getRoot(), Tmp));
7228 case Intrinsic::readcyclecounter: {
7230 Res =
DAG.getNode(ISD::READCYCLECOUNTER, sdl,
7231 DAG.getVTList(MVT::i64, MVT::Other),
Op);
7236 case Intrinsic::readsteadycounter: {
7238 Res =
DAG.getNode(ISD::READSTEADYCOUNTER, sdl,
7239 DAG.getVTList(MVT::i64, MVT::Other),
Op);
7244 case Intrinsic::bitreverse:
7246 getValue(
I.getArgOperand(0)).getValueType(),
7249 case Intrinsic::bswap:
7251 getValue(
I.getArgOperand(0)).getValueType(),
7254 case Intrinsic::cttz: {
7262 case Intrinsic::ctlz: {
7270 case Intrinsic::ctpop: {
7276 case Intrinsic::fshl:
7277 case Intrinsic::fshr: {
7278 bool IsFSHL =
Intrinsic == Intrinsic::fshl;
7282 EVT VT =
X.getValueType();
7293 case Intrinsic::sadd_sat: {
7299 case Intrinsic::uadd_sat: {
7305 case Intrinsic::ssub_sat: {
7311 case Intrinsic::usub_sat: {
7317 case Intrinsic::sshl_sat: {
7323 case Intrinsic::ushl_sat: {
7329 case Intrinsic::smul_fix:
7330 case Intrinsic::umul_fix:
7331 case Intrinsic::smul_fix_sat:
7332 case Intrinsic::umul_fix_sat: {
7340 case Intrinsic::sdiv_fix:
7341 case Intrinsic::udiv_fix:
7342 case Intrinsic::sdiv_fix_sat:
7343 case Intrinsic::udiv_fix_sat: {
7348 Op1, Op2, Op3,
DAG, TLI));
7351 case Intrinsic::smax: {
7357 case Intrinsic::smin: {
7363 case Intrinsic::umax: {
7369 case Intrinsic::umin: {
7375 case Intrinsic::abs: {
7381 case Intrinsic::scmp: {
7388 case Intrinsic::ucmp: {
7395 case Intrinsic::stacksave: {
7398 Res =
DAG.getNode(ISD::STACKSAVE, sdl,
DAG.getVTList(VT, MVT::Other),
Op);
7403 case Intrinsic::stackrestore:
7405 DAG.setRoot(
DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other,
getRoot(), Res));
7407 case Intrinsic::get_dynamic_area_offset: {
7410 Res =
DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl,
DAG.getVTList(ResTy),
7416 case Intrinsic::stackguard: {
7417 MachineFunction &MF =
DAG.getMachineFunction();
7423 Res =
DAG.getPtrExtOrTrunc(Res, sdl, PtrTy);
7427 LLVMContext &Ctx = *
DAG.getContext();
7428 Ctx.
diagnose(DiagnosticInfoGeneric(
"unable to lower stackguard"));
7435 MachinePointerInfo(
Global, 0), Align,
7444 case Intrinsic::stackprotector: {
7446 MachineFunction &MF =
DAG.getMachineFunction();
7466 Chain, sdl, Src, FIN,
7473 case Intrinsic::objectsize:
7476 case Intrinsic::is_constant:
7479 case Intrinsic::annotation:
7480 case Intrinsic::ptr_annotation:
7481 case Intrinsic::launder_invariant_group:
7482 case Intrinsic::strip_invariant_group:
7487 case Intrinsic::type_test:
7488 case Intrinsic::public_type_test:
7492 case Intrinsic::assume:
7493 case Intrinsic::experimental_noalias_scope_decl:
7494 case Intrinsic::var_annotation:
7495 case Intrinsic::sideeffect:
7500 case Intrinsic::codeview_annotation: {
7502 MachineFunction &MF =
DAG.getMachineFunction();
7506 Res =
DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl,
getRoot(), Label);
7511 case Intrinsic::init_trampoline: {
7519 Ops[4] =
DAG.getSrcValue(
I.getArgOperand(0));
7522 Res =
DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other,
Ops);
7527 case Intrinsic::adjust_trampoline:
7532 case Intrinsic::gcroot: {
7533 assert(
DAG.getMachineFunction().getFunction().hasGC() &&
7534 "only valid in functions with gc specified, enforced by Verifier");
7536 const Value *Alloca =
I.getArgOperand(0)->stripPointerCasts();
7543 case Intrinsic::gcread:
7544 case Intrinsic::gcwrite:
7546 case Intrinsic::get_rounding:
7552 case Intrinsic::expect:
7553 case Intrinsic::expect_with_probability:
7559 case Intrinsic::ubsantrap:
7560 case Intrinsic::debugtrap:
7561 case Intrinsic::trap: {
7562 StringRef TrapFuncName =
7563 I.getAttributes().getFnAttr(
"trap-func-name").getValueAsString();
7564 if (TrapFuncName.
empty()) {
7565 switch (Intrinsic) {
7566 case Intrinsic::trap:
7567 DAG.setRoot(
DAG.getNode(ISD::TRAP, sdl, MVT::Other,
getRoot()));
7569 case Intrinsic::debugtrap:
7570 DAG.setRoot(
DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other,
getRoot()));
7572 case Intrinsic::ubsantrap:
7574 ISD::UBSANTRAP, sdl, MVT::Other,
getRoot(),
7575 DAG.getTargetConstant(
7581 DAG.addNoMergeSiteInfo(
DAG.getRoot().getNode(),
7582 I.hasFnAttr(Attribute::NoMerge));
7586 if (Intrinsic == Intrinsic::ubsantrap) {
7587 Value *Arg =
I.getArgOperand(0);
7591 TargetLowering::CallLoweringInfo CLI(
DAG);
7592 CLI.setDebugLoc(sdl).setChain(
getRoot()).setLibCallee(
7594 DAG.getExternalSymbol(TrapFuncName.
data(),
7597 CLI.NoMerge =
I.hasFnAttr(Attribute::NoMerge);
7603 case Intrinsic::allow_runtime_check:
7604 case Intrinsic::allow_ubsan_check:
7608 case Intrinsic::uadd_with_overflow:
7609 case Intrinsic::sadd_with_overflow:
7610 case Intrinsic::usub_with_overflow:
7611 case Intrinsic::ssub_with_overflow:
7612 case Intrinsic::umul_with_overflow:
7613 case Intrinsic::smul_with_overflow: {
7615 switch (Intrinsic) {
7617 case Intrinsic::uadd_with_overflow:
Op =
ISD::UADDO;
break;
7618 case Intrinsic::sadd_with_overflow:
Op =
ISD::SADDO;
break;
7619 case Intrinsic::usub_with_overflow:
Op =
ISD::USUBO;
break;
7620 case Intrinsic::ssub_with_overflow:
Op =
ISD::SSUBO;
break;
7621 case Intrinsic::umul_with_overflow:
Op =
ISD::UMULO;
break;
7622 case Intrinsic::smul_with_overflow:
Op =
ISD::SMULO;
break;
7628 EVT OverflowVT = MVT::i1;
7633 SDVTList VTs =
DAG.getVTList(ResultVT, OverflowVT);
7637 case Intrinsic::prefetch: {
7650 ISD::PREFETCH, sdl,
DAG.getVTList(MVT::Other),
Ops,
7652 std::nullopt, Flags);
7658 DAG.setRoot(Result);
7661 case Intrinsic::lifetime_start:
7662 case Intrinsic::lifetime_end: {
7663 bool IsStart = (
Intrinsic == Intrinsic::lifetime_start);
7669 if (!LifetimeObject)
7674 auto SI =
FuncInfo.StaticAllocaMap.find(LifetimeObject);
7675 if (SI ==
FuncInfo.StaticAllocaMap.end())
7679 Res =
DAG.getLifetimeNode(IsStart, sdl,
getRoot(), FrameIndex);
7683 case Intrinsic::pseudoprobe: {
7691 case Intrinsic::invariant_start:
7696 case Intrinsic::invariant_end:
7699 case Intrinsic::clear_cache: {
7704 {InputChain, StartVal, EndVal});
7709 case Intrinsic::donothing:
7710 case Intrinsic::seh_try_begin:
7711 case Intrinsic::seh_scope_begin:
7712 case Intrinsic::seh_try_end:
7713 case Intrinsic::seh_scope_end:
7716 case Intrinsic::experimental_stackmap:
7719 case Intrinsic::experimental_patchpoint_void:
7720 case Intrinsic::experimental_patchpoint:
7723 case Intrinsic::experimental_gc_statepoint:
7726 case Intrinsic::experimental_gc_result:
7729 case Intrinsic::experimental_gc_relocate:
7732 case Intrinsic::instrprof_cover:
7734 case Intrinsic::instrprof_increment:
7736 case Intrinsic::instrprof_timestamp:
7738 case Intrinsic::instrprof_value_profile:
7740 case Intrinsic::instrprof_mcdc_parameters:
7742 case Intrinsic::instrprof_mcdc_tvbitmap_update:
7744 case Intrinsic::localescape: {
7745 MachineFunction &MF =
DAG.getMachineFunction();
7746 const TargetInstrInfo *
TII =
DAG.getSubtarget().getInstrInfo();
7750 for (
unsigned Idx = 0,
E =
I.arg_size(); Idx <
E; ++Idx) {
7756 "can only escape static allocas");
7761 TII->get(TargetOpcode::LOCAL_ESCAPE))
7769 case Intrinsic::localrecover: {
7771 MachineFunction &MF =
DAG.getMachineFunction();
7777 unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7781 Value *
FP =
I.getArgOperand(1);
7787 SDValue OffsetSym =
DAG.getMCSymbol(FrameAllocSym, PtrVT);
7792 SDValue Add =
DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
7798 case Intrinsic::fake_use: {
7799 Value *
V =
I.getArgOperand(0);
7804 auto FakeUseValue = [&]() ->
SDValue {
7818 if (!FakeUseValue || FakeUseValue.isUndef())
7821 Ops[1] = FakeUseValue;
7826 DAG.setRoot(
DAG.getNode(ISD::FAKE_USE, sdl, MVT::Other,
Ops));
7830 case Intrinsic::reloc_none: {
7835 DAG.getTargetExternalSymbol(
7837 DAG.setRoot(
DAG.getNode(ISD::RELOC_NONE, sdl, MVT::Other,
Ops));
7841 case Intrinsic::eh_exceptionpointer:
7842 case Intrinsic::eh_exceptioncode: {
7848 SDValue N =
DAG.getCopyFromReg(
DAG.getEntryNode(), sdl, VReg, PtrVT);
7849 if (Intrinsic == Intrinsic::eh_exceptioncode)
7850 N =
DAG.getZExtOrTrunc(
N, sdl, MVT::i32);
7854 case Intrinsic::xray_customevent: {
7857 const auto &Triple =
DAG.getTarget().getTargetTriple();
7866 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
7868 Ops.push_back(LogEntryVal);
7869 Ops.push_back(StrSizeVal);
7870 Ops.push_back(Chain);
7876 MachineSDNode *MN =
DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
7879 DAG.setRoot(patchableNode);
7883 case Intrinsic::xray_typedevent: {
7886 const auto &Triple =
DAG.getTarget().getTargetTriple();
7898 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
7900 Ops.push_back(LogTypeId);
7901 Ops.push_back(LogEntryVal);
7902 Ops.push_back(StrSizeVal);
7903 Ops.push_back(Chain);
7909 MachineSDNode *MN =
DAG.getMachineNode(
7910 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys,
Ops);
7912 DAG.setRoot(patchableNode);
7916 case Intrinsic::experimental_deoptimize:
7919 case Intrinsic::stepvector:
7922 case Intrinsic::vector_reduce_fadd:
7923 case Intrinsic::vector_reduce_fmul:
7924 case Intrinsic::vector_reduce_add:
7925 case Intrinsic::vector_reduce_mul:
7926 case Intrinsic::vector_reduce_and:
7927 case Intrinsic::vector_reduce_or:
7928 case Intrinsic::vector_reduce_xor:
7929 case Intrinsic::vector_reduce_smax:
7930 case Intrinsic::vector_reduce_smin:
7931 case Intrinsic::vector_reduce_umax:
7932 case Intrinsic::vector_reduce_umin:
7933 case Intrinsic::vector_reduce_fmax:
7934 case Intrinsic::vector_reduce_fmin:
7935 case Intrinsic::vector_reduce_fmaximum:
7936 case Intrinsic::vector_reduce_fminimum:
7937 visitVectorReduce(
I, Intrinsic);
7940 case Intrinsic::icall_branch_funnel: {
7946 I.getArgOperand(1),
Offset,
DAG.getDataLayout()));
7949 "llvm.icall.branch.funnel operand must be a GlobalValue");
7950 Ops.push_back(
DAG.getTargetGlobalAddress(
Base, sdl, MVT::i64, 0));
7952 struct BranchFunnelTarget {
7958 for (
unsigned Op = 1,
N =
I.arg_size();
Op !=
N;
Op += 2) {
7961 if (ElemBase !=
Base)
7963 "to the same GlobalValue");
7969 "llvm.icall.branch.funnel operand must be a GlobalValue");
7975 [](
const BranchFunnelTarget &
T1,
const BranchFunnelTarget &T2) {
7976 return T1.Offset < T2.Offset;
7979 for (
auto &
T : Targets) {
7980 Ops.push_back(
DAG.getTargetConstant(
T.Offset, sdl, MVT::i32));
7981 Ops.push_back(
T.Target);
7984 Ops.push_back(
DAG.getRoot());
7985 SDValue N(
DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl,
7994 case Intrinsic::wasm_landingpad_index:
8000 case Intrinsic::aarch64_settag:
8001 case Intrinsic::aarch64_settag_zero: {
8002 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
8003 bool ZeroMemory =
Intrinsic == Intrinsic::aarch64_settag_zero;
8006 getValue(
I.getArgOperand(1)), MachinePointerInfo(
I.getArgOperand(0)),
8012 case Intrinsic::amdgcn_cs_chain: {
8017 Type *RetTy =
I.getType();
8027 for (
unsigned Idx : {2, 3, 1}) {
8028 TargetLowering::ArgListEntry Arg(
getValue(
I.getOperand(Idx)),
8030 Arg.setAttributes(&
I, Idx);
8031 Args.push_back(Arg);
8034 assert(Args[0].IsInReg &&
"SGPR args should be marked inreg");
8035 assert(!Args[1].IsInReg &&
"VGPR args should not be marked inreg");
8036 Args[2].IsInReg =
true;
8039 for (
unsigned Idx = 4; Idx <
I.arg_size(); ++Idx) {
8040 TargetLowering::ArgListEntry Arg(
getValue(
I.getOperand(Idx)),
8042 Arg.setAttributes(&
I, Idx);
8043 Args.push_back(Arg);
8046 TargetLowering::CallLoweringInfo CLI(
DAG);
8049 .setCallee(CC, RetTy, Callee, std::move(Args))
8052 .setConvergent(
I.isConvergent());
8054 std::pair<SDValue, SDValue>
Result =
8058 "Should've lowered as tail call");
8063 case Intrinsic::amdgcn_call_whole_wave: {
8065 bool isTailCall =
I.isTailCall();
8068 for (
unsigned Idx = 1; Idx <
I.arg_size(); ++Idx) {
8069 TargetLowering::ArgListEntry Arg(
getValue(
I.getArgOperand(Idx)),
8070 I.getArgOperand(Idx)->getType());
8071 Arg.setAttributes(&
I, Idx);
8078 Args.push_back(Arg);
8083 auto *Token = Bundle->Inputs[0].get();
8084 ConvControlToken =
getValue(Token);
8087 TargetLowering::CallLoweringInfo CLI(
DAG);
8091 getValue(
I.getArgOperand(0)), std::move(Args))
8095 .setConvergent(
I.isConvergent())
8096 .setConvergenceControlToken(ConvControlToken);
8099 std::pair<SDValue, SDValue>
Result =
8102 if (
Result.first.getNode())
8106 case Intrinsic::ptrmask: {
8122 auto HighOnes =
DAG.getNode(
8123 ISD::SHL, sdl, PtrVT,
DAG.getAllOnesConstant(sdl, PtrVT),
8124 DAG.getShiftAmountConstant(
Mask.getValueType().getFixedSizeInBits(),
8127 DAG.getZExtOrTrunc(Mask, sdl, PtrVT), HighOnes);
8128 }
else if (
Mask.getValueType() != PtrVT)
8129 Mask =
DAG.getPtrExtOrTrunc(Mask, sdl, PtrVT);
8135 case Intrinsic::threadlocal_address: {
8139 case Intrinsic::get_active_lane_mask: {
8143 EVT ElementVT =
Index.getValueType();
8146 setValue(&
I,
DAG.getNode(ISD::GET_ACTIVE_LANE_MASK, sdl, CCVT, Index,
8154 SDValue VectorIndex =
DAG.getSplat(VecTy, sdl, Index);
8155 SDValue VectorTripCount =
DAG.getSplat(VecTy, sdl, TripCount);
8156 SDValue VectorStep =
DAG.getStepVector(sdl, VecTy);
8159 SDValue SetCC =
DAG.getSetCC(sdl, CCVT, VectorInduction,
8164 case Intrinsic::experimental_get_vector_length: {
8166 "Expected positive VF");
8171 EVT CountVT =
Count.getValueType();
8174 visitTargetIntrinsic(
I, Intrinsic);
8183 if (CountVT.
bitsLT(VT)) {
8188 SDValue MaxEVL =
DAG.getElementCount(sdl, CountVT,
8198 case Intrinsic::vector_partial_reduce_add: {
8206 case Intrinsic::vector_partial_reduce_fadd: {
8210 ISD::PARTIAL_REDUCE_FMLA, sdl, Acc.
getValueType(), Acc,
8214 case Intrinsic::experimental_cttz_elts: {
8217 EVT OpVT =
Op.getValueType();
8220 visitTargetIntrinsic(
I, Intrinsic);
8236 ConstantRange VScaleRange(1,
true);
8265 case Intrinsic::vector_insert: {
8273 if (
Index.getValueType() != VectorIdxTy)
8274 Index =
DAG.getVectorIdxConstant(
Index->getAsZExtVal(), sdl);
8281 case Intrinsic::vector_extract: {
8289 if (
Index.getValueType() != VectorIdxTy)
8290 Index =
DAG.getVectorIdxConstant(
Index->getAsZExtVal(), sdl);
8296 case Intrinsic::experimental_vector_match: {
8302 EVT ResVT =
Mask.getValueType();
8308 visitTargetIntrinsic(
I, Intrinsic);
8314 for (
unsigned i = 0; i < SearchSize; ++i) {
8317 DAG.getVectorIdxConstant(i, sdl));
8326 case Intrinsic::vector_reverse:
8327 visitVectorReverse(
I);
8329 case Intrinsic::vector_splice:
8330 visitVectorSplice(
I);
8332 case Intrinsic::callbr_landingpad:
8333 visitCallBrLandingPad(
I);
8335 case Intrinsic::vector_interleave2:
8336 visitVectorInterleave(
I, 2);
8338 case Intrinsic::vector_interleave3:
8339 visitVectorInterleave(
I, 3);
8341 case Intrinsic::vector_interleave4:
8342 visitVectorInterleave(
I, 4);
8344 case Intrinsic::vector_interleave5:
8345 visitVectorInterleave(
I, 5);
8347 case Intrinsic::vector_interleave6:
8348 visitVectorInterleave(
I, 6);
8350 case Intrinsic::vector_interleave7:
8351 visitVectorInterleave(
I, 7);
8353 case Intrinsic::vector_interleave8:
8354 visitVectorInterleave(
I, 8);
8356 case Intrinsic::vector_deinterleave2:
8357 visitVectorDeinterleave(
I, 2);
8359 case Intrinsic::vector_deinterleave3:
8360 visitVectorDeinterleave(
I, 3);
8362 case Intrinsic::vector_deinterleave4:
8363 visitVectorDeinterleave(
I, 4);
8365 case Intrinsic::vector_deinterleave5:
8366 visitVectorDeinterleave(
I, 5);
8368 case Intrinsic::vector_deinterleave6:
8369 visitVectorDeinterleave(
I, 6);
8371 case Intrinsic::vector_deinterleave7:
8372 visitVectorDeinterleave(
I, 7);
8374 case Intrinsic::vector_deinterleave8:
8375 visitVectorDeinterleave(
I, 8);
8377 case Intrinsic::experimental_vector_compress:
8379 getValue(
I.getArgOperand(0)).getValueType(),
8384 case Intrinsic::experimental_convergence_anchor:
8385 case Intrinsic::experimental_convergence_entry:
8386 case Intrinsic::experimental_convergence_loop:
8387 visitConvergenceControl(
I, Intrinsic);
8389 case Intrinsic::experimental_vector_histogram_add: {
8390 visitVectorHistogram(
I, Intrinsic);
8393 case Intrinsic::experimental_vector_extract_last_active: {
8394 visitVectorExtractLastActive(
I, Intrinsic);
8397 case Intrinsic::loop_dependence_war_mask:
8403 case Intrinsic::loop_dependence_raw_mask:
8412void SelectionDAGBuilder::pushFPOpOutChain(
SDValue Result,
8428 PendingConstrainedFP.push_back(OutChain);
8431 PendingConstrainedFPStrict.push_back(OutChain);
8436void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
8450 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8452 SDVTList VTs =
DAG.getVTList(VT, MVT::Other);
8456 Flags.setNoFPExcept(
true);
8459 Flags.copyFMF(*FPOp);
8464#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
8465 case Intrinsic::INTRINSIC: \
8466 Opcode = ISD::STRICT_##DAGN; \
8468#include "llvm/IR/ConstrainedOps.def"
8469 case Intrinsic::experimental_constrained_fmuladd: {
8476 pushFPOpOutChain(
Mul, EB);
8499 if (TM.Options.NoNaNsFPMath)
8507 pushFPOpOutChain(Result, EB);
8514 std::optional<unsigned> ResOPC;
8516 case Intrinsic::vp_ctlz: {
8518 ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
8521 case Intrinsic::vp_cttz: {
8523 ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
8526 case Intrinsic::vp_cttz_elts: {
8528 ResOPC = IsZeroPoison ? ISD::VP_CTTZ_ELTS_ZERO_UNDEF : ISD::VP_CTTZ_ELTS;
8531#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \
8532 case Intrinsic::VPID: \
8533 ResOPC = ISD::VPSD; \
8535#include "llvm/IR/VPIntrinsics.def"
8540 "Inconsistency: no SDNode available for this VPIntrinsic!");
8542 if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
8543 *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
8545 return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
8546 : ISD::VP_REDUCE_FMUL;
8552void SelectionDAGBuilder::visitVPLoad(
8564 Alignment =
DAG.getEVTAlign(VT);
8567 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8568 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8571 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8572 MachinePointerInfo(PtrOperand), MMOFlags,
8574 LD =
DAG.getLoadVP(VT,
DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8581void SelectionDAGBuilder::visitVPLoadFF(
8584 assert(OpValues.
size() == 3 &&
"Unexpected number of operands");
8594 Alignment =
DAG.getEVTAlign(VT);
8597 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8598 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8601 LD =
DAG.getLoadFFVP(VT,
DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8606 setValue(&VPIntrin,
DAG.getMergeValues({LD.getValue(0), Trunc},
DL));
8609void SelectionDAGBuilder::visitVPGather(
8613 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8625 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8627 *Alignment, AAInfo, Ranges);
8637 EVT IdxVT =
Index.getValueType();
8643 LD =
DAG.getGatherVP(
8644 DAG.getVTList(VT, MVT::Other), VT,
DL,
8645 {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
8651void SelectionDAGBuilder::visitVPStore(
8655 EVT VT = OpValues[0].getValueType();
8660 Alignment =
DAG.getEVTAlign(VT);
8663 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8666 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8667 MachinePointerInfo(PtrOperand), MMOFlags,
8676void SelectionDAGBuilder::visitVPScatter(
8679 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8681 EVT VT = OpValues[0].getValueType();
8691 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8693 *Alignment, AAInfo);
8703 EVT IdxVT =
Index.getValueType();
8709 ST =
DAG.getScatterVP(
DAG.getVTList(MVT::Other), VT,
DL,
8710 {getMemoryRoot(), OpValues[0], Base, Index, Scale,
8711 OpValues[2], OpValues[3]},
8717void SelectionDAGBuilder::visitVPStridedLoad(
8729 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8731 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8734 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8736 *Alignment, AAInfo, Ranges);
8738 SDValue LD =
DAG.getStridedLoadVP(VT,
DL, InChain, OpValues[0], OpValues[1],
8739 OpValues[2], OpValues[3], MMO,
8747void SelectionDAGBuilder::visitVPStridedStore(
8751 EVT VT = OpValues[0].getValueType();
8757 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8760 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8762 *Alignment, AAInfo);
8766 DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8774void SelectionDAGBuilder::visitVPCmp(
const VPCmpIntrinsic &VPIntrin) {
8775 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8786 if (TM.Options.NoNaNsFPMath)
8799 "Unexpected target EVL type");
8802 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
8805 DAG.getSetCCVP(
DL, DestVT, Op1, Op2, Condition, MaskOp, EVL));
8808void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8816 return visitVPCmp(*CmpI);
8819 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8821 SDVTList VTs =
DAG.getVTList(ValueVTs);
8827 "Unexpected target EVL type");
8831 for (
unsigned I = 0;
I < VPIntrin.
arg_size(); ++
I) {
8833 if (
I == EVLParamPos)
8840 SDNodeFlags SDFlags;
8848 visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8850 case ISD::VP_LOAD_FF:
8851 visitVPLoadFF(VPIntrin, ValueVTs[0], ValueVTs[1], OpValues);
8853 case ISD::VP_GATHER:
8854 visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8856 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8857 visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8860 visitVPStore(VPIntrin, OpValues);
8862 case ISD::VP_SCATTER:
8863 visitVPScatter(VPIntrin, OpValues);
8865 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8866 visitVPStridedStore(VPIntrin, OpValues);
8868 case ISD::VP_FMULADD: {
8869 assert(OpValues.
size() == 5 &&
"Unexpected number of operands");
8870 SDNodeFlags SDFlags;
8875 setValue(&VPIntrin,
DAG.getNode(ISD::VP_FMA,
DL, VTs, OpValues, SDFlags));
8878 ISD::VP_FMUL,
DL, VTs,
8879 {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8881 DAG.getNode(ISD::VP_FADD,
DL, VTs,
8882 {
Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8887 case ISD::VP_IS_FPCLASS: {
8888 const DataLayout DLayout =
DAG.getDataLayout();
8890 auto Constant = OpValues[1]->getAsZExtVal();
8893 {OpValues[0],
Check, OpValues[2], OpValues[3]});
8897 case ISD::VP_INTTOPTR: {
8908 case ISD::VP_PTRTOINT: {
8910 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
8923 case ISD::VP_CTLZ_ZERO_UNDEF:
8925 case ISD::VP_CTTZ_ZERO_UNDEF:
8926 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
8927 case ISD::VP_CTTZ_ELTS: {
8929 DAG.getNode(Opcode,
DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8939 MachineFunction &MF =
DAG.getMachineFunction();
8947 unsigned CallSiteIndex =
FuncInfo.getCurrentCallSite();
8948 if (CallSiteIndex) {
8962 assert(BeginLabel &&
"BeginLabel should've been set");
8964 MachineFunction &MF =
DAG.getMachineFunction();
8976 assert(
II &&
"II should've been set");
8987std::pair<SDValue, SDValue>
9001 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
9004 "Non-null chain expected with non-tail call!");
9005 assert((Result.second.getNode() || !Result.first.getNode()) &&
9006 "Null value expected with tail call!");
9008 if (!Result.second.getNode()) {
9015 PendingExports.clear();
9017 DAG.setRoot(Result.second);
9035 if (!isMustTailCall &&
9036 Caller->getFnAttribute(
"disable-tail-calls").getValueAsBool())
9042 if (
DAG.getTargetLoweringInfo().supportSwiftError() &&
9043 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
9052 bool isTailCall,
bool isMustTailCall,
9055 auto &
DL =
DAG.getDataLayout();
9062 const Value *SwiftErrorVal =
nullptr;
9069 const Value *V = *
I;
9072 if (V->getType()->isEmptyTy())
9077 Entry.setAttributes(&CB,
I - CB.
arg_begin());
9089 Args.push_back(Entry);
9100 Value *V = Bundle->Inputs[0];
9102 Entry.IsCFGuardTarget =
true;
9103 Args.push_back(Entry);
9116 "Target doesn't support calls with kcfi operand bundles.");
9124 auto *Token = Bundle->Inputs[0].get();
9125 ConvControlToken =
getValue(Token);
9136 .
setCallee(RetTy, FTy, Callee, std::move(Args), CB)
9149 "This target doesn't support calls with ptrauth operand bundles.");
9153 std::pair<SDValue, SDValue> Result =
lowerInvokable(CLI, EHPadBB);
9155 if (Result.first.getNode()) {
9170 DAG.setRoot(CopyNode);
9186 LoadTy, Builder.DAG.getDataLayout()))
9187 return Builder.getValue(LoadCst);
9193 bool ConstantMemory =
false;
9196 if (Builder.BatchAA && Builder.BatchAA->pointsToConstantMemory(PtrVal)) {
9197 Root = Builder.DAG.getEntryNode();
9198 ConstantMemory =
true;
9201 Root = Builder.DAG.getRoot();
9206 Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr,
9209 if (!ConstantMemory)
9210 Builder.PendingLoads.push_back(LoadVal.
getValue(1));
9216void SelectionDAGBuilder::processIntegerCallValue(
const Instruction &
I,
9219 EVT VT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9230bool SelectionDAGBuilder::visitMemCmpBCmpCall(
const CallInst &
I) {
9231 const Value *
LHS =
I.getArgOperand(0), *
RHS =
I.getArgOperand(1);
9232 const Value *
Size =
I.getArgOperand(2);
9235 EVT CallVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9241 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9245 if (Res.first.getNode()) {
9246 processIntegerCallValue(
I, Res.first,
true);
9260 auto hasFastLoadsAndCompare = [&](
unsigned NumBits) {
9261 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
9283 switch (NumBitsToCompare) {
9295 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
9308 LoadL =
DAG.getBitcast(CmpVT, LoadL);
9309 LoadR =
DAG.getBitcast(CmpVT, LoadR);
9313 processIntegerCallValue(
I, Cmp,
false);
9322bool SelectionDAGBuilder::visitMemChrCall(
const CallInst &
I) {
9323 const Value *Src =
I.getArgOperand(0);
9324 const Value *
Char =
I.getArgOperand(1);
9325 const Value *
Length =
I.getArgOperand(2);
9327 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9328 std::pair<SDValue, SDValue> Res =
9331 MachinePointerInfo(Src));
9332 if (Res.first.getNode()) {
9346bool SelectionDAGBuilder::visitMemPCpyCall(
const CallInst &
I) {
9351 Align DstAlign =
DAG.InferPtrAlign(Dst).valueOrOne();
9352 Align SrcAlign =
DAG.InferPtrAlign(Src).valueOrOne();
9354 Align Alignment = std::min(DstAlign, SrcAlign);
9363 Root, sdl, Dst, Src,
Size, Alignment,
false,
false,
nullptr,
9364 std::nullopt, MachinePointerInfo(
I.getArgOperand(0)),
9365 MachinePointerInfo(
I.getArgOperand(1)),
I.getAAMetadata());
9367 "** memcpy should not be lowered as TailCall in mempcpy context **");
9371 Size =
DAG.getSExtOrTrunc(
Size, sdl, Dst.getValueType());
9384bool SelectionDAGBuilder::visitStrCpyCall(
const CallInst &
I,
bool isStpcpy) {
9385 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9387 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9388 std::pair<SDValue, SDValue> Res =
9391 MachinePointerInfo(Arg0),
9392 MachinePointerInfo(Arg1), isStpcpy);
9393 if (Res.first.getNode()) {
9395 DAG.setRoot(Res.second);
9407bool SelectionDAGBuilder::visitStrCmpCall(
const CallInst &
I) {
9408 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9410 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9411 std::pair<SDValue, SDValue> Res =
9414 MachinePointerInfo(Arg0),
9415 MachinePointerInfo(Arg1));
9416 if (Res.first.getNode()) {
9417 processIntegerCallValue(
I, Res.first,
true);
9430bool SelectionDAGBuilder::visitStrLenCall(
const CallInst &
I) {
9431 const Value *Arg0 =
I.getArgOperand(0);
9433 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9436 if (Res.first.getNode()) {
9437 processIntegerCallValue(
I, Res.first,
false);
9450bool SelectionDAGBuilder::visitStrNLenCall(
const CallInst &
I) {
9451 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9453 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9454 std::pair<SDValue, SDValue> Res =
9457 MachinePointerInfo(Arg0));
9458 if (Res.first.getNode()) {
9459 processIntegerCallValue(
I, Res.first,
false);
9472bool SelectionDAGBuilder::visitUnaryFloatCall(
const CallInst &
I,
9477 if (!
I.onlyReadsMemory() ||
I.isStrictFP())
9494bool SelectionDAGBuilder::visitBinaryFloatCall(
const CallInst &
I,
9499 if (!
I.onlyReadsMemory() ||
I.isStrictFP())
9512void SelectionDAGBuilder::visitCall(
const CallInst &
I) {
9514 if (
I.isInlineAsm()) {
9521 if (Function *
F =
I.getCalledFunction()) {
9522 if (
F->isDeclaration()) {
9524 if (
unsigned IID =
F->getIntrinsicID()) {
9525 visitIntrinsicCall(
I, IID);
9534 if (!
I.isNoBuiltin() && !
F->hasLocalLinkage() &&
F->hasName() &&
9535 LibInfo->getLibFunc(*
F, Func) &&
LibInfo->hasOptimizedCodeGen(Func)) {
9539 if (visitMemCmpBCmpCall(
I))
9542 case LibFunc_copysign:
9543 case LibFunc_copysignf:
9544 case LibFunc_copysignl:
9547 if (
I.onlyReadsMemory()) {
9558 if (visitUnaryFloatCall(
I, ISD::FABS))
9564 if (visitBinaryFloatCall(
I, ISD::FMINNUM))
9570 if (visitBinaryFloatCall(
I, ISD::FMAXNUM))
9573 case LibFunc_fminimum_num:
9574 case LibFunc_fminimum_numf:
9575 case LibFunc_fminimum_numl:
9576 if (visitBinaryFloatCall(
I, ISD::FMINIMUMNUM))
9579 case LibFunc_fmaximum_num:
9580 case LibFunc_fmaximum_numf:
9581 case LibFunc_fmaximum_numl:
9582 if (visitBinaryFloatCall(
I, ISD::FMAXIMUMNUM))
9588 if (visitUnaryFloatCall(
I, ISD::FSIN))
9594 if (visitUnaryFloatCall(
I, ISD::FCOS))
9600 if (visitUnaryFloatCall(
I, ISD::FTAN))
9606 if (visitUnaryFloatCall(
I, ISD::FASIN))
9612 if (visitUnaryFloatCall(
I, ISD::FACOS))
9618 if (visitUnaryFloatCall(
I, ISD::FATAN))
9622 case LibFunc_atan2f:
9623 case LibFunc_atan2l:
9624 if (visitBinaryFloatCall(
I, ISD::FATAN2))
9630 if (visitUnaryFloatCall(
I, ISD::FSINH))
9636 if (visitUnaryFloatCall(
I, ISD::FCOSH))
9642 if (visitUnaryFloatCall(
I, ISD::FTANH))
9648 case LibFunc_sqrt_finite:
9649 case LibFunc_sqrtf_finite:
9650 case LibFunc_sqrtl_finite:
9651 if (visitUnaryFloatCall(
I, ISD::FSQRT))
9655 case LibFunc_floorf:
9656 case LibFunc_floorl:
9657 if (visitUnaryFloatCall(
I, ISD::FFLOOR))
9660 case LibFunc_nearbyint:
9661 case LibFunc_nearbyintf:
9662 case LibFunc_nearbyintl:
9663 if (visitUnaryFloatCall(
I, ISD::FNEARBYINT))
9669 if (visitUnaryFloatCall(
I, ISD::FCEIL))
9675 if (visitUnaryFloatCall(
I, ISD::FRINT))
9679 case LibFunc_roundf:
9680 case LibFunc_roundl:
9681 if (visitUnaryFloatCall(
I, ISD::FROUND))
9685 case LibFunc_truncf:
9686 case LibFunc_truncl:
9687 if (visitUnaryFloatCall(
I, ISD::FTRUNC))
9693 if (visitUnaryFloatCall(
I, ISD::FLOG2))
9699 if (visitUnaryFloatCall(
I, ISD::FEXP2))
9703 case LibFunc_exp10f:
9704 case LibFunc_exp10l:
9705 if (visitUnaryFloatCall(
I, ISD::FEXP10))
9709 case LibFunc_ldexpf:
9710 case LibFunc_ldexpl:
9711 if (visitBinaryFloatCall(
I, ISD::FLDEXP))
9714 case LibFunc_memcmp:
9715 if (visitMemCmpBCmpCall(
I))
9718 case LibFunc_mempcpy:
9719 if (visitMemPCpyCall(
I))
9722 case LibFunc_memchr:
9723 if (visitMemChrCall(
I))
9726 case LibFunc_strcpy:
9727 if (visitStrCpyCall(
I,
false))
9730 case LibFunc_stpcpy:
9731 if (visitStrCpyCall(
I,
true))
9734 case LibFunc_strcmp:
9735 if (visitStrCmpCall(
I))
9738 case LibFunc_strlen:
9739 if (visitStrLenCall(
I))
9742 case LibFunc_strnlen:
9743 if (visitStrNLenCall(
I))
9767 if (
I.hasDeoptState())
9784 const Value *Discriminator = PAB->Inputs[1];
9786 assert(
Key->getType()->isIntegerTy(32) &&
"Invalid ptrauth key");
9787 assert(Discriminator->getType()->isIntegerTy(64) &&
9788 "Invalid ptrauth discriminator");
9793 if (CalleeCPA->isKnownCompatibleWith(
Key, Discriminator,
9794 DAG.getDataLayout()))
9834 for (
const auto &Code : Codes)
9849 SDISelAsmOperandInfo &MatchingOpInfo,
9851 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
9857 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9859 OpInfo.ConstraintVT);
9860 std::pair<unsigned, const TargetRegisterClass *> InputRC =
9862 MatchingOpInfo.ConstraintVT);
9863 const bool OutOpIsIntOrFP =
9864 OpInfo.ConstraintVT.isInteger() || OpInfo.ConstraintVT.isFloatingPoint();
9865 const bool InOpIsIntOrFP = MatchingOpInfo.ConstraintVT.isInteger() ||
9866 MatchingOpInfo.ConstraintVT.isFloatingPoint();
9867 if ((OutOpIsIntOrFP != InOpIsIntOrFP) || (MatchRC.second != InputRC.second)) {
9870 " with a matching output constraint of"
9871 " incompatible type!");
9873 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9880 SDISelAsmOperandInfo &OpInfo,
9893 const Value *OpVal = OpInfo.CallOperandVal;
9911 DL.getPrefTypeAlign(Ty),
false,
9914 Chain = DAG.
getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9917 OpInfo.CallOperand = StackSlot;
9930static std::optional<unsigned>
9932 SDISelAsmOperandInfo &OpInfo,
9933 SDISelAsmOperandInfo &RefOpInfo) {
9944 return std::nullopt;
9948 unsigned AssignedReg;
9951 &
TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
9954 return std::nullopt;
9959 const MVT RegVT = *
TRI.legalclasstypes_begin(*RC);
9961 if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
9970 !
TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
9975 if (RegVT.
getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
9980 OpInfo.CallOperand =
9981 DAG.
getNode(ISD::BITCAST,
DL, RegVT, OpInfo.CallOperand);
9982 OpInfo.ConstraintVT = RegVT;
9986 }
else if (RegVT.
isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
9989 OpInfo.CallOperand =
9990 DAG.
getNode(ISD::BITCAST,
DL, VT, OpInfo.CallOperand);
9991 OpInfo.ConstraintVT = VT;
9998 if (OpInfo.isMatchingInputConstraint())
9999 return std::nullopt;
10001 EVT ValueVT = OpInfo.ConstraintVT;
10002 if (OpInfo.ConstraintVT == MVT::Other)
10006 unsigned NumRegs = 1;
10007 if (OpInfo.ConstraintVT != MVT::Other)
10022 I = std::find(
I, RC->
end(), AssignedReg);
10023 if (
I == RC->
end()) {
10026 return {AssignedReg};
10030 for (; NumRegs; --NumRegs, ++
I) {
10031 assert(
I != RC->
end() &&
"Ran out of registers to allocate!");
10036 OpInfo.AssignedRegs =
RegsForValue(Regs, RegVT, ValueVT);
10037 return std::nullopt;
10042 const std::vector<SDValue> &AsmNodeOperands) {
10045 for (; OperandNo; --OperandNo) {
10047 unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
10050 (
F.isRegDefKind() ||
F.isRegDefEarlyClobberKind() ||
F.isMemKind()) &&
10051 "Skipped past definitions?");
10052 CurOp +=
F.getNumOperandRegisters() + 1;
10060 unsigned Flags = 0;
10063 explicit ExtraFlags(
const CallBase &
Call) {
10065 if (
IA->hasSideEffects())
10067 if (
IA->isAlignStack())
10074 void update(
const TargetLowering::AsmOperandInfo &OpInfo) {
10090 unsigned get()
const {
return Flags; }
10113void SelectionDAGBuilder::visitInlineAsm(
const CallBase &
Call,
10120 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10122 DAG.getDataLayout(),
DAG.getSubtarget().getRegisterInfo(),
Call);
10126 bool HasSideEffect =
IA->hasSideEffects();
10127 ExtraFlags ExtraInfo(
Call);
10129 for (
auto &
T : TargetConstraints) {
10130 ConstraintOperands.
push_back(SDISelAsmOperandInfo(
T));
10131 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.
back();
10133 if (OpInfo.CallOperandVal)
10134 OpInfo.CallOperand =
getValue(OpInfo.CallOperandVal);
10136 if (!HasSideEffect)
10137 HasSideEffect = OpInfo.hasMemory(TLI);
10149 return emitInlineAsmError(
Call,
"constraint '" + Twine(
T.ConstraintCode) +
10150 "' expects an integer constant "
10153 ExtraInfo.update(
T);
10161 if (EmitEHLabels) {
10162 assert(EHPadBB &&
"InvokeInst must have an EHPadBB");
10166 if (IsCallBr || EmitEHLabels) {
10174 if (EmitEHLabels) {
10175 Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
10180 IA->collectAsmStrs(AsmStrs);
10183 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10191 if (OpInfo.hasMatchingInput()) {
10192 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
10223 if (OpInfo.isIndirect &&
isFunction(OpInfo.CallOperand) &&
10226 OpInfo.isIndirect =
false;
10233 !OpInfo.isIndirect) {
10234 assert((OpInfo.isMultipleAlternative ||
10236 "Can only indirectify direct input operands!");
10242 OpInfo.CallOperandVal =
nullptr;
10245 OpInfo.isIndirect =
true;
10251 std::vector<SDValue> AsmNodeOperands;
10252 AsmNodeOperands.push_back(
SDValue());
10253 AsmNodeOperands.push_back(
DAG.getTargetExternalSymbol(
10260 AsmNodeOperands.push_back(
DAG.getMDNode(SrcLoc));
10264 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10269 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10271 SDISelAsmOperandInfo &RefOpInfo =
10272 OpInfo.isMatchingInputConstraint()
10273 ? ConstraintOperands[OpInfo.getMatchedOperand()]
10275 const auto RegError =
10278 const MachineFunction &MF =
DAG.getMachineFunction();
10280 const char *
RegName =
TRI.getName(*RegError);
10281 emitInlineAsmError(
Call,
"register '" + Twine(
RegName) +
10282 "' allocated for constraint '" +
10283 Twine(OpInfo.ConstraintCode) +
10284 "' does not match required type");
10288 auto DetectWriteToReservedRegister = [&]() {
10289 const MachineFunction &MF =
DAG.getMachineFunction();
10294 emitInlineAsmError(
Call,
"write to reserved register '" +
10303 !OpInfo.isMatchingInputConstraint())) &&
10304 "Only address as input operand is allowed.");
10306 switch (OpInfo.Type) {
10312 "Failed to convert memory constraint code to constraint id.");
10316 OpFlags.setMemConstraint(ConstraintID);
10317 AsmNodeOperands.push_back(
DAG.getTargetConstant(OpFlags,
getCurSDLoc(),
10319 AsmNodeOperands.push_back(OpInfo.CallOperand);
10324 if (OpInfo.AssignedRegs.
Regs.empty()) {
10325 emitInlineAsmError(
10326 Call,
"couldn't allocate output register for constraint '" +
10327 Twine(OpInfo.ConstraintCode) +
"'");
10331 if (DetectWriteToReservedRegister())
10345 SDValue InOperandVal = OpInfo.CallOperand;
10347 if (OpInfo.isMatchingInputConstraint()) {
10352 InlineAsm::Flag
Flag(AsmNodeOperands[CurOp]->getAsZExtVal());
10353 if (
Flag.isRegDefKind() ||
Flag.isRegDefEarlyClobberKind()) {
10354 if (OpInfo.isIndirect) {
10356 emitInlineAsmError(
Call,
"inline asm not supported yet: "
10357 "don't know how to handle tied "
10358 "indirect register inputs");
10363 MachineFunction &MF =
DAG.getMachineFunction();
10368 MVT RegVT =
R->getSimpleValueType(0);
10369 const TargetRegisterClass *RC =
10372 :
TRI.getMinimalPhysRegClass(TiedReg);
10373 for (
unsigned i = 0, e =
Flag.getNumOperandRegisters(); i != e; ++i)
10376 RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.
getValueType());
10380 MatchedRegs.getCopyToRegs(InOperandVal,
DAG, dl, Chain, &Glue, &
Call);
10382 OpInfo.getMatchedOperand(), dl,
DAG,
10387 assert(
Flag.isMemKind() &&
"Unknown matching constraint!");
10388 assert(
Flag.getNumOperandRegisters() == 1 &&
10389 "Unexpected number of operands");
10392 Flag.clearMemConstraint();
10393 Flag.setMatchingOp(OpInfo.getMatchedOperand());
10394 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10396 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
10407 std::vector<SDValue>
Ops;
10413 emitInlineAsmError(
Call,
"value out of range for constraint '" +
10414 Twine(OpInfo.ConstraintCode) +
"'");
10418 emitInlineAsmError(
Call,
10419 "invalid operand for inline asm constraint '" +
10420 Twine(OpInfo.ConstraintCode) +
"'");
10426 AsmNodeOperands.push_back(
DAG.getTargetConstant(
10433 assert((OpInfo.isIndirect ||
10435 "Operand must be indirect to be a mem!");
10438 "Memory operands expect pointer values");
10443 "Failed to convert memory constraint code to constraint id.");
10447 ResOpType.setMemConstraint(ConstraintID);
10448 AsmNodeOperands.push_back(
DAG.getTargetConstant(ResOpType,
10451 AsmNodeOperands.push_back(InOperandVal);
10459 "Failed to convert memory constraint code to constraint id.");
10463 SDValue AsmOp = InOperandVal;
10467 AsmOp =
DAG.getTargetGlobalAddress(GA->getGlobal(),
getCurSDLoc(),
10473 ResOpType.setMemConstraint(ConstraintID);
10475 AsmNodeOperands.push_back(
10478 AsmNodeOperands.push_back(AsmOp);
10484 emitInlineAsmError(
Call,
"unknown asm constraint '" +
10485 Twine(OpInfo.ConstraintCode) +
"'");
10490 if (OpInfo.isIndirect) {
10491 emitInlineAsmError(
10492 Call,
"Don't know how to handle indirect register inputs yet "
10493 "for constraint '" +
10494 Twine(OpInfo.ConstraintCode) +
"'");
10499 if (OpInfo.AssignedRegs.
Regs.empty()) {
10500 emitInlineAsmError(
Call,
10501 "couldn't allocate input reg for constraint '" +
10502 Twine(OpInfo.ConstraintCode) +
"'");
10506 if (DetectWriteToReservedRegister())
10515 0, dl,
DAG, AsmNodeOperands);
10521 if (!OpInfo.AssignedRegs.
Regs.empty())
10531 if (Glue.
getNode()) AsmNodeOperands.push_back(Glue);
10533 unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
10535 DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
10547 ResultTypes = StructResult->elements();
10548 else if (!CallResultType->
isVoidTy())
10549 ResultTypes =
ArrayRef(CallResultType);
10551 auto CurResultType = ResultTypes.
begin();
10552 auto handleRegAssign = [&](
SDValue V) {
10553 assert(CurResultType != ResultTypes.
end() &&
"Unexpected value");
10554 assert((*CurResultType)->isSized() &&
"Unexpected unsized type");
10555 EVT ResultVT = TLI.
getValueType(
DAG.getDataLayout(), *CurResultType);
10567 if (ResultVT !=
V.getValueType() &&
10570 else if (ResultVT !=
V.getValueType() && ResultVT.
isInteger() &&
10571 V.getValueType().isInteger()) {
10577 assert(ResultVT ==
V.getValueType() &&
"Asm result value mismatch!");
10583 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10587 if (OpInfo.AssignedRegs.
Regs.empty())
10590 switch (OpInfo.ConstraintType) {
10594 Chain, &Glue, &
Call);
10606 assert(
false &&
"Unexpected unknown constraint");
10610 if (OpInfo.isIndirect) {
10611 const Value *Ptr = OpInfo.CallOperandVal;
10612 assert(Ptr &&
"Expected value CallOperandVal for indirect asm operand");
10614 MachinePointerInfo(Ptr));
10621 handleRegAssign(V);
10623 handleRegAssign(Val);
10629 if (!ResultValues.
empty()) {
10630 assert(CurResultType == ResultTypes.
end() &&
10631 "Mismatch in number of ResultTypes");
10633 "Mismatch in number of output operands in asm result");
10636 DAG.getVTList(ResultVTs), ResultValues);
10641 if (!OutChains.
empty())
10644 if (EmitEHLabels) {
10649 if (ResultValues.
empty() || HasSideEffect || !OutChains.
empty() || IsCallBr ||
10651 DAG.setRoot(Chain);
10654void SelectionDAGBuilder::emitInlineAsmError(
const CallBase &
Call,
10655 const Twine &Message) {
10656 LLVMContext &Ctx = *
DAG.getContext();
10660 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10664 if (ValueVTs.
empty())
10668 for (
const EVT &VT : ValueVTs)
10669 Ops.push_back(
DAG.getUNDEF(VT));
10674void SelectionDAGBuilder::visitVAStart(
const CallInst &
I) {
10678 DAG.getSrcValue(
I.getArgOperand(0))));
10681void SelectionDAGBuilder::visitVAArg(
const VAArgInst &
I) {
10682 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10683 const DataLayout &
DL =
DAG.getDataLayout();
10687 DL.getABITypeAlign(
I.getType()).value());
10688 DAG.setRoot(
V.getValue(1));
10690 if (
I.getType()->isPointerTy())
10691 V =
DAG.getPtrExtOrTrunc(
10696void SelectionDAGBuilder::visitVAEnd(
const CallInst &
I) {
10700 DAG.getSrcValue(
I.getArgOperand(0))));
10703void SelectionDAGBuilder::visitVACopy(
const CallInst &
I) {
10708 DAG.getSrcValue(
I.getArgOperand(0)),
10709 DAG.getSrcValue(
I.getArgOperand(1))));
10715 std::optional<ConstantRange> CR =
getRange(
I);
10717 if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped())
10720 APInt Lo = CR->getUnsignedMin();
10721 if (!
Lo.isMinValue())
10724 APInt Hi = CR->getUnsignedMax();
10725 unsigned Bits = std::max(
Hi.getActiveBits(),
10733 DAG.getValueType(SmallVT));
10734 unsigned NumVals =
Op.getNode()->getNumValues();
10740 Ops.push_back(ZExt);
10741 for (
unsigned I = 1;
I != NumVals; ++
I)
10742 Ops.push_back(
Op.getValue(
I));
10744 return DAG.getMergeValues(
Ops,
SL);
10754 SDValue TestConst =
DAG.getTargetConstant(Classes,
SDLoc(), MVT::i32);
10762 for (
unsigned I = 0, E =
Ops.size();
I != E; ++
I) {
10765 MergeOp, TestConst);
10768 return DAG.getMergeValues(
Ops,
SL);
10779 unsigned ArgIdx,
unsigned NumArgs,
SDValue Callee,
Type *ReturnTy,
10782 Args.reserve(NumArgs);
10786 for (
unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
10787 ArgI != ArgE; ++ArgI) {
10788 const Value *V =
Call->getOperand(ArgI);
10790 assert(!V->getType()->isEmptyTy() &&
"Empty type passed to intrinsic.");
10793 Entry.setAttributes(
Call, ArgI);
10794 Args.push_back(Entry);
10799 .
setCallee(
Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
10828 for (
unsigned I = StartIdx;
I <
Call.arg_size();
I++) {
10837 Ops.push_back(Builder.getValue(
Call.getArgOperand(
I)));
10843void SelectionDAGBuilder::visitStackmap(
const CallInst &CI) {
10869 Ops.push_back(Chain);
10870 Ops.push_back(InGlue);
10877 assert(
ID.getValueType() == MVT::i64);
10879 DAG.getTargetConstant(
ID->getAsZExtVal(),
DL,
ID.getValueType());
10880 Ops.push_back(IDConst);
10886 Ops.push_back(ShadConst);
10892 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
10893 Chain =
DAG.getNode(ISD::STACKMAP,
DL, NodeTys,
Ops);
10896 Chain =
DAG.getCALLSEQ_END(Chain, 0, 0, InGlue,
DL);
10901 DAG.setRoot(Chain);
10904 FuncInfo.MF->getFrameInfo().setHasStackMap();
10908void SelectionDAGBuilder::visitPatchpoint(
const CallBase &CB,
10925 Callee =
DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
10928 Callee =
DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
10929 SDLoc(SymbolicCallee),
10930 SymbolicCallee->getValueType(0));
10940 "Not enough arguments provided to the patchpoint intrinsic");
10943 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
10947 TargetLowering::CallLoweringInfo CLI(
DAG);
10952 SDNode *CallEnd =
Result.second.getNode();
10953 if (CallEnd->
getOpcode() == ISD::EH_LABEL)
10961 "Expected a callseq node.");
10963 bool HasGlue =
Call->getGluedNode();
10988 Ops.push_back(Callee);
10994 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
10995 Ops.push_back(
DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
10998 Ops.push_back(
DAG.getTargetConstant((
unsigned)CC, dl, MVT::i32));
11003 for (
unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i !=
e; ++i)
11014 if (IsAnyRegCC && HasDef) {
11016 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
11019 assert(ValueVTs.
size() == 1 &&
"Expected only one return value type.");
11024 NodeTys =
DAG.getVTList(ValueVTs);
11026 NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
11029 SDValue PPV =
DAG.getNode(ISD::PATCHPOINT, dl, NodeTys,
Ops);
11043 if (IsAnyRegCC && HasDef) {
11046 DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
11052 FuncInfo.MF->getFrameInfo().setHasPatchPoint();
11055void SelectionDAGBuilder::visitVectorReduce(
const CallInst &
I,
11057 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
11060 if (
I.arg_size() > 1)
11065 SDNodeFlags SDFlags;
11069 switch (Intrinsic) {
11070 case Intrinsic::vector_reduce_fadd:
11073 DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
11076 Res =
DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
11078 case Intrinsic::vector_reduce_fmul:
11081 DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
11084 Res =
DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
11086 case Intrinsic::vector_reduce_add:
11087 Res =
DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
11089 case Intrinsic::vector_reduce_mul:
11090 Res =
DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
11092 case Intrinsic::vector_reduce_and:
11093 Res =
DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
11095 case Intrinsic::vector_reduce_or:
11096 Res =
DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
11098 case Intrinsic::vector_reduce_xor:
11099 Res =
DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
11101 case Intrinsic::vector_reduce_smax:
11102 Res =
DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
11104 case Intrinsic::vector_reduce_smin:
11105 Res =
DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
11107 case Intrinsic::vector_reduce_umax:
11108 Res =
DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
11110 case Intrinsic::vector_reduce_umin:
11111 Res =
DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
11113 case Intrinsic::vector_reduce_fmax:
11114 Res =
DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
11116 case Intrinsic::vector_reduce_fmin:
11117 Res =
DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
11119 case Intrinsic::vector_reduce_fmaximum:
11120 Res =
DAG.getNode(ISD::VECREDUCE_FMAXIMUM, dl, VT, Op1, SDFlags);
11122 case Intrinsic::vector_reduce_fminimum:
11123 Res =
DAG.getNode(ISD::VECREDUCE_FMINIMUM, dl, VT, Op1, SDFlags);
11136 Attrs.push_back(Attribute::SExt);
11138 Attrs.push_back(Attribute::ZExt);
11140 Attrs.push_back(Attribute::InReg);
11142 return AttributeList::get(CLI.
RetTy->
getContext(), AttributeList::ReturnIndex,
11150std::pair<SDValue, SDValue>
11164 "Only supported for non-aggregate returns");
11167 for (
Type *Ty : RetOrigTys)
11176 RetOrigTys.
swap(OldRetOrigTys);
11177 RetVTs.
swap(OldRetVTs);
11178 Offsets.swap(OldOffsets);
11180 for (
size_t i = 0, e = OldRetVTs.
size(); i != e; ++i) {
11181 EVT RetVT = OldRetVTs[i];
11185 unsigned RegisterVTByteSZ = RegisterVT.
getSizeInBits() / 8;
11186 RetOrigTys.
append(NumRegs, OldRetOrigTys[i]);
11187 RetVTs.
append(NumRegs, RegisterVT);
11188 for (
unsigned j = 0; j != NumRegs; ++j)
11201 int DemoteStackIdx = -100;
11214 ArgListEntry Entry(DemoteStackSlot, StackSlotPtrType);
11215 Entry.IsSRet =
true;
11216 Entry.Alignment = Alignment;
11228 for (
unsigned I = 0, E = RetVTs.
size();
I != E; ++
I) {
11230 if (NeedsRegBlock) {
11231 Flags.setInConsecutiveRegs();
11232 if (
I == RetVTs.
size() - 1)
11233 Flags.setInConsecutiveRegsLast();
11235 EVT VT = RetVTs[
I];
11239 for (
unsigned i = 0; i != NumRegs; ++i) {
11243 Ret.Flags.setPointer();
11244 Ret.Flags.setPointerAddrSpace(
11248 Ret.Flags.setSExt();
11250 Ret.Flags.setZExt();
11252 Ret.Flags.setInReg();
11253 CLI.
Ins.push_back(Ret);
11262 if (Arg.IsSwiftError) {
11268 CLI.
Ins.push_back(Ret);
11276 for (
unsigned i = 0, e = Args.size(); i != e; ++i) {
11280 Type *FinalType = Args[i].Ty;
11281 if (Args[i].IsByVal)
11282 FinalType = Args[i].IndirectType;
11285 for (
unsigned Value = 0, NumValues = OrigArgTys.
size();
Value != NumValues;
11288 Type *ArgTy = OrigArgTy;
11289 if (Args[i].Ty != Args[i].OrigTy) {
11290 assert(
Value == 0 &&
"Only supported for non-aggregate arguments");
11291 ArgTy = Args[i].Ty;
11296 Args[i].Node.getResNo() +
Value);
11303 Flags.setOrigAlign(OriginalAlignment);
11308 Flags.setPointer();
11311 if (Args[i].IsZExt)
11313 if (Args[i].IsSExt)
11315 if (Args[i].IsNoExt)
11317 if (Args[i].IsInReg) {
11324 Flags.setHvaStart();
11330 if (Args[i].IsSRet)
11332 if (Args[i].IsSwiftSelf)
11333 Flags.setSwiftSelf();
11334 if (Args[i].IsSwiftAsync)
11335 Flags.setSwiftAsync();
11336 if (Args[i].IsSwiftError)
11337 Flags.setSwiftError();
11338 if (Args[i].IsCFGuardTarget)
11339 Flags.setCFGuardTarget();
11340 if (Args[i].IsByVal)
11342 if (Args[i].IsByRef)
11344 if (Args[i].IsPreallocated) {
11345 Flags.setPreallocated();
11353 if (Args[i].IsInAlloca) {
11354 Flags.setInAlloca();
11363 if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
11364 unsigned FrameSize =
DL.getTypeAllocSize(Args[i].IndirectType);
11365 Flags.setByValSize(FrameSize);
11368 if (
auto MA = Args[i].Alignment)
11372 }
else if (
auto MA = Args[i].Alignment) {
11375 MemAlign = OriginalAlignment;
11377 Flags.setMemAlign(MemAlign);
11378 if (Args[i].IsNest)
11381 Flags.setInConsecutiveRegs();
11384 unsigned NumParts =
11389 if (Args[i].IsSExt)
11391 else if (Args[i].IsZExt)
11396 if (Args[i].IsReturned && !
Op.getValueType().isVector() &&
11401 Args[i].Ty->getPointerAddressSpace())) &&
11402 RetVTs.
size() == NumValues &&
"unexpected use of 'returned'");
11415 CLI.
RetZExt == Args[i].IsZExt))
11416 Flags.setReturned();
11422 for (
unsigned j = 0; j != NumParts; ++j) {
11428 j * Parts[j].
getValueType().getStoreSize().getKnownMinValue());
11429 if (NumParts > 1 && j == 0)
11433 if (j == NumParts - 1)
11437 CLI.
Outs.push_back(MyFlags);
11438 CLI.
OutVals.push_back(Parts[j]);
11441 if (NeedsRegBlock &&
Value == NumValues - 1)
11442 CLI.
Outs[CLI.
Outs.size() - 1].Flags.setInConsecutiveRegsLast();
11454 "LowerCall didn't return a valid chain!");
11456 "LowerCall emitted a return value for a tail call!");
11458 "LowerCall didn't emit the correct number of values!");
11470 for (
unsigned i = 0, e = CLI.
Ins.size(); i != e; ++i) {
11471 assert(InVals[i].
getNode() &&
"LowerCall emitted a null value!");
11472 assert(
EVT(CLI.
Ins[i].VT) == InVals[i].getValueType() &&
11473 "LowerCall emitted a value with the wrong type!");
11483 unsigned NumValues = RetVTs.
size();
11484 ReturnValues.
resize(NumValues);
11491 for (
unsigned i = 0; i < NumValues; ++i) {
11498 DemoteStackIdx, Offsets[i]),
11500 ReturnValues[i] = L;
11501 Chains[i] = L.getValue(1);
11508 std::optional<ISD::NodeType> AssertOp;
11513 unsigned CurReg = 0;
11514 for (
EVT VT : RetVTs) {
11520 CLI.
DAG, CLI.
DL, &InVals[CurReg], NumRegs, RegisterVT, VT,
nullptr,
11528 if (ReturnValues.
empty())
11534 return std::make_pair(Res, CLI.
Chain);
11551 if (
N->getNumValues() == 1) {
11559 "Lowering returned the wrong number of results!");
11562 for (
unsigned I = 0, E =
N->getNumValues();
I != E; ++
I)
11576 "Copy from a reg to the same reg!");
11577 assert(!Reg.isPhysical() &&
"Is a physreg");
11583 RegsForValue RFV(V->getContext(), TLI,
DAG.getDataLayout(), Reg, V->getType(),
11588 auto PreferredExtendIt =
FuncInfo.PreferredExtendType.find(V);
11589 if (PreferredExtendIt !=
FuncInfo.PreferredExtendType.end())
11590 ExtendType = PreferredExtendIt->second;
11593 PendingExports.push_back(Chain);
11605 return A->use_empty();
11607 const BasicBlock &Entry =
A->getParent()->front();
11608 for (
const User *U :
A->users())
11617 std::pair<const AllocaInst *, const StoreInst *>>;
11629 enum StaticAllocaInfo {
Unknown, Clobbered, Elidable };
11631 unsigned NumArgs = FuncInfo->
Fn->
arg_size();
11632 StaticAllocas.
reserve(NumArgs * 2);
11634 auto GetInfoIfStaticAlloca = [&](
const Value *V) -> StaticAllocaInfo * {
11637 V = V->stripPointerCasts();
11639 if (!AI || !AI->isStaticAlloca() || !FuncInfo->
StaticAllocaMap.count(AI))
11642 return &Iter.first->second;
11659 if (
I.isDebugOrPseudoInst())
11663 for (
const Use &U :
I.operands()) {
11664 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(U))
11665 *
Info = StaticAllocaInfo::Clobbered;
11671 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(
SI->getValueOperand()))
11672 *
Info = StaticAllocaInfo::Clobbered;
11675 const Value *Dst =
SI->getPointerOperand()->stripPointerCasts();
11676 StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(Dst);
11682 if (*
Info != StaticAllocaInfo::Unknown)
11690 const Value *Val =
SI->getValueOperand()->stripPointerCasts();
11692 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
11696 !
DL.typeSizeEqualsStoreSize(Arg->
getType()) ||
11697 ArgCopyElisionCandidates.count(Arg)) {
11698 *
Info = StaticAllocaInfo::Clobbered;
11702 LLVM_DEBUG(
dbgs() <<
"Found argument copy elision candidate: " << *AI
11706 *
Info = StaticAllocaInfo::Elidable;
11707 ArgCopyElisionCandidates.insert({Arg, {AI,
SI}});
11712 if (ArgCopyElisionCandidates.size() == NumArgs)
11736 auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
11737 assert(ArgCopyIter != ArgCopyElisionCandidates.end());
11738 const AllocaInst *AI = ArgCopyIter->second.first;
11739 int FixedIndex = FINode->getIndex();
11741 int OldIndex = AllocaIndex;
11745 dbgs() <<
" argument copy elision failed due to bad fixed stack "
11751 LLVM_DEBUG(
dbgs() <<
" argument copy elision failed: alignment of alloca "
11752 "greater than stack argument alignment ("
11753 <<
DebugStr(RequiredAlignment) <<
" vs "
11761 dbgs() <<
"Eliding argument copy from " << Arg <<
" to " << *AI <<
'\n'
11762 <<
" Replacing frame index " << OldIndex <<
" with " << FixedIndex
11768 AllocaIndex = FixedIndex;
11769 ArgCopyElisionFrameIndexMap.
insert({OldIndex, FixedIndex});
11770 for (
SDValue ArgVal : ArgVals)
11774 const StoreInst *
SI = ArgCopyIter->second.second;
11787void SelectionDAGISel::LowerArguments(
const Function &
F) {
11788 SelectionDAG &DAG =
SDB->DAG;
11789 SDLoc dl =
SDB->getCurSDLoc();
11794 if (
F.hasFnAttribute(Attribute::Naked))
11799 MVT ValueVT =
TLI->getPointerTy(
DL,
DL.getAllocaAddrSpace());
11801 ISD::ArgFlagsTy
Flags;
11803 MVT RegisterVT =
TLI->getRegisterType(*DAG.
getContext(), ValueVT);
11804 ISD::InputArg RetArg(Flags, RegisterVT, ValueVT,
F.getReturnType(),
true,
11806 Ins.push_back(RetArg);
11814 ArgCopyElisionCandidates);
11817 for (
const Argument &Arg :
F.args()) {
11818 unsigned ArgNo = Arg.getArgNo();
11821 bool isArgValueUsed = !Arg.
use_empty();
11822 unsigned PartBase = 0;
11824 if (Arg.hasAttribute(Attribute::ByVal))
11825 FinalType = Arg.getParamByValType();
11826 bool NeedsRegBlock =
TLI->functionArgumentNeedsConsecutiveRegisters(
11827 FinalType,
F.getCallingConv(),
F.isVarArg(),
DL);
11828 for (
unsigned Value = 0, NumValues =
Types.size();
Value != NumValues;
11831 EVT VT =
TLI->getValueType(
DL, ArgTy);
11832 ISD::ArgFlagsTy
Flags;
11835 Flags.setPointer();
11838 if (Arg.hasAttribute(Attribute::ZExt))
11840 if (Arg.hasAttribute(Attribute::SExt))
11842 if (Arg.hasAttribute(Attribute::InReg)) {
11849 Flags.setHvaStart();
11855 if (Arg.hasAttribute(Attribute::StructRet))
11857 if (Arg.hasAttribute(Attribute::SwiftSelf))
11858 Flags.setSwiftSelf();
11859 if (Arg.hasAttribute(Attribute::SwiftAsync))
11860 Flags.setSwiftAsync();
11861 if (Arg.hasAttribute(Attribute::SwiftError))
11862 Flags.setSwiftError();
11863 if (Arg.hasAttribute(Attribute::ByVal))
11865 if (Arg.hasAttribute(Attribute::ByRef))
11867 if (Arg.hasAttribute(Attribute::InAlloca)) {
11868 Flags.setInAlloca();
11876 if (Arg.hasAttribute(Attribute::Preallocated)) {
11877 Flags.setPreallocated();
11889 const Align OriginalAlignment(
11890 TLI->getABIAlignmentForCallingConv(ArgTy,
DL));
11891 Flags.setOrigAlign(OriginalAlignment);
11894 Type *ArgMemTy =
nullptr;
11895 if (
Flags.isByVal() ||
Flags.isInAlloca() ||
Flags.isPreallocated() ||
11898 ArgMemTy = Arg.getPointeeInMemoryValueType();
11900 uint64_t MemSize =
DL.getTypeAllocSize(ArgMemTy);
11905 if (
auto ParamAlign = Arg.getParamStackAlign())
11906 MemAlign = *ParamAlign;
11907 else if ((ParamAlign = Arg.getParamAlign()))
11908 MemAlign = *ParamAlign;
11910 MemAlign =
TLI->getByValTypeAlignment(ArgMemTy,
DL);
11911 if (
Flags.isByRef())
11912 Flags.setByRefSize(MemSize);
11914 Flags.setByValSize(MemSize);
11915 }
else if (
auto ParamAlign = Arg.getParamStackAlign()) {
11916 MemAlign = *ParamAlign;
11918 MemAlign = OriginalAlignment;
11920 Flags.setMemAlign(MemAlign);
11922 if (Arg.hasAttribute(Attribute::Nest))
11925 Flags.setInConsecutiveRegs();
11926 if (ArgCopyElisionCandidates.count(&Arg))
11927 Flags.setCopyElisionCandidate();
11928 if (Arg.hasAttribute(Attribute::Returned))
11929 Flags.setReturned();
11931 MVT RegisterVT =
TLI->getRegisterTypeForCallingConv(
11932 *
CurDAG->getContext(),
F.getCallingConv(), VT);
11933 unsigned NumRegs =
TLI->getNumRegistersForCallingConv(
11934 *
CurDAG->getContext(),
F.getCallingConv(), VT);
11935 for (
unsigned i = 0; i != NumRegs; ++i) {
11939 ISD::InputArg MyFlags(
11940 Flags, RegisterVT, VT, ArgTy, isArgValueUsed, ArgNo,
11942 if (NumRegs > 1 && i == 0)
11943 MyFlags.Flags.setSplit();
11946 MyFlags.Flags.setOrigAlign(
Align(1));
11947 if (i == NumRegs - 1)
11948 MyFlags.Flags.setSplitEnd();
11950 Ins.push_back(MyFlags);
11952 if (NeedsRegBlock &&
Value == NumValues - 1)
11953 Ins[
Ins.size() - 1].Flags.setInConsecutiveRegsLast();
11960 SDValue NewRoot =
TLI->LowerFormalArguments(
11961 DAG.
getRoot(),
F.getCallingConv(),
F.isVarArg(), Ins, dl, DAG, InVals);
11965 "LowerFormalArguments didn't return a valid chain!");
11967 "LowerFormalArguments didn't emit the correct number of values!");
11969 for (
unsigned i = 0, e =
Ins.size(); i != e; ++i) {
11971 "LowerFormalArguments emitted a null value!");
11973 "LowerFormalArguments emitted a value with the wrong type!");
11985 MVT VT =
TLI->getPointerTy(
DL,
DL.getAllocaAddrSpace());
11986 MVT RegVT =
TLI->getRegisterType(*
CurDAG->getContext(), VT);
11987 std::optional<ISD::NodeType> AssertOp;
11990 F.getCallingConv(), AssertOp);
11992 MachineFunction&
MF =
SDB->DAG.getMachineFunction();
11993 MachineRegisterInfo&
RegInfo =
MF.getRegInfo();
11995 RegInfo.createVirtualRegister(
TLI->getRegClassFor(RegVT));
11996 FuncInfo->DemoteRegister = SRetReg;
11998 SDB->DAG.getCopyToReg(NewRoot,
SDB->getCurSDLoc(), SRetReg, ArgValue);
12006 DenseMap<int, int> ArgCopyElisionFrameIndexMap;
12007 for (
const Argument &Arg :
F.args()) {
12011 unsigned NumValues = ValueVTs.
size();
12012 if (NumValues == 0)
12019 if (Ins[i].
Flags.isCopyElisionCandidate()) {
12020 unsigned NumParts = 0;
12021 for (EVT VT : ValueVTs)
12022 NumParts +=
TLI->getNumRegistersForCallingConv(*
CurDAG->getContext(),
12023 F.getCallingConv(), VT);
12027 ArrayRef(&InVals[i], NumParts), ArgHasUses);
12032 bool isSwiftErrorArg =
12033 TLI->supportSwiftError() &&
12034 Arg.hasAttribute(Attribute::SwiftError);
12035 if (!ArgHasUses && !isSwiftErrorArg) {
12036 SDB->setUnusedArgValue(&Arg, InVals[i]);
12039 if (FrameIndexSDNode *FI =
12041 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12044 for (
unsigned Val = 0; Val != NumValues; ++Val) {
12045 EVT VT = ValueVTs[Val];
12046 MVT PartVT =
TLI->getRegisterTypeForCallingConv(*
CurDAG->getContext(),
12047 F.getCallingConv(), VT);
12048 unsigned NumParts =
TLI->getNumRegistersForCallingConv(
12049 *
CurDAG->getContext(),
F.getCallingConv(), VT);
12054 if (ArgHasUses || isSwiftErrorArg) {
12055 std::optional<ISD::NodeType> AssertOp;
12056 if (Arg.hasAttribute(Attribute::SExt))
12058 else if (Arg.hasAttribute(Attribute::ZExt))
12063 NewRoot,
F.getCallingConv(), AssertOp);
12066 if (NoFPClass !=
fcNone) {
12068 static_cast<uint64_t
>(NoFPClass), dl, MVT::i32);
12070 OutVal, SDNoFPClass);
12079 if (ArgValues.
empty())
12083 if (FrameIndexSDNode *FI =
12085 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12088 SDB->getCurSDLoc());
12090 SDB->setValue(&Arg, Res);
12100 if (LoadSDNode *LNode =
12102 if (FrameIndexSDNode *FI =
12104 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12132 FuncInfo->InitializeRegForValue(&Arg);
12133 SDB->CopyToExportRegsIfNeeded(&Arg);
12137 if (!Chains.
empty()) {
12144 assert(i == InVals.
size() &&
"Argument register count mismatch!");
12148 if (!ArgCopyElisionFrameIndexMap.
empty()) {
12149 for (MachineFunction::VariableDbgInfo &VI :
12150 MF->getInStackSlotVariableDbgInfo()) {
12151 auto I = ArgCopyElisionFrameIndexMap.
find(
VI.getStackSlot());
12152 if (
I != ArgCopyElisionFrameIndexMap.
end())
12153 VI.updateStackSlot(
I->second);
12168SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(
const BasicBlock *LLVMBB) {
12169 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12171 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
12177 MachineBasicBlock *SuccMBB =
FuncInfo.getMBB(SuccBB);
12181 if (!SuccsHandled.
insert(SuccMBB).second)
12189 for (
const PHINode &PN : SuccBB->phis()) {
12191 if (PN.use_empty())
12195 if (PN.getType()->isEmptyTy())
12199 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
12204 RegOut =
FuncInfo.CreateRegs(&PN);
12222 "Didn't codegen value into a register!??");
12232 for (EVT VT : ValueVTs) {
12234 for (
unsigned i = 0; i != NumRegisters; ++i)
12236 Reg += NumRegisters;
12256void SelectionDAGBuilder::updateDAGForMaybeTailCall(
SDValue MaybeTC) {
12258 if (MaybeTC.
getNode() !=
nullptr)
12259 DAG.setRoot(MaybeTC);
12264void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W,
Value *
Cond,
12267 MachineFunction *CurMF =
FuncInfo.MF;
12268 MachineBasicBlock *NextMBB =
nullptr;
12273 unsigned Size =
W.LastCluster -
W.FirstCluster + 1;
12275 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
12277 if (
Size == 2 &&
W.MBB == SwitchMBB) {
12285 CaseCluster &
Small = *
W.FirstCluster;
12286 CaseCluster &
Big = *
W.LastCluster;
12290 const APInt &SmallValue =
Small.Low->getValue();
12291 const APInt &BigValue =
Big.Low->getValue();
12294 APInt CommonBit = BigValue ^ SmallValue;
12301 DAG.getConstant(CommonBit,
DL, VT));
12303 DL, MVT::i1,
Or,
DAG.getConstant(BigValue | SmallValue,
DL, VT),
12309 addSuccessorWithProb(SwitchMBB,
Small.MBB,
Small.Prob +
Big.Prob);
12311 addSuccessorWithProb(
12312 SwitchMBB, DefaultMBB,
12316 addSuccessorWithProb(SwitchMBB, DefaultMBB);
12323 BrCond =
DAG.getNode(ISD::BR,
DL, MVT::Other, BrCond,
12324 DAG.getBasicBlock(DefaultMBB));
12326 DAG.setRoot(BrCond);
12338 [](
const CaseCluster &a,
const CaseCluster &b) {
12339 return a.Prob != b.Prob ?
12341 a.Low->getValue().slt(b.Low->getValue());
12348 if (
I->Prob >
W.LastCluster->Prob)
12350 if (
I->Kind ==
CC_Range &&
I->MBB == NextMBB) {
12358 BranchProbability DefaultProb =
W.DefaultProb;
12359 BranchProbability UnhandledProbs = DefaultProb;
12361 UnhandledProbs +=
I->Prob;
12363 MachineBasicBlock *CurMBB =
W.MBB;
12365 bool FallthroughUnreachable =
false;
12366 MachineBasicBlock *Fallthrough;
12367 if (
I ==
W.LastCluster) {
12369 Fallthrough = DefaultMBB;
12374 CurMF->
insert(BBI, Fallthrough);
12378 UnhandledProbs -=
I->Prob;
12383 JumpTableHeader *JTH = &
SL->JTCases[
I->JTCasesIndex].first;
12384 SwitchCG::JumpTable *
JT = &
SL->JTCases[
I->JTCasesIndex].second;
12387 MachineBasicBlock *JumpMBB =
JT->MBB;
12388 CurMF->
insert(BBI, JumpMBB);
12390 auto JumpProb =
I->Prob;
12391 auto FallthroughProb = UnhandledProbs;
12399 if (*SI == DefaultMBB) {
12400 JumpProb += DefaultProb / 2;
12401 FallthroughProb -= DefaultProb / 2;
12419 if (FallthroughUnreachable) {
12426 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
12427 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
12433 JT->Default = Fallthrough;
12436 if (CurMBB == SwitchMBB) {
12444 BitTestBlock *BTB = &
SL->BitTestCases[
I->BTCasesIndex];
12447 for (BitTestCase &BTC : BTB->
Cases)
12459 BTB->
Prob += DefaultProb / 2;
12463 if (FallthroughUnreachable)
12467 if (CurMBB == SwitchMBB) {
12474 const Value *
RHS, *
LHS, *MHS;
12476 if (
I->Low ==
I->High) {
12491 if (FallthroughUnreachable)
12495 CaseBlock CB(CC,
LHS,
RHS, MHS,
I->MBB, Fallthrough, CurMBB,
12498 if (CurMBB == SwitchMBB)
12501 SL->SwitchCases.push_back(CB);
12506 CurMBB = Fallthrough;
12510void SelectionDAGBuilder::splitWorkItem(
SwitchWorkList &WorkList,
12511 const SwitchWorkListItem &W,
12514 assert(
W.FirstCluster->Low->getValue().slt(
W.LastCluster->Low->getValue()) &&
12515 "Clusters not sorted?");
12516 assert(
W.LastCluster -
W.FirstCluster + 1 >= 2 &&
"Too small to split!");
12518 auto [LastLeft, FirstRight, LeftProb, RightProb] =
12519 SL->computeSplitWorkItemInfo(W);
12524 assert(PivotCluster >
W.FirstCluster);
12525 assert(PivotCluster <=
W.LastCluster);
12530 const ConstantInt *Pivot = PivotCluster->Low;
12539 MachineBasicBlock *LeftMBB;
12540 if (FirstLeft == LastLeft && FirstLeft->Kind ==
CC_Range &&
12541 FirstLeft->Low ==
W.GE &&
12542 (FirstLeft->High->getValue() + 1LL) == Pivot->
getValue()) {
12543 LeftMBB = FirstLeft->MBB;
12545 LeftMBB =
FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
12546 FuncInfo.MF->insert(BBI, LeftMBB);
12548 {LeftMBB, FirstLeft, LastLeft,
W.GE, Pivot,
W.DefaultProb / 2});
12556 MachineBasicBlock *RightMBB;
12557 if (FirstRight == LastRight && FirstRight->Kind ==
CC_Range &&
12558 W.LT && (FirstRight->High->getValue() + 1ULL) ==
W.LT->getValue()) {
12559 RightMBB = FirstRight->MBB;
12561 RightMBB =
FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
12562 FuncInfo.MF->insert(BBI, RightMBB);
12564 {RightMBB, FirstRight, LastRight, Pivot,
W.LT,
W.DefaultProb / 2});
12570 CaseBlock CB(
ISD::SETLT,
Cond, Pivot,
nullptr, LeftMBB, RightMBB,
W.MBB,
12573 if (
W.MBB == SwitchMBB)
12576 SL->SwitchCases.push_back(CB);
12601 MachineBasicBlock *SwitchMBB =
FuncInfo.MBB;
12609 unsigned PeeledCaseIndex = 0;
12610 bool SwitchPeeled =
false;
12611 for (
unsigned Index = 0;
Index < Clusters.size(); ++
Index) {
12612 CaseCluster &CC = Clusters[
Index];
12613 if (CC.
Prob < TopCaseProb)
12615 TopCaseProb = CC.
Prob;
12616 PeeledCaseIndex =
Index;
12617 SwitchPeeled =
true;
12622 LLVM_DEBUG(
dbgs() <<
"Peeled one top case in switch stmt, prob: "
12623 << TopCaseProb <<
"\n");
12628 MachineBasicBlock *PeeledSwitchMBB =
12630 FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
12633 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
12634 SwitchWorkListItem
W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
12635 nullptr,
nullptr, TopCaseProb.
getCompl()};
12636 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
12638 Clusters.erase(PeeledCaseIt);
12639 for (CaseCluster &CC : Clusters) {
12641 dbgs() <<
"Scale the probablity for one cluster, before scaling: "
12642 << CC.
Prob <<
"\n");
12646 PeeledCaseProb = TopCaseProb;
12647 return PeeledSwitchMBB;
12650void SelectionDAGBuilder::visitSwitch(
const SwitchInst &
SI) {
12652 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
12654 Clusters.reserve(
SI.getNumCases());
12655 for (
auto I :
SI.cases()) {
12656 MachineBasicBlock *Succ =
FuncInfo.getMBB(
I.getCaseSuccessor());
12657 const ConstantInt *CaseVal =
I.getCaseValue();
12658 BranchProbability Prob =
12660 : BranchProbability(1,
SI.getNumCases() + 1);
12664 MachineBasicBlock *DefaultMBB =
FuncInfo.getMBB(
SI.getDefaultDest());
12673 MachineBasicBlock *PeeledSwitchMBB =
12674 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
12677 MachineBasicBlock *SwitchMBB =
FuncInfo.MBB;
12678 if (Clusters.empty()) {
12679 assert(PeeledSwitchMBB == SwitchMBB);
12681 if (DefaultMBB != NextBlock(SwitchMBB)) {
12688 SL->findJumpTables(Clusters, &SI,
getCurSDLoc(), DefaultMBB,
DAG.getPSI(),
12690 SL->findBitTestClusters(Clusters, &SI);
12693 dbgs() <<
"Case clusters: ";
12694 for (
const CaseCluster &
C : Clusters) {
12700 C.Low->getValue().print(
dbgs(),
true);
12701 if (
C.Low !=
C.High) {
12703 C.High->getValue().print(
dbgs(),
true);
12710 assert(!Clusters.empty());
12714 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
12718 DefaultMBB ==
FuncInfo.getMBB(
SI.getDefaultDest()))
12721 {PeeledSwitchMBB,
First,
Last,
nullptr,
nullptr, DefaultProb});
12723 while (!WorkList.
empty()) {
12725 unsigned NumClusters =
W.LastCluster -
W.FirstCluster + 1;
12730 splitWorkItem(WorkList, W,
SI.getCondition(), SwitchMBB);
12734 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, DefaultMBB);
12738void SelectionDAGBuilder::visitStepVector(
const CallInst &
I) {
12739 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12745void SelectionDAGBuilder::visitVectorReverse(
const CallInst &
I) {
12746 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12751 assert(VT ==
V.getValueType() &&
"Malformed vector.reverse!");
12760 SmallVector<int, 8>
Mask;
12762 for (
unsigned i = 0; i != NumElts; ++i)
12763 Mask.push_back(NumElts - 1 - i);
12768void SelectionDAGBuilder::visitVectorDeinterleave(
const CallInst &
I,
12777 EVT OutVT = ValueVTs[0];
12781 for (
unsigned i = 0; i != Factor; ++i) {
12782 assert(ValueVTs[i] == OutVT &&
"Expected VTs to be the same");
12784 DAG.getVectorIdxConstant(OutNumElts * i,
DL));
12790 SDValue Even =
DAG.getVectorShuffle(OutVT,
DL, SubVecs[0], SubVecs[1],
12792 SDValue Odd =
DAG.getVectorShuffle(OutVT,
DL, SubVecs[0], SubVecs[1],
12800 DAG.getVTList(ValueVTs), SubVecs);
12804void SelectionDAGBuilder::visitVectorInterleave(
const CallInst &
I,
12807 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12812 for (
unsigned i = 0; i < Factor; ++i) {
12815 "Expected VTs to be the same");
12833 for (
unsigned i = 0; i < Factor; ++i)
12840void SelectionDAGBuilder::visitFreeze(
const FreezeInst &
I) {
12844 unsigned NumValues = ValueVTs.
size();
12845 if (NumValues == 0)
return;
12850 for (
unsigned i = 0; i != NumValues; ++i)
12855 DAG.getVTList(ValueVTs), Values));
12858void SelectionDAGBuilder::visitVectorSplice(
const CallInst &
I) {
12859 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12871 DAG.getSignedConstant(
12878 uint64_t Idx = (NumElts +
Imm) % NumElts;
12881 SmallVector<int, 8>
Mask;
12882 for (
unsigned i = 0; i < NumElts; ++i)
12883 Mask.push_back(Idx + i);
12911 assert(
MI->getOpcode() == TargetOpcode::COPY &&
12912 "start of copy chain MUST be COPY");
12913 Reg =
MI->getOperand(1).getReg();
12916 assert(
Reg.isVirtual() &&
"expected COPY of virtual register");
12917 MI =
MRI.def_begin(
Reg)->getParent();
12920 if (
MI->getOpcode() == TargetOpcode::COPY) {
12921 assert(
Reg.isVirtual() &&
"expected COPY of virtual register");
12922 Reg =
MI->getOperand(1).getReg();
12923 assert(
Reg.isPhysical() &&
"expected COPY of physical register");
12926 assert(
MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
12927 "end of copy chain MUST be INLINEASM_BR");
12937void SelectionDAGBuilder::visitCallBrLandingPad(
const CallInst &
I) {
12943 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12944 const TargetRegisterInfo *
TRI =
DAG.getSubtarget().getRegisterInfo();
12945 MachineRegisterInfo &
MRI =
DAG.getMachineFunction().getRegInfo();
12953 for (
auto &
T : TargetConstraints) {
12954 SDISelAsmOperandInfo OpInfo(
T);
12962 switch (OpInfo.ConstraintType) {
12973 FuncInfo.MBB->addLiveIn(OriginalDef);
12981 ResultVTs.
push_back(OpInfo.ConstraintVT);
12990 ResultVTs.
push_back(OpInfo.ConstraintVT);
12998 DAG.getVTList(ResultVTs), ResultValues);
unsigned const MachineRegisterInfo * MRI
static unsigned getIntrinsicID(const SDNode *N)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
static Value * getCondition(Instruction *I)
const HexagonInstrInfo * TII
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
static void getRegistersForValue(MachineFunction &MF, MachineIRBuilder &MIRBuilder, GISelAsmOperandInfo &OpInfo, GISelAsmOperandInfo &RefOpInfo)
Assign virtual/physical registers for the specified register operand.
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Machine Check Debug Module
static bool isUndef(const MachineInstr &MI)
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static const Function * getCalledFunction(const Value *V)
This file provides utility analysis objects describing memory locations.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
MachineInstr unsigned OpIdx
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
OptimizedStructLayoutField Field
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static Type * getValueType(Value *V)
Returns the type of the given value/instruction V.
static bool hasOnlySelectUsers(const Value *Cond)
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL, SDValue &Chain)
Create a LOAD_STACK_GUARD node, and let it carry the target specific global variable if there exists ...
static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index, SDValue &Scale, SelectionDAGBuilder *SDB, const BasicBlock *CurBB, uint64_t ElemSize)
static void failForInvalidBundles(const CallBase &I, StringRef Name, ArrayRef< uint32_t > AllowedBundles)
static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx, const SDLoc &DL, SmallVectorImpl< SDValue > &Ops, SelectionDAGBuilder &Builder)
Add a stack map intrinsic call's live variable operands to a stackmap or patchpoint target node's ope...
static const unsigned MaxParallelChains
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
visitPow - Lower a pow intrinsic.
static const CallBase * FindPreallocatedCall(const Value *PreallocatedSetup)
Given a @llvm.call.preallocated.setup, return the corresponding preallocated call.
static cl::opt< unsigned > SwitchPeelThreshold("switch-peel-threshold", cl::Hidden, cl::init(66), cl::desc("Set the case probability threshold for peeling the case from a " "switch statement. A value greater than 100 will void this " "optimization"))
static cl::opt< bool > InsertAssertAlign("insert-assert-align", cl::init(true), cl::desc("Insert the experimental `assertalign` node."), cl::ReallyHidden)
static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin)
static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG, DILocalVariable *Variable, DebugLoc DL, unsigned Order, SmallVectorImpl< Value * > &Values, DIExpression *Expression)
static unsigned findMatchingInlineAsmOperand(unsigned OperandNo, const std::vector< SDValue > &AsmNodeOperands)
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo, SDISelAsmOperandInfo &MatchingOpInfo, SelectionDAG &DAG)
Make sure that the output operand OpInfo and its corresponding input operand MatchingOpInfo have comp...
static void findUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
When an invoke or a cleanupret unwinds to the next EH pad, there are many places it could ultimately ...
static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic)
static BranchProbability scaleCaseProbality(BranchProbability CaseProb, BranchProbability PeeledCaseProb)
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp2 - Lower an exp2 intrinsic.
static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue Scale, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt, const SDLoc &dl)
getF32Constant - Get 32-bit floating point constant.
static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val, const SDLoc &DL, EVT PartVT)
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog10 - Lower a log10 intrinsic.
DenseMap< const Argument *, std::pair< const AllocaInst *, const StoreInst * > > ArgCopyElisionMapTy
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv)
getCopyToPartsVector - Create a series of nodes that contain the specified value split into legal par...
static void getUnderlyingArgRegs(SmallVectorImpl< std::pair< Register, TypeSize > > &Regs, const SDValue &N)
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv=std::nullopt, ISD::NodeType ExtendKind=ISD::ANY_EXTEND)
getCopyToParts - Create a series of nodes that contain the specified value split into legal parts.
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, SelectionDAGBuilder &Builder)
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog2 - Lower a log2 intrinsic.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location, SDISelAsmOperandInfo &OpInfo, SelectionDAG &DAG)
Get a direct memory input to behave well as an indirect operand.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel)
isOnlyUsedInEntryBlock - If the specified argument is only used in the entry block,...
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V, const Twine &ErrMsg)
static bool collectInstructionDeps(SmallMapVector< const Instruction *, bool, 8 > *Deps, const Value *V, SmallMapVector< const Instruction *, bool, 8 > *Necessary=nullptr, unsigned Depth=0)
static void findArgumentCopyElisionCandidates(const DataLayout &DL, FunctionLoweringInfo *FuncInfo, ArgCopyElisionMapTy &ArgCopyElisionCandidates)
Scan the entry block of the function in FuncInfo for arguments that look like copies into a local all...
static bool isFunction(SDValue Op)
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI, const SDLoc &dl)
GetExponent - Get the exponent:
static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg)
static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, SelectionDAG &DAG)
ExpandPowI - Expand a llvm.powi intrinsic.
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog - Lower a log intrinsic.
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC=std::nullopt, std::optional< ISD::NodeType > AssertOp=std::nullopt)
getCopyFromParts - Create a value that contains the specified legal parts combined into the value the...
static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl, SelectionDAG &DAG)
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl)
GetSignificand - Get the significand and build it into a floating-point number with exponent of 1:
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp - Lower an exp intrinsic.
static const MDNode * getRangeMetadata(const Instruction &I)
static cl::opt< unsigned, true > LimitFPPrecision("limit-float-precision", cl::desc("Generate low-precision inline sequences " "for some float libcalls"), cl::location(LimitFloatPrecision), cl::Hidden, cl::init(0))
static void tryToElideArgumentCopy(FunctionLoweringInfo &FuncInfo, SmallVectorImpl< SDValue > &Chains, DenseMap< int, int > &ArgCopyElisionFrameIndexMap, SmallPtrSetImpl< const Instruction * > &ElidedArgCopyInstrs, ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg, ArrayRef< SDValue > ArgVals, bool &ArgHasUses)
Try to elide argument copies from memory into a local alloca.
static unsigned LimitFloatPrecision
LimitFloatPrecision - Generate low-precision inline sequences for some float libcalls (6,...
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC)
getCopyFromPartsVector - Create a value that contains the specified legal parts combined into the val...
static bool InBlock(const Value *V, const BasicBlock *BB)
static FPClassTest getNoFPClass(const Instruction &I)
static LLVM_ATTRIBUTE_ALWAYS_INLINE MVT::SimpleValueType getSimpleVT(const unsigned char *MatcherTable, unsigned &MatcherIndex)
getSimpleVT - Decode a value in MatcherTable, if it's a VBR encoded value, use GetVBR to decode it.
This file defines the SmallPtrSet class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
static const fltSemantics & IEEEsingle()
Class for arbitrary precision integers.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
an instruction to allocate memory on the stack
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
This class represents an incoming formal argument to a Function.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Check if an argument has a given attribute.
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
This class holds the attributes for a particular argument, parameter, function, or return value.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI InstListType::const_iterator getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
This class represents a no-op cast from one type to another.
The address of a basic block.
Conditional or Unconditional Branch instruction.
Analysis providing branch probability information.
LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
LLVM_ABI bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const
Test if an edge is hot relative to other out-edges of the Src.
static uint32_t getDenominator()
static BranchProbability getOne()
static BranchProbability getUnknown()
uint32_t getNumerator() const
LLVM_ABI uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
CallingConv::ID getCallingConv() const
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI bool isTailCall() const
Tests if this call site is marked as a tail call.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
A constant value that is initialized with an expression using other constant values.
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
A signed pointer, in the ptrauth sense.
uint64_t getZExtValue() const
Constant Vector Declarations.
This is an important base class in LLVM.
This is the common base class for constrained floating point intrinsics.
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI unsigned getNonMetadataArgCount() const
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static bool fragmentsOverlap(const FragmentInfo &A, const FragmentInfo &B)
Check if fragments overlap between a pair of FragmentInfos.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
LLVM_ABI uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static LLVM_ABI std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
static LLVM_ABI const DIExpression * convertToUndefExpression(const DIExpression *Expr)
Removes all elements from Expr that do not apply to an undef debug value, which includes every operat...
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static LLVM_ABI DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
Base class for variables.
LLVM_ABI std::optional< uint64_t > getSizeInBits() const
Determines the size of the variable's type.
A parsed version of the target data layout string in and methods for querying it.
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LocationType getType() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
LLVM_ABI iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
LLVM_ABI DILocation * getInlinedAt() const
iterator find(const_arg_type_t< KeyT > Val)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Diagnostic information for inline asm reporting.
static constexpr ElementCount getFixed(ScalarTy MinVal)
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
constexpr bool isScalar() const
Exactly one element.
Lightweight error class with error context and mandatory checking.
Class representing an expression and its matching format.
This instruction compares its operands according to the predicate given to the constructor.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
bool allowReassoc() const
Flag queries.
An instruction for ordering other memory operations.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
BranchProbabilityInfo * BPI
MachineBasicBlock * getMBB(const BasicBlock *BB) const
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block.
const LiveOutInfo * GetLiveOutRegInfo(Register Reg)
GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the register is a PHI destinat...
MachineBasicBlock * MBB
MBB - The current block.
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
Type * getReturnType() const
Data structure describing the variable locations in a function.
const BasicBlock & getEntryBlock() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const
check if an attributes is in the list of attributes.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Constant * getPersonalityFn() const
Get the personality function associated with this function.
AttributeList getAttributes() const
Return the attribute list for this Function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Garbage collection metadata for a single function.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
This instruction inserts a struct field of array element value into an aggregate value.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
@ MIN_INT_BITS
Minimum number of bits that can be specified.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
@ OB_clang_arc_attachedcall
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
A helper class to return the specified delimiter string after the first invocation of operator String...
An instruction for reading from memory.
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
LLVM_ABI MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
@ INVALID_SIMPLE_VALUE_TYPE
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool bitsGE(MVT VT) const
Return true if this has no less bits than VT.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
succ_iterator succ_begin()
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void setIsEHContTarget(bool V=true)
Indicates if this is a target of Windows EH Continuation Guard.
void setIsEHFuncletEntry(bool V=true)
Indicates if this is the entry block of an EH funclet.
MachineInstrBundleIterator< MachineInstr > iterator
void setIsEHScopeEntry(bool V=true)
Indicates if this is the entry block of an EH scope, i.e., the block that used to have a catchpa...
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable)
Marks the immutability of an object.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
void setIsAliasedObjectIndex(int ObjectIdx, bool IsAliased)
Set "maybe pointed to by an LLVM IR value" for an object.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
void RemoveStackObject(int ObjectIdx)
Remove or mark dead a statically sized stack object.
void setFunctionContextIndex(int I)
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site)
Map the begin label for a call site.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD)
Record annotations associated with a particular label.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
bool hasEHFunclets() const
void setHasEHContTarget(bool V)
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
static MachineOperand CreateFI(int Idx)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MCRegister getLiveInPhysReg(Register VReg) const
getLiveInPhysReg - If VReg is a live-in virtual register, return the corresponding live-in physical r...
An SDNode that represents everything that will be needed to construct a MachineInstr.
bool contains(const KeyT &Key) const
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
static MemoryLocation getAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location after Ptr, while remaining within the underlying objec...
A Module instance is used to store all the information related to an LLVM module.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
Holds the information from a dbg_label node through SDISel.
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(Register VReg)
static SDDbgOperand fromConst(const Value *Const)
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
SelectionDAGBuilder - This is the common target-independent lowering implementation that is parameter...
SDValue getValue(const Value *V)
getValue - Return an SDValue for the given Value.
DenseMap< const Constant *, Register > ConstantsOut
void addDanglingDebugInfo(SmallVectorImpl< Value * > &Values, DILocalVariable *Var, DIExpression *Expr, bool IsVariadic, DebugLoc DL, unsigned Order)
Register a dbg_value which relies on a Value which we have not yet seen.
void visitDbgInfo(const Instruction &I)
void clearDanglingDebugInfo()
Clear the dangling debug information map.
void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall, bool IsMustTailCall, const BasicBlock *EHPadBB=nullptr, const TargetLowering::PtrAuthInfo *PAI=nullptr)
void clear()
Clear out the current SelectionDAG and the associated state and prepare this SelectionDAGBuilder obje...
void visitBitTestHeader(SwitchCG::BitTestBlock &B, MachineBasicBlock *SwitchBB)
visitBitTestHeader - This function emits necessary code to produce value suitable for "bit tests"
void LowerStatepoint(const GCStatepointInst &I, const BasicBlock *EHPadBB=nullptr)
std::unique_ptr< SDAGSwitchLowering > SL
SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I, SDValue Op)
bool HasTailCall
This is set to true if a call in the current block has been translated as a tail call.
bool ShouldEmitAsBranches(const std::vector< SwitchCG::CaseBlock > &Cases)
If the set of cases should be emitted as a series of branches, return true.
void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
EmitBranchForMergedCondition - Helper method for FindMergedConditions.
void LowerDeoptimizeCall(const CallInst *CI)
void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee, const BasicBlock *EHPadBB)
SwiftErrorValueTracking & SwiftError
Information about the swifterror values used throughout the function.
SDValue getNonRegisterValue(const Value *V)
getNonRegisterValue - Return an SDValue for the given Value, but don't look in FuncInfo....
const TargetTransformInfo * TTI
DenseMap< MachineBasicBlock *, SmallVector< unsigned, 4 > > LPadToCallSiteMap
Map a landing pad to the call site indexes.
SDValue lowerNoFPClassToAssertNoFPClass(SelectionDAG &DAG, const Instruction &I, SDValue Op)
void handleDebugDeclare(Value *Address, DILocalVariable *Variable, DIExpression *Expression, DebugLoc DL)
bool shouldKeepJumpConditionsTogether(const FunctionLoweringInfo &FuncInfo, const BranchInst &I, Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs, TargetLoweringBase::CondMergingParams Params) const
StatepointLoweringState StatepointLowering
State used while lowering a statepoint sequence (gc_statepoint, gc_relocate, and gc_result).
void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB, BranchProbability BranchProbToNext, Register Reg, SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB)
visitBitTestCase - this function produces one "bit test"
bool canTailCall(const CallBase &CB) const
void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI, const CallBase *Call, unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy, AttributeSet RetAttrs, bool IsPatchPoint)
Populate a CallLoweringInfo (into CLI) based on the properties of the call being lowered.
void CopyValueToVirtualRegister(const Value *V, Register Reg, ISD::NodeType ExtendType=ISD::ANY_EXTEND)
void salvageUnresolvedDbgValue(const Value *V, DanglingDebugInfo &DDI)
For the given dangling debuginfo record, perform last-ditch efforts to resolve the debuginfo to somet...
SmallVector< SDValue, 8 > PendingLoads
Loads are not emitted to the program immediately.
GCFunctionInfo * GFI
Garbage collection metadata for the function.
void init(GCFunctionInfo *gfi, BatchAAResults *BatchAA, AssumptionCache *AC, const TargetLibraryInfo *li, const TargetTransformInfo &TTI)
SDValue getRoot()
Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict) items.
void ExportFromCurrentBlock(const Value *V)
ExportFromCurrentBlock - If this condition isn't known to be exported from the current basic block,...
DebugLoc getCurDebugLoc() const
void resolveOrClearDbgInfo()
Evict any dangling debug information, attempting to salvage it first.
std::pair< SDValue, SDValue > lowerInvokable(TargetLowering::CallLoweringInfo &CLI, const BasicBlock *EHPadBB=nullptr)
SDValue getMemoryRoot()
Return the current virtual root of the Selection DAG, flushing any PendingLoad items.
void resolveDanglingDebugInfo(const Value *V, SDValue Val)
If we saw an earlier dbg_value referring to V, generate the debug data structures now that we've seen...
SDLoc getCurSDLoc() const
void visit(const Instruction &I)
void dropDanglingDebugInfo(const DILocalVariable *Variable, const DIExpression *Expr)
If we have dangling debug info that describes Variable, or an overlapping part of variable considerin...
SDValue getCopyFromRegs(const Value *V, Type *Ty)
If there was virtual register allocated for the value V emit CopyFromReg of the specified type Ty.
void CopyToExportRegsIfNeeded(const Value *V)
CopyToExportRegsIfNeeded - If the given value has virtual registers created for it,...
void handleKillDebugValue(DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order)
Create a record for a kill location debug intrinsic.
void visitJumpTable(SwitchCG::JumpTable &JT)
visitJumpTable - Emit JumpTable node in the current MBB
SDValue getFPOperationRoot(fp::ExceptionBehavior EB)
Return the current virtual root of the Selection DAG, flushing PendingConstrainedFP or PendingConstra...
void visitJumpTableHeader(SwitchCG::JumpTable &JT, SwitchCG::JumpTableHeader &JTH, MachineBasicBlock *SwitchBB)
visitJumpTableHeader - This function emits necessary code to produce index in the JumpTable from swit...
void LowerCallSiteWithPtrAuthBundle(const CallBase &CB, const BasicBlock *EHPadBB)
static const unsigned LowestSDNodeOrder
Lowest valid SDNodeOrder.
void LowerDeoptimizingReturn()
FunctionLoweringInfo & FuncInfo
Information about the function as a whole.
void setValue(const Value *V, SDValue NewN)
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, Instruction::BinaryOps Opc, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
const TargetLibraryInfo * LibInfo
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB)
void visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineBasicBlock *ParentBB)
Codegen a new tail for a stack protector check ParentMBB which has had its tail spliced into a stack ...
bool handleDebugValue(ArrayRef< const Value * > Values, DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order, bool IsVariadic)
For a given list of Values, attempt to create and record a SDDbgValue in the SelectionDAG.
SDValue getControlRoot()
Similar to getRoot, but instead of flushing all the PendingLoad items, flush all the PendingExports (...
void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last)
When an MBB was split during scheduling, update the references that need to refer to the last resulti...
SDValue getValueImpl(const Value *V)
getValueImpl - Helper function for getValue and getNonRegisterValue.
void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB)
visitSwitchCase - Emits the necessary code to represent a single node in the binary search tree resul...
void visitSPDescriptorFailure(StackProtectorDescriptor &SPD)
Codegen the failure basic block for a stack protector check.
std::unique_ptr< FunctionLoweringInfo > FuncInfo
SmallPtrSet< const Instruction *, 4 > ElidedArgCopyInstrs
const TargetLowering * TLI
MachineRegisterInfo * RegInfo
std::unique_ptr< SwiftErrorValueTracking > SwiftError
virtual void emitFunctionEntryCode()
std::unique_ptr< SelectionDAGBuilder > SDB
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, SDValue MaxLength, MachinePointerInfo SrcPtrInfo) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, const CallInst *CI) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dest, SDValue Src, MachinePointerInfo DestPtrInfo, MachinePointerInfo SrcPtrInfo, bool isStpcpy) const
Emit target-specific code that performs a strcpy or stpcpy, in cases where that is faster than a libc...
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Src, SDValue Char, SDValue Length, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memchr, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, const CallInst *CI) const
Emit target-specific code that performs a memcmp/bcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a strcmp, in cases where that is faster than a libcall.
virtual SDValue EmitTargetCodeForSetTag(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Addr, SDValue Size, MachinePointerInfo DstPtrInfo, bool ZeroData) const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
const TargetSubtargetInfo & getSubtarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI bool shouldOptForSize() const
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
LLVM_ABI void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
LLVM_ABI SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
LLVM_ABI SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void swap(SmallVectorImpl &RHS)
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
MachineBasicBlock * getSuccessMBB()
MachineBasicBlock * getFailureMBB()
MachineBasicBlock * getParentMBB()
bool shouldEmitFunctionBasedCheckStackProtector() const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Provides information about what library functions are available for the current target.
virtual Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Returns the desired alignment for ByVal or InAlloca aggregate function arguments in the caller parame...
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual bool isLegalScaleForGatherScatter(uint64_t Scale, uint64_t ElemSize) const
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vecto...
virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const
Returns true if the index type for a masked gather/scatter requires extending.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const
Certain targets have context sensitive alignment requirements, where one type has the alignment requi...
MachineMemOperand::Flags getVPIntrinsicMemOperandFlags(const VPIntrinsic &VPIntrin) const
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ ...
virtual MVT getFenceOperandTy(const DataLayout &DL) const
Return the type for operands of fence.
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF, bool IsScalable) const
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual MVT hasFastEqualityCompare(unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the give...
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
virtual bool shouldExpandCttzElements(EVT VT) const
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
virtual bool signExtendConstant(const ConstantInt *C) const
Return true if this constant should be sign extended when promoting to a larger type.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
bool supportsUnalignedAtomics() const
Whether the target supports unaligned atomic operations.
std::vector< ArgListEntry > ArgListTy
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool supportKCFIBundles() const
Return true if the target supports kcfi operand bundles.
virtual bool supportPtrAuthBundles() const
Return true if the target supports ptrauth operand bundles.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
virtual SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue &NewLoad, SDValue Ptr, SDValue PassThru, SDValue Mask) const
virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const
Target-specific splitting of values into parts that fit a register storing a legal type.
virtual SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const
Target-specific combining of register parts into its original value.
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to u...
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
virtual SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue Ptr, SDValue Val, SDValue Mask) const
virtual bool useLoadStackGuardNode(const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
virtual bool isInlineAsmTargetBranch(const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use eithe...
virtual MVT getJumpTableRegTy(const DataLayout &DL) const
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &, const Type *RetTy) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
unsigned getID() const
Return the register class ID number.
const MCPhysReg * iterator
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
bool isVoidTy() const
Return true if this is 'void'.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
LLVM_ABI CmpInst::Predicate getPredicate() const
This is the common base class for vector predication intrinsics.
static LLVM_ABI std::optional< unsigned > getVectorLengthParamPos(Intrinsic::ID IntrinsicID)
LLVM_ABI MaybeAlign getPointerAlignment() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVM_ABI LLVMContext & getContext() const
All values hold a context through their type.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Base class of all SIMD vector types.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
const ParentTy * getParent() const
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char SymbolName[]
Key for Kernel::Metadata::mSymbolName.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ X86_VectorCall
MSVC calling convention that passes vectors and vector aggregates in SSE registers.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ LOOP_DEPENDENCE_RAW_MASK
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ ADD
Simple integer binary arithmetic operators.
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ EH_SJLJ_SETUP_DISPATCH
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ FMULADD
FMULADD - Performs a * b + c, with, or without, intermediate rounding.
@ FPTRUNC_ROUND
FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
@ SIGN_EXTEND
Conversion operators.
@ ADDROFRETURNADDR
ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ SSUBO
Same for subtraction.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor to...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ BasicBlock
Various leaf nodes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ SHL
Shift and rotation operations.
@ AssertNoFPClass
AssertNoFPClass - These nodes record if a register contains a float value that is known to be not som...
@ PtrAuthGlobalAddress
A ptrauth constant.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ LOCAL_RECOVER
LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
@ SPONENTRY
SPONENTRY - Represents the llvm.sponentry intrinsic.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor ...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
@ LOOP_DEPENDENCE_WAR_MASK
@ SET_ROUNDING
Set rounding mode.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
IntrinsicID_match m_VScale()
Matches a call to llvm.vscale().
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
std::pair< JumpTableHeader, JumpTable > JumpTableBlock
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
std::vector< CaseCluster > CaseClusterVector
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
SmallVector< SwitchWorkListItem, 4 > SwitchWorkList
CaseClusterVector::iterator CaseClusterIt
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebMayTrap
This corresponds to "fpexcept.maytrap".
@ ebIgnore
This corresponds to "fpexcept.ignore".
NodeAddr< FuncNode * > Func
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
FunctionAddr VTableAddr Value
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs=nullptr, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
LLVM_ABI void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
static ConstantRange getRange(Value *Op, SCCPSolver &Solver, const SmallPtrSetImpl< Value * > &InsertedValues)
Helper for getting ranges from Solver.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
auto cast_or_null(const Y &Val)
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
gep_type_iterator gep_type_end(const User *GEP)
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
void ComputeValueTypes(const DataLayout &DL, Type *Ty, SmallVectorImpl< Type * > &Types, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
Given an LLVM IR type, compute non-aggregate subtypes.
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
detail::zippy< detail::zip_first, T, U, Args... > zip_first(T &&t, U &&u, Args &&...args)
zip iterator that, for the sake of efficiency, assumes the first iteratee to be the shortest.
void sort(IteratorTy Start, IteratorTy End)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
generic_gep_type_iterator<> gep_type_iterator
FunctionAddr VTableAddr Count
auto succ_size(const MachineBasicBlock *BB)
bool hasSingleElement(ContainerTy &&C)
Returns true if the given container only contains a single element.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Global
Append to llvm.global_dtors.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
FunctionAddr VTableAddr uintptr_t uintptr_t Data
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
LLVM_ABI llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ And
Bitwise or logical AND of integers.
@ Sub
Subtraction of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, can return either (or invariant).
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
DWARFExpression::Operation Op
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if w...
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
gep_type_iterator gep_type_begin(const User *GEP)
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list.
LLVM_ABI Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
@ Default
The result values are uniform if and only if all operands are uniform.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
uint64_t getScalarStoreSize() const
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
bool isRISCVVectorTuple() const
Return true if this is a RISC-V vector tuple type.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
void setOrigAlign(Align A)
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
A lightweight accessor for an operand bundle meant to be passed around by value.
This struct represents the registers (physical or virtual) that a particular set of values is assigne...
SmallVector< std::pair< Register, TypeSize >, 4 > getRegsAndSizes() const
Return a list of registers and their sizes.
SmallVector< unsigned, 4 > RegCount
This list holds the number of registers for each value.
bool isABIMangled() const
SmallVector< EVT, 4 > ValueVTs
The value types of the values, which may not be legal, and may need be promoted or synthesized from o...
SmallVector< Register, 4 > Regs
This list holds the registers assigned to the values.
void AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching, unsigned MatchingIdx, const SDLoc &dl, SelectionDAG &DAG, std::vector< SDValue > &Ops) const
Add this value to the specified inlineasm node operand list.
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr) const
Emit a series of CopyFromReg nodes that copies from this value and returns the result as a ValueVTs v...
SmallVector< MVT, 4 > RegVTs
The value types of the registers.
void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr, ISD::NodeType PreferredExtendType=ISD::ANY_EXTEND) const
Emit a series of CopyToReg nodes that copies the specified value into the registers specified by this...
std::optional< CallingConv::ID > CallConv
Records if this value needs to be treated in an ABI dependant manner, different to normal type legali...
bool occupiesMultipleRegs() const
Check if the total RegCount is greater than one.
These are IR-level optimization flags that may be propagated to SDNodes.
void copyFMF(const FPMathOperator &FPMO)
Propagate the fast-math-flags from an IR FPMathOperator.
void setUnpredictable(bool b)
bool hasAllowReassociation() const
void setNoUnsignedWrap(bool b)
void setNoSignedWrap(bool b)
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
A MapVector that performs no allocations if smaller than a certain size.
MachineBasicBlock * Default
BranchProbability DefaultProb
MachineBasicBlock * Parent
bool FallthroughUnreachable
MachineBasicBlock * ThisBB
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...
BranchProbability TrueProb
BranchProbability FalseProb
MachineBasicBlock * TrueBB
MachineBasicBlock * FalseBB
SDLoc DL
The debug location of the instruction this CaseBlock was produced from.
static CaseCluster range(const ConstantInt *Low, const ConstantInt *High, MachineBasicBlock *MBB, BranchProbability Prob)
This contains information for each constraint that we are lowering.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setConvergent(bool Value=true)
CallLoweringInfo & setDeactivationSymbol(GlobalValue *Sym)
CallLoweringInfo & setCFIType(const ConstantInt *Type)
SmallVector< ISD::InputArg, 32 > Ins
bool IsPostTypeLegalization
SmallVector< SDValue, 4 > InVals
Type * OrigRetTy
Original unlegalized return type.
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setIsPreallocated(bool Value=true)
CallLoweringInfo & setConvergenceControlToken(SDValue Token)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setPtrAuth(PtrAuthInfo Value)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setDiscardResult(bool Value=true)
This structure contains the information necessary for lowering pointer-authenticating indirect calls.
void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin, MCSymbol *InvokeEnd)