79#include "llvm/IR/IntrinsicsAArch64.h"
80#include "llvm/IR/IntrinsicsAMDGPU.h"
81#include "llvm/IR/IntrinsicsWebAssembly.h"
114#define DEBUG_TYPE "isel"
122 cl::desc(
"Insert the experimental `assertalign` node."),
127 cl::desc(
"Generate low-precision inline sequences "
128 "for some float libcalls"),
134 cl::desc(
"Set the case probability threshold for peeling the case from a "
135 "switch statement. A value greater than 100 will void this "
155 const SDValue *Parts,
unsigned NumParts,
158 std::optional<CallingConv::ID> CC);
167 unsigned NumParts,
MVT PartVT,
EVT ValueVT,
const Value *V,
169 std::optional<CallingConv::ID> CC = std::nullopt,
170 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
174 PartVT, ValueVT, CC))
181 assert(NumParts > 0 &&
"No parts to assemble!");
192 unsigned RoundBits = PartBits * RoundParts;
193 EVT RoundVT = RoundBits == ValueBits ?
199 if (RoundParts > 2) {
203 PartVT, HalfVT, V, InChain);
214 if (RoundParts < NumParts) {
216 unsigned OddParts = NumParts - RoundParts;
219 OddVT, V, InChain, CC);
235 assert(ValueVT ==
EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
246 !PartVT.
isVector() &&
"Unexpected split");
258 if (PartEVT == ValueVT)
262 ValueVT.
bitsLT(PartEVT)) {
275 if (ValueVT.
bitsLT(PartEVT)) {
280 Val = DAG.
getNode(*AssertOp,
DL, PartEVT, Val,
295 llvm::Attribute::StrictFP)) {
297 DAG.
getVTList(ValueVT, MVT::Other), InChain, Val,
309 if (PartEVT == MVT::x86mmx && ValueVT.
isInteger() &&
310 ValueVT.
bitsLT(PartEVT)) {
319 const Twine &ErrMsg) {
322 return Ctx.emitError(ErrMsg);
325 if (CI->isInlineAsm()) {
327 *CI, ErrMsg +
", possible invalid constraint for vector type"));
330 return Ctx.emitError(
I, ErrMsg);
339 const SDValue *Parts,
unsigned NumParts,
342 std::optional<CallingConv::ID> CallConv) {
344 assert(NumParts > 0 &&
"No parts to assemble!");
345 const bool IsABIRegCopy = CallConv.has_value();
354 unsigned NumIntermediates;
359 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT,
360 NumIntermediates, RegisterVT);
364 NumIntermediates, RegisterVT);
367 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
369 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
372 "Part type sizes don't match!");
376 if (NumIntermediates == NumParts) {
379 for (
unsigned i = 0; i != NumParts; ++i)
381 V, InChain, CallConv);
382 }
else if (NumParts > 0) {
385 assert(NumParts % NumIntermediates == 0 &&
386 "Must expand into a divisible number of parts!");
387 unsigned Factor = NumParts / NumIntermediates;
388 for (
unsigned i = 0; i != NumIntermediates; ++i)
390 IntermediateVT, V, InChain, CallConv);
405 DL, BuiltVectorTy,
Ops);
411 if (PartEVT == ValueVT)
427 "Cannot narrow, it would be a lossy transformation");
433 if (PartEVT == ValueVT)
458 }
else if (ValueVT.
bitsLT(PartEVT)) {
467 *DAG.
getContext(), V,
"non-trivial scalar-to-vector conversion");
498 std::optional<CallingConv::ID> CallConv);
505 unsigned NumParts,
MVT PartVT,
const Value *V,
506 std::optional<CallingConv::ID> CallConv = std::nullopt,
520 unsigned OrigNumParts = NumParts;
522 "Copying to an illegal type!");
528 EVT PartEVT = PartVT;
529 if (PartEVT == ValueVT) {
530 assert(NumParts == 1 &&
"No-op copy with multiple parts!");
539 assert(NumParts == 1 &&
"Do not know what to promote to!");
550 "Unknown mismatch!");
552 Val = DAG.
getNode(ExtendKind,
DL, ValueVT, Val);
553 if (PartVT == MVT::x86mmx)
558 assert(NumParts == 1 && PartEVT != ValueVT);
564 "Unknown mismatch!");
567 if (PartVT == MVT::x86mmx)
574 "Failed to tile the value with PartVT!");
577 if (PartEVT != ValueVT) {
579 "scalar-to-vector conversion failed");
588 if (NumParts & (NumParts - 1)) {
591 "Do not know what to expand to!");
593 unsigned RoundBits = RoundParts * PartBits;
594 unsigned OddParts = NumParts - RoundParts;
603 std::reverse(Parts + RoundParts, Parts + NumParts);
605 NumParts = RoundParts;
617 for (
unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
618 for (
unsigned i = 0; i < NumParts; i += StepSize) {
619 unsigned ThisBits = StepSize * PartBits / 2;
622 SDValue &Part1 = Parts[i+StepSize/2];
629 if (ThisBits == PartBits && ThisVT != PartVT) {
637 std::reverse(Parts, Parts + OrigNumParts);
659 if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
661 "Cannot widen to illegal type");
665 }
else if (PartEVT != ValueEVT) {
680 Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
691 std::optional<CallingConv::ID> CallConv) {
695 const bool IsABIRegCopy = CallConv.has_value();
698 EVT PartEVT = PartVT;
699 if (PartEVT == ValueVT) {
745 "lossy conversion of vector to scalar type");
760 unsigned NumIntermediates;
764 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
769 NumIntermediates, RegisterVT);
772 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
774 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
777 "Mixing scalable and fixed vectors when copying in parts");
779 std::optional<ElementCount> DestEltCnt;
789 if (ValueVT == BuiltVectorTy) {
813 for (
unsigned i = 0; i != NumIntermediates; ++i) {
828 if (NumParts == NumIntermediates) {
831 for (
unsigned i = 0; i != NumParts; ++i)
833 }
else if (NumParts > 0) {
836 assert(NumIntermediates != 0 &&
"division by zero");
837 assert(NumParts % NumIntermediates == 0 &&
838 "Must expand into a divisible number of parts!");
839 unsigned Factor = NumParts / NumIntermediates;
840 for (
unsigned i = 0; i != NumIntermediates; ++i)
848 if (
I.hasOperandBundlesOtherThan(AllowedBundles)) {
852 for (
unsigned i = 0, e =
I.getNumOperandBundles(); i != e; ++i) {
855 OS << LS << U.getTagName();
858 Twine(
"cannot lower ", Name)
864 EVT valuevt, std::optional<CallingConv::ID> CC)
870 std::optional<CallingConv::ID> CC) {
884 for (
unsigned i = 0; i != NumRegs; ++i)
885 Regs.push_back(Reg + i);
886 RegVTs.push_back(RegisterVT);
888 Reg = Reg.id() + NumRegs;
915 for (
unsigned i = 0; i != NumRegs; ++i) {
921 *Glue =
P.getValue(2);
924 Chain =
P.getValue(1);
952 EVT FromVT(MVT::Other);
956 }
else if (NumSignBits > 1) {
964 assert(FromVT != MVT::Other);
970 RegisterVT, ValueVT, V, Chain,
CallConv);
986 unsigned NumRegs =
Regs.size();
1000 NumParts, RegisterVT, V,
CallConv, ExtendKind);
1006 for (
unsigned i = 0; i != NumRegs; ++i) {
1018 if (NumRegs == 1 || Glue)
1029 Chain = Chains[NumRegs-1];
1035 unsigned MatchingIdx,
const SDLoc &dl,
1037 std::vector<SDValue> &
Ops)
const {
1042 Flag.setMatchingOp(MatchingIdx);
1043 else if (!
Regs.empty() &&
Regs.front().isVirtual()) {
1051 Flag.setRegClass(RC->
getID());
1062 "No 1:1 mapping from clobbers to regs?");
1065 for (
unsigned I = 0, E =
ValueVTs.size();
I != E; ++
I) {
1070 "If we clobbered the stack pointer, MFI should know about it.");
1079 for (
unsigned i = 0; i != NumRegs; ++i) {
1080 assert(Reg <
Regs.size() &&
"Mismatch in # registers expected");
1092 unsigned RegCount = std::get<0>(CountAndVT);
1093 MVT RegisterVT = std::get<1>(CountAndVT);
1111 SL->init(
DAG.getTargetLoweringInfo(), TM,
DAG.getDataLayout());
1113 *
DAG.getMachineFunction().getFunction().getParent());
1118 UnusedArgNodeMap.clear();
1120 PendingExports.clear();
1121 PendingConstrainedFP.clear();
1122 PendingConstrainedFPStrict.clear();
1130 DanglingDebugInfoMap.clear();
1137 if (Pending.
empty())
1143 unsigned i = 0, e = Pending.
size();
1144 for (; i != e; ++i) {
1146 if (Pending[i].
getNode()->getOperand(0) == Root)
1154 if (Pending.
size() == 1)
1181 if (!PendingConstrainedFPStrict.empty()) {
1182 assert(PendingConstrainedFP.empty());
1183 updateRoot(PendingConstrainedFPStrict);
1196 if (!PendingConstrainedFP.empty()) {
1197 assert(PendingConstrainedFPStrict.empty());
1198 updateRoot(PendingConstrainedFP);
1202 return DAG.getRoot();
1210 PendingConstrainedFP.size() +
1211 PendingConstrainedFPStrict.size());
1213 PendingConstrainedFP.end());
1214 PendingLoads.append(PendingConstrainedFPStrict.begin(),
1215 PendingConstrainedFPStrict.end());
1216 PendingConstrainedFP.clear();
1217 PendingConstrainedFPStrict.clear();
1224 PendingExports.append(PendingConstrainedFPStrict.begin(),
1225 PendingConstrainedFPStrict.end());
1226 PendingConstrainedFPStrict.clear();
1227 return updateRoot(PendingExports);
1234 assert(Variable &&
"Missing variable");
1241 <<
"dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
1257 if (IsParameter && FINode) {
1259 SDV =
DAG.getFrameIndexDbgValue(Variable,
Expression, FINode->getIndex(),
1260 true,
DL, SDNodeOrder);
1265 FuncArgumentDbgValueKind::Declare,
N);
1268 SDV =
DAG.getDbgValue(Variable,
Expression,
N.getNode(),
N.getResNo(),
1269 true,
DL, SDNodeOrder);
1271 DAG.AddDbgValue(SDV, IsParameter);
1276 FuncArgumentDbgValueKind::Declare,
N)) {
1278 <<
" (could not emit func-arg dbg_value)\n");
1289 for (
auto It = FnVarLocs->locs_begin(&
I), End = FnVarLocs->locs_end(&
I);
1291 auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1293 if (It->Values.isKillLocation(It->Expr)) {
1299 It->Values.hasArgList())) {
1302 FnVarLocs->getDILocalVariable(It->VariableID),
1303 It->Expr, Vals.
size() > 1, It->DL, SDNodeOrder);
1316 bool SkipDbgVariableRecords =
DAG.getFunctionVarLocs();
1319 for (
DbgRecord &DR :
I.getDbgRecordRange()) {
1321 assert(DLR->getLabel() &&
"Missing label");
1323 DAG.getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);
1324 DAG.AddDbgLabel(SDV);
1328 if (SkipDbgVariableRecords)
1336 if (
FuncInfo.PreprocessedDVRDeclares.contains(&DVR))
1338 LLVM_DEBUG(
dbgs() <<
"SelectionDAG visiting dbg_declare: " << DVR
1347 if (Values.
empty()) {
1364 SDNodeOrder, IsVariadic)) {
1375 if (
I.isTerminator()) {
1376 HandlePHINodesInSuccessorBlocks(
I.getParent());
1383 bool NodeInserted =
false;
1384 std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1385 MDNode *PCSectionsMD =
I.getMetadata(LLVMContext::MD_pcsections);
1386 MDNode *MMRA =
I.getMetadata(LLVMContext::MD_mmra);
1387 if (PCSectionsMD || MMRA) {
1388 InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1389 DAG, [&](
SDNode *) { NodeInserted =
true; });
1399 if (PCSectionsMD || MMRA) {
1400 auto It = NodeMap.find(&
I);
1401 if (It != NodeMap.end()) {
1403 DAG.addPCSections(It->second.getNode(), PCSectionsMD);
1405 DAG.addMMRAMetadata(It->second.getNode(), MMRA);
1406 }
else if (NodeInserted) {
1409 errs() <<
"warning: loosing !pcsections and/or !mmra metadata ["
1410 <<
I.getModule()->getName() <<
"]\n";
1419void SelectionDAGBuilder::visitPHI(
const PHINode &) {
1429#define HANDLE_INST(NUM, OPCODE, CLASS) \
1430 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1431#include "llvm/IR/Instruction.def"
1443 for (
const Value *V : Values) {
1468 DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr,
DL, Order);
1473 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1474 DIVariable *DanglingVariable = DDI.getVariable();
1476 if (DanglingVariable == Variable && Expr->
fragmentsOverlap(DanglingExpr)) {
1478 << printDDI(
nullptr, DDI) <<
"\n");
1484 for (
auto &DDIMI : DanglingDebugInfoMap) {
1485 DanglingDebugInfoVector &DDIV = DDIMI.second;
1489 for (
auto &DDI : DDIV)
1490 if (isMatchingDbgValue(DDI))
1493 erase_if(DDIV, isMatchingDbgValue);
1501 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1502 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1505 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1506 for (
auto &DDI : DDIV) {
1508 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1511 assert(Variable->isValidLocationForIntrinsic(
DL) &&
1512 "Expected inlined-at fields to agree");
1522 if (!EmitFuncArgumentDbgValue(V, Variable, Expr,
DL,
1523 FuncArgumentDbgValueKind::Value, Val)) {
1525 << printDDI(V, DDI) <<
"\n");
1532 <<
"changing SDNodeOrder from " << DbgSDNodeOrder <<
" to "
1533 << ValSDNodeOrder <<
"\n");
1534 SDV = getDbgValue(Val, Variable, Expr,
DL,
1535 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1536 DAG.AddDbgValue(SDV,
false);
1540 <<
" in EmitFuncArgumentDbgValue\n");
1542 LLVM_DEBUG(
dbgs() <<
"Dropping debug info for " << printDDI(V, DDI)
1546 DAG.getConstantDbgValue(Variable, Expr,
Poison,
DL, DbgSDNodeOrder);
1547 DAG.AddDbgValue(SDV,
false);
1554 DanglingDebugInfo &DDI) {
1559 const Value *OrigV = V;
1563 unsigned SDOrder = DDI.getSDNodeOrder();
1567 bool StackValue =
true;
1592 if (!AdditionalValues.
empty())
1602 dbgs() <<
"Salvaged debug location info for:\n " << *Var <<
"\n"
1603 << *OrigV <<
"\nBy stripping back to:\n " << *V <<
"\n");
1611 assert(OrigV &&
"V shouldn't be null");
1613 auto *SDV =
DAG.getConstantDbgValue(Var, Expr,
Poison,
DL, SDNodeOrder);
1614 DAG.AddDbgValue(SDV,
false);
1616 << printDDI(OrigV, DDI) <<
"\n");
1633 unsigned Order,
bool IsVariadic) {
1638 if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
1643 for (
const Value *V : Values) {
1653 if (CE->getOpcode() == Instruction::IntToPtr) {
1672 N = UnusedArgNodeMap[V];
1677 EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1678 FuncArgumentDbgValueKind::Value,
N))
1705 bool IsParamOfFunc =
1713 auto VMI =
FuncInfo.ValueMap.find(V);
1714 if (VMI !=
FuncInfo.ValueMap.end()) {
1719 V->getType(), std::nullopt);
1725 unsigned BitsToDescribe = 0;
1727 BitsToDescribe = *VarSize;
1729 BitsToDescribe = Fragment->SizeInBits;
1732 if (
Offset >= BitsToDescribe)
1735 unsigned RegisterSize = RegAndSize.second;
1736 unsigned FragmentSize = (
Offset + RegisterSize > BitsToDescribe)
1737 ? BitsToDescribe -
Offset
1740 Expr,
Offset, FragmentSize);
1744 Var, *FragmentExpr, RegAndSize.first,
false, DbgLoc, Order);
1745 DAG.AddDbgValue(SDV,
false);
1761 DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1762 false, DbgLoc, Order, IsVariadic);
1763 DAG.AddDbgValue(SDV,
false);
1769 for (
auto &Pair : DanglingDebugInfoMap)
1770 for (
auto &DDI : Pair.second)
1781 if (It !=
FuncInfo.ValueMap.end()) {
1785 DAG.getDataLayout(), InReg, Ty,
1802 if (
N.getNode())
return N;
1862 return DAG.getSplatBuildVector(
1865 return DAG.getConstant(*CI,
DL, VT);
1877 getValue(CPA->getAddrDiscriminator()),
1878 getValue(CPA->getDiscriminator()));
1894 visit(CE->getOpcode(), *CE);
1896 assert(N1.
getNode() &&
"visit didn't populate the NodeMap!");
1902 for (
const Use &U :
C->operands()) {
1908 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1909 Constants.push_back(
SDValue(Val, i));
1918 for (
uint64_t i = 0, e = CDS->getNumElements(); i != e; ++i) {
1922 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1931 if (
C->getType()->isStructTy() ||
C->getType()->isArrayTy()) {
1933 "Unknown struct or array constant!");
1937 unsigned NumElts = ValueVTs.
size();
1941 for (
unsigned i = 0; i != NumElts; ++i) {
1942 EVT EltVT = ValueVTs[i];
1944 Constants[i] =
DAG.getUNDEF(EltVT);
1955 return DAG.getBlockAddress(BA, VT);
1958 return getValue(Equiv->getGlobalValue());
1963 if (VT == MVT::aarch64svcount) {
1964 assert(
C->isNullValue() &&
"Can only zero this target type!");
1970 assert(
C->isNullValue() &&
"Can only zero this target type!");
1987 for (
unsigned i = 0; i != NumElements; ++i)
2015 return DAG.getFrameIndex(
2023 Inst->getType(), std::nullopt);
2037void SelectionDAGBuilder::visitCatchPad(
const CatchPadInst &
I) {
2050 if (IsMSVCCXX || IsCoreCLR)
2056 MachineBasicBlock *TargetMBB =
FuncInfo.getMBB(
I.getSuccessor());
2057 FuncInfo.MBB->addSuccessor(TargetMBB);
2064 if (TargetMBB != NextBlock(
FuncInfo.MBB) ||
2073 DAG.getMachineFunction().setHasEHContTarget(
true);
2079 Value *ParentPad =
I.getCatchSwitchParentPad();
2082 SuccessorColor = &
FuncInfo.Fn->getEntryBlock();
2085 assert(SuccessorColor &&
"No parent funclet for catchret!");
2086 MachineBasicBlock *SuccessorColorMBB =
FuncInfo.getMBB(SuccessorColor);
2087 assert(SuccessorColorMBB &&
"No MBB for SuccessorColor!");
2092 DAG.getBasicBlock(SuccessorColorMBB));
2096void SelectionDAGBuilder::visitCleanupPad(
const CleanupPadInst &CPI) {
2102 FuncInfo.MBB->setIsEHFuncletEntry();
2103 FuncInfo.MBB->setIsCleanupFuncletEntry();
2132 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2138 UnwindDests.emplace_back(FuncInfo.
getMBB(EHPadBB), Prob);
2139 UnwindDests.back().first->setIsEHScopeEntry();
2142 UnwindDests.back().first->setIsEHFuncletEntry();
2146 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2147 UnwindDests.emplace_back(FuncInfo.
getMBB(CatchPadBB), Prob);
2149 if (IsMSVCCXX || IsCoreCLR)
2150 UnwindDests.back().first->setIsEHFuncletEntry();
2152 UnwindDests.back().first->setIsEHScopeEntry();
2154 NewEHPadBB = CatchSwitch->getUnwindDest();
2160 if (BPI && NewEHPadBB)
2162 EHPadBB = NewEHPadBB;
2169 auto UnwindDest =
I.getUnwindDest();
2170 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
2171 BranchProbability UnwindDestProb =
2176 for (
auto &UnwindDest : UnwindDests) {
2177 UnwindDest.first->setIsEHPad();
2178 addSuccessorWithProb(
FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
2180 FuncInfo.MBB->normalizeSuccProbs();
2183 MachineBasicBlock *CleanupPadMBB =
2184 FuncInfo.getMBB(
I.getCleanupPad()->getParent());
2190void SelectionDAGBuilder::visitCatchSwitch(
const CatchSwitchInst &CSI) {
2194void SelectionDAGBuilder::visitRet(
const ReturnInst &
I) {
2195 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
2196 auto &
DL =
DAG.getDataLayout();
2208 if (
I.getParent()->getTerminatingDeoptimizeCall()) {
2225 SmallVector<uint64_t, 4>
Offsets;
2228 unsigned NumValues = ValueVTs.
size();
2231 Align BaseAlign =
DL.getPrefTypeAlign(
I.getOperand(0)->getType());
2232 for (
unsigned i = 0; i != NumValues; ++i) {
2239 if (MemVTs[i] != ValueVTs[i])
2241 Chains[i] =
DAG.getStore(
2249 MVT::Other, Chains);
2250 }
else if (
I.getNumOperands() != 0) {
2253 unsigned NumValues =
Types.size();
2257 const Function *
F =
I.getParent()->getParent();
2260 I.getOperand(0)->getType(),
F->getCallingConv(),
2264 if (
F->getAttributes().hasRetAttr(Attribute::SExt))
2266 else if (
F->getAttributes().hasRetAttr(Attribute::ZExt))
2269 LLVMContext &
Context =
F->getContext();
2270 bool RetInReg =
F->getAttributes().hasRetAttr(Attribute::InReg);
2272 for (
unsigned j = 0;
j != NumValues; ++
j) {
2285 &Parts[0], NumParts, PartVT, &
I, CC, ExtendKind);
2288 ISD::ArgFlagsTy
Flags = ISD::ArgFlagsTy();
2292 if (
I.getOperand(0)->getType()->isPointerTy()) {
2294 Flags.setPointerAddrSpace(
2298 if (NeedsRegBlock) {
2299 Flags.setInConsecutiveRegs();
2300 if (j == NumValues - 1)
2301 Flags.setInConsecutiveRegsLast();
2309 else if (
F->getAttributes().hasRetAttr(Attribute::NoExt))
2312 for (
unsigned i = 0; i < NumParts; ++i) {
2315 VT, Types[j], 0, 0));
2325 const Function *
F =
I.getParent()->getParent();
2327 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2329 ISD::ArgFlagsTy
Flags = ISD::ArgFlagsTy();
2330 Flags.setSwiftError();
2342 bool isVarArg =
DAG.getMachineFunction().getFunction().isVarArg();
2344 DAG.getMachineFunction().getFunction().getCallingConv();
2345 Chain =
DAG.getTargetLoweringInfo().LowerReturn(
2350 "LowerReturn didn't return a valid chain!");
2361 if (V->getType()->isEmptyTy())
2365 if (VMI !=
FuncInfo.ValueMap.end()) {
2367 "Unused value assigned virtual registers!");
2380 if (
FuncInfo.isExportedInst(V))
return;
2392 if (VI->getParent() == FromBB)
2418 const BasicBlock *SrcBB = Src->getBasicBlock();
2419 const BasicBlock *DstBB = Dst->getBasicBlock();
2423 auto SuccSize = std::max<uint32_t>(
succ_size(SrcBB), 1);
2433 Src->addSuccessorWithoutProb(Dst);
2436 Prob = getEdgeProbability(Src, Dst);
2437 Src->addSuccessor(Dst, Prob);
2443 return I->getParent() == BB;
2467 if (CurBB == SwitchBB ||
2473 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2478 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2480 if (FC->hasNoNaNs() ||
2488 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1),
nullptr,
2490 SL->SwitchCases.push_back(CB);
2499 SL->SwitchCases.push_back(CB);
2507 unsigned Depth = 0) {
2516 if (Necessary !=
nullptr) {
2519 if (Necessary->contains(
I))
2547 if (BPI !=
nullptr) {
2553 std::optional<bool> Likely;
2556 else if (BPI->
isEdgeHot(
I.getParent(), IfFalse))
2560 if (
Opc == (*Likely ? Instruction::And : Instruction::Or))
2572 if (CostThresh <= 0)
2593 Value *BrCond =
I.getCondition();
2594 auto ShouldCountInsn = [&RhsDeps, &BrCond](
const Instruction *Ins) {
2595 for (
const auto *U : Ins->users()) {
2598 if (UIns != BrCond && !RhsDeps.
contains(UIns))
2611 for (
unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
2613 for (
const auto &InsPair : RhsDeps) {
2614 if (!ShouldCountInsn(InsPair.first)) {
2615 ToDrop = InsPair.first;
2619 if (ToDrop ==
nullptr)
2621 RhsDeps.erase(ToDrop);
2624 for (
const auto &InsPair : RhsDeps) {
2629 CostOfIncluding +=
TTI->getInstructionCost(
2632 if (CostOfIncluding > CostThresh)
2658 const Value *BOpOp0, *BOpOp1;
2672 if (BOpc == Instruction::And)
2673 BOpc = Instruction::Or;
2674 else if (BOpc == Instruction::Or)
2675 BOpc = Instruction::And;
2681 bool BOpIsInOrAndTree = BOpc && BOpc ==
Opc && BOp->
hasOneUse();
2686 TProb, FProb, InvertCond);
2696 if (
Opc == Instruction::Or) {
2717 auto NewTrueProb = TProb / 2;
2718 auto NewFalseProb = TProb / 2 + FProb;
2721 NewFalseProb, InvertCond);
2728 Probs[1], InvertCond);
2730 assert(
Opc == Instruction::And &&
"Unknown merge op!");
2750 auto NewTrueProb = TProb + FProb / 2;
2751 auto NewFalseProb = FProb / 2;
2754 NewFalseProb, InvertCond);
2761 Probs[1], InvertCond);
2770 if (Cases.size() != 2)
return true;
2774 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2775 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2776 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2777 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2783 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2784 Cases[0].CC == Cases[1].CC &&
2787 if (Cases[0].CC ==
ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2789 if (Cases[0].CC ==
ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2796void SelectionDAGBuilder::visitUncondBr(
const UncondBrInst &
I) {
2806 if (Succ0MBB != NextBlock(BrMBB) ||
2815void SelectionDAGBuilder::visitCondBr(
const CondBrInst &
I) {
2816 MachineBasicBlock *BrMBB =
FuncInfo.MBB;
2818 MachineBasicBlock *Succ0MBB =
FuncInfo.getMBB(
I.getSuccessor(0));
2822 const Value *CondVal =
I.getCondition();
2823 MachineBasicBlock *Succ1MBB =
FuncInfo.getMBB(
I.getSuccessor(1));
2842 bool IsUnpredictable =
I.hasMetadata(LLVMContext::MD_unpredictable);
2844 if (!
DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2847 const Value *BOp0, *BOp1;
2850 Opcode = Instruction::And;
2852 Opcode = Instruction::Or;
2859 DAG.getTargetLoweringInfo().getJumpConditionMergingParams(
2860 Opcode, BOp0, BOp1))) {
2862 getEdgeProbability(BrMBB, Succ0MBB),
2863 getEdgeProbability(BrMBB, Succ1MBB),
2868 assert(
SL->SwitchCases[0].ThisBB == BrMBB &&
"Unexpected lowering!");
2872 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i) {
2879 SL->SwitchCases.erase(
SL->SwitchCases.begin());
2885 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i)
2886 FuncInfo.MF->erase(
SL->SwitchCases[i].ThisBB);
2888 SL->SwitchCases.clear();
2894 nullptr, Succ0MBB, Succ1MBB, BrMBB,
getCurSDLoc(),
2915 if (CB.
TrueBB != NextBlock(SwitchBB)) {
2922 auto &TLI =
DAG.getTargetLoweringInfo();
2946 Cond =
DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.
CC);
2958 Cond =
DAG.getSetCC(dl, MVT::i1, CmpOp,
DAG.getConstant(
High, dl, VT),
2962 VT, CmpOp,
DAG.getConstant(
Low, dl, VT));
2963 Cond =
DAG.getSetCC(dl, MVT::i1, SUB,
2978 if (CB.
TrueBB == NextBlock(SwitchBB)) {
2994 BrCond =
DAG.getNode(
ISD::BR, dl, MVT::Other, BrCond,
2997 DAG.setRoot(BrCond);
3003 assert(JT.
SL &&
"Should set SDLoc for SelectionDAG!");
3004 assert(JT.
Reg &&
"Should lower JT Header first!");
3005 EVT PTy =
DAG.getTargetLoweringInfo().getJumpTableRegTy(
DAG.getDataLayout());
3009 Index.getValue(1), Table, Index);
3010 DAG.setRoot(BrJumpTable);
3018 assert(JT.
SL &&
"Should set SDLoc for SelectionDAG!");
3025 DAG.getConstant(JTH.
First, dl, VT));
3040 JT.
Reg = JumpTableReg;
3048 Sub.getValueType()),
3052 MVT::Other, CopyTo, CMP,
3056 if (JT.
MBB != NextBlock(SwitchBB))
3057 BrCond =
DAG.getNode(
ISD::BR, dl, MVT::Other, BrCond,
3058 DAG.getBasicBlock(JT.
MBB));
3060 DAG.setRoot(BrCond);
3063 if (JT.
MBB != NextBlock(SwitchBB))
3065 DAG.getBasicBlock(JT.
MBB)));
3067 DAG.setRoot(CopyTo);
3091 if (PtrTy != PtrMemTy)
3107 auto &
DL =
DAG.getDataLayout();
3116 SDValue StackSlotPtr =
DAG.getFrameIndex(FI, PtrTy);
3123 PtrMemTy, dl,
DAG.getEntryNode(), StackSlotPtr,
3137 assert(GuardCheckFn &&
"Guard check function is null");
3148 Entry.IsInReg =
true;
3149 Args.push_back(Entry);
3155 getValue(GuardCheckFn), std::move(Args));
3157 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
3158 DAG.setRoot(Result.second);
3170 Guard =
DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
3176 Guard =
DAG.getPOISON(PtrMemTy);
3218 auto &
DL =
DAG.getDataLayout();
3226 SDValue StackSlotPtr =
DAG.getFrameIndex(FI, PtrTy);
3232 PtrMemTy, dl,
DAG.getEntryNode(), StackSlotPtr,
3247 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
3248 Entry.IsInReg =
true;
3249 Args.push_back(Entry);
3255 getValue(GuardCheckFn), std::move(Args));
3261 Chain = TLI.
makeLibCall(
DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
3284 DAG.getNode(
ISD::SUB, dl, VT, SwitchOp,
DAG.getConstant(
B.First, dl, VT));
3288 bool UsePtrType =
false;
3312 if (!
B.FallthroughUnreachable)
3313 addSuccessorWithProb(SwitchBB,
B.Default,
B.DefaultProb);
3314 addSuccessorWithProb(SwitchBB,
MBB,
B.Prob);
3318 if (!
B.FallthroughUnreachable) {
3327 DAG.getBasicBlock(
B.Default));
3331 if (
MBB != NextBlock(SwitchBB))
3349 if (PopCount == 1) {
3356 }
else if (PopCount == BB.
Range) {
3364 DAG.getConstant(1, dl, VT), ShiftOp);
3368 VT, SwitchVal,
DAG.getConstant(
B.Mask, dl, VT));
3375 addSuccessorWithProb(SwitchBB,
B.TargetBB,
B.ExtraProb);
3377 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3385 Cmp,
DAG.getBasicBlock(
B.TargetBB));
3388 if (NextMBB != NextBlock(SwitchBB))
3389 BrAnd =
DAG.getNode(
ISD::BR, dl, MVT::Other, BrAnd,
3390 DAG.getBasicBlock(NextMBB));
3395void SelectionDAGBuilder::visitInvoke(
const InvokeInst &
I) {
3413 const Value *Callee(
I.getCalledOperand());
3416 visitInlineAsm(
I, EHPadBB);
3421 case Intrinsic::donothing:
3423 case Intrinsic::seh_try_begin:
3424 case Intrinsic::seh_scope_begin:
3425 case Intrinsic::seh_try_end:
3426 case Intrinsic::seh_scope_end:
3432 case Intrinsic::experimental_patchpoint_void:
3433 case Intrinsic::experimental_patchpoint:
3434 visitPatchpoint(
I, EHPadBB);
3436 case Intrinsic::experimental_gc_statepoint:
3442 case Intrinsic::wasm_throw: {
3444 std::array<SDValue, 4>
Ops = {
3455 case Intrinsic::wasm_rethrow: {
3456 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3457 std::array<SDValue, 2>
Ops = {
3466 }
else if (
I.hasDeoptState()) {
3487 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
3488 BranchProbability EHPadBBProb =
3494 addSuccessorWithProb(InvokeMBB, Return);
3495 for (
auto &UnwindDest : UnwindDests) {
3496 UnwindDest.first->setIsEHPad();
3497 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3503 DAG.getBasicBlock(Return)));
3512void SelectionDAGBuilder::visitCallBrIntrinsic(
const CallBrInst &
I) {
3515 DAG.getTargetLoweringInfo().getTgtMemIntrinsic(
3516 Infos,
I,
DAG.getMachineFunction(),
I.getIntrinsicID());
3517 assert(Infos.
empty() &&
"Intrinsic touches memory");
3520 auto [HasChain, OnlyLoad] = getTargetIntrinsicCallProperties(
I);
3523 getTargetIntrinsicOperands(
I, HasChain, OnlyLoad);
3524 SDVTList VTs = getTargetIntrinsicVTList(
I, HasChain);
3528 getTargetNonMemIntrinsicNode(*
I.getType(), HasChain,
Ops, VTs);
3529 Result = handleTargetIntrinsicRet(
I, HasChain, OnlyLoad, Result);
3534void SelectionDAGBuilder::visitCallBr(
const CallBrInst &
I) {
3535 MachineBasicBlock *CallBrMBB =
FuncInfo.MBB;
3537 if (
I.isInlineAsm()) {
3544 assert(!
I.hasOperandBundles() &&
3545 "Can't have operand bundles for intrinsics");
3546 visitCallBrIntrinsic(
I);
3551 SmallPtrSet<BasicBlock *, 8> Dests;
3552 Dests.
insert(
I.getDefaultDest());
3562 if (
I.isInlineAsm()) {
3563 for (BasicBlock *Dest :
I.getIndirectDests()) {
3565 Target->setIsInlineAsmBrIndirectTarget();
3571 Target->setLabelMustBeEmitted();
3573 if (Dests.
insert(Dest).second)
3582 DAG.getBasicBlock(Return)));
3585void SelectionDAGBuilder::visitResume(
const ResumeInst &RI) {
3586 llvm_unreachable(
"SelectionDAGBuilder shouldn't visit resume instructions!");
3589void SelectionDAGBuilder::visitLandingPad(
const LandingPadInst &LP) {
3591 "Call to landingpad not in landing pad!");
3595 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
3611 assert(ValueVTs.
size() == 2 &&
"Only two-valued landingpads are supported");
3616 if (
FuncInfo.ExceptionPointerVirtReg) {
3617 Ops[0] =
DAG.getZExtOrTrunc(
3618 DAG.getCopyFromReg(
DAG.getEntryNode(), dl,
3625 Ops[1] =
DAG.getZExtOrTrunc(
3626 DAG.getCopyFromReg(
DAG.getEntryNode(), dl,
3633 DAG.getVTList(ValueVTs),
Ops);
3641 if (JTB.first.HeaderBB ==
First)
3642 JTB.first.HeaderBB =
Last;
3655 for (
unsigned i = 0, e =
I.getNumSuccessors(); i != e; ++i) {
3657 bool Inserted =
Done.insert(BB).second;
3662 addSuccessorWithProb(IndirectBrMBB, Succ);
3672 if (!
I.shouldLowerToTrap(
DAG.getTarget().Options.TrapUnreachable,
3673 DAG.getTarget().Options.NoTrapAfterNoreturn))
3679void SelectionDAGBuilder::visitUnary(
const User &
I,
unsigned Opcode) {
3682 Flags.copyFMF(*FPOp);
3690void SelectionDAGBuilder::visitBinary(
const User &
I,
unsigned Opcode) {
3693 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3694 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3697 Flags.setExact(ExactOp->isExact());
3699 Flags.setDisjoint(DisjointOp->isDisjoint());
3701 Flags.copyFMF(*FPOp);
3710void SelectionDAGBuilder::visitShift(
const User &
I,
unsigned Opcode) {
3714 EVT ShiftTy =
DAG.getTargetLoweringInfo().getShiftAmountTy(
3719 if (!
I.getType()->isVectorTy() && Op2.
getValueType() != ShiftTy) {
3721 "Unexpected shift type");
3731 if (
const OverflowingBinaryOperator *OFBinOp =
3733 nuw = OFBinOp->hasNoUnsignedWrap();
3734 nsw = OFBinOp->hasNoSignedWrap();
3736 if (
const PossiblyExactOperator *ExactOp =
3738 exact = ExactOp->isExact();
3741 Flags.setExact(exact);
3742 Flags.setNoSignedWrap(nsw);
3743 Flags.setNoUnsignedWrap(nuw);
3749void SelectionDAGBuilder::visitSDiv(
const User &
I) {
3760void SelectionDAGBuilder::visitICmp(
const ICmpInst &
I) {
3766 auto &TLI =
DAG.getTargetLoweringInfo();
3779 Flags.setSameSign(
I.hasSameSign());
3781 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3787void SelectionDAGBuilder::visitFCmp(
const FCmpInst &
I) {
3794 if (FPMO->hasNoNaNs() ||
3795 (
DAG.isKnownNeverNaN(Op1) &&
DAG.isKnownNeverNaN(Op2)))
3799 Flags.copyFMF(*FPMO);
3801 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3811 return isa<SelectInst>(V);
3815void SelectionDAGBuilder::visitSelect(
const User &
I) {
3819 unsigned NumValues = ValueVTs.
size();
3820 if (NumValues == 0)
return;
3830 bool IsUnaryAbs =
false;
3831 bool Negate =
false;
3835 Flags.copyFMF(*FPOp);
3837 Flags.setUnpredictable(
3842 EVT VT = ValueVTs[0];
3843 LLVMContext &Ctx = *
DAG.getContext();
3844 auto &TLI =
DAG.getTargetLoweringInfo();
3854 bool UseScalarMinMax = VT.
isVector() &&
3863 switch (SPR.Flavor) {
3872 switch (SPR.NaNBehavior) {
3877 Flags.setNoSignedZeros(
true);
3891 switch (SPR.NaNBehavior) {
3896 Flags.setNoSignedZeros(
true);
3938 for (
unsigned i = 0; i != NumValues; ++i) {
3944 Values[i] =
DAG.getNegative(Values[i], dl, VT);
3947 for (
unsigned i = 0; i != NumValues; ++i) {
3951 Values[i] =
DAG.getNode(
3958 DAG.getVTList(ValueVTs), Values));
3961void SelectionDAGBuilder::visitTrunc(
const User &
I) {
3964 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
3968 Flags.setNoSignedWrap(Trunc->hasNoSignedWrap());
3969 Flags.setNoUnsignedWrap(Trunc->hasNoUnsignedWrap());
3975void SelectionDAGBuilder::visitZExt(
const User &
I) {
3979 auto &TLI =
DAG.getTargetLoweringInfo();
3984 Flags.setNonNeg(PNI->hasNonNeg());
3989 if (
Flags.hasNonNeg() &&
3998void SelectionDAGBuilder::visitSExt(
const User &
I) {
4002 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4007void SelectionDAGBuilder::visitFPTrunc(
const User &
I) {
4013 Flags.copyFMF(*FPOp);
4014 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4017 DAG.getTargetConstant(
4022void SelectionDAGBuilder::visitFPExt(
const User &
I) {
4025 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4029 Flags.copyFMF(*FPOp);
4033void SelectionDAGBuilder::visitFPToUI(
const User &
I) {
4036 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4041void SelectionDAGBuilder::visitFPToSI(
const User &
I) {
4044 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4049void SelectionDAGBuilder::visitUIToFP(
const User &
I) {
4052 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4056 Flags.setNonNeg(PNI->hasNonNeg());
4061void SelectionDAGBuilder::visitSIToFP(
const User &
I) {
4064 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4069void SelectionDAGBuilder::visitPtrToAddr(
const User &
I) {
4072 const auto &TLI =
DAG.getTargetLoweringInfo();
4080void SelectionDAGBuilder::visitPtrToInt(
const User &
I) {
4084 auto &TLI =
DAG.getTargetLoweringInfo();
4085 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4094void SelectionDAGBuilder::visitIntToPtr(
const User &
I) {
4098 auto &TLI =
DAG.getTargetLoweringInfo();
4106void SelectionDAGBuilder::visitBitCast(
const User &
I) {
4109 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
4114 if (DestVT !=
N.getValueType())
4122 setValue(&
I,
DAG.getConstant(
C->getValue(), dl, DestVT,
false,
4128void SelectionDAGBuilder::visitAddrSpaceCast(
const User &
I) {
4129 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4130 const Value *SV =
I.getOperand(0);
4135 unsigned DestAS =
I.getType()->getPointerAddressSpace();
4137 if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
4143void SelectionDAGBuilder::visitInsertElement(
const User &
I) {
4144 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4151 InVec, InVal, InIdx));
4154void SelectionDAGBuilder::visitExtractElement(
const User &
I) {
4155 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4164void SelectionDAGBuilder::visitShuffleVector(
const User &
I) {
4169 Mask = SVI->getShuffleMask();
4173 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4181 DAG.getVectorIdxConstant(0,
DL));
4192 unsigned MaskNumElts =
Mask.size();
4194 if (SrcNumElts == MaskNumElts) {
4200 if (SrcNumElts < MaskNumElts) {
4204 if (MaskNumElts % SrcNumElts == 0) {
4208 unsigned NumConcat = MaskNumElts / SrcNumElts;
4209 bool IsConcat =
true;
4210 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
4211 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4217 if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
4218 (ConcatSrcs[i / SrcNumElts] >= 0 &&
4219 ConcatSrcs[i / SrcNumElts] != (
int)(Idx / SrcNumElts))) {
4224 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
4231 for (
auto Src : ConcatSrcs) {
4244 unsigned PaddedMaskNumElts =
alignTo(MaskNumElts, SrcNumElts);
4245 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
4261 SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
4262 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4264 if (Idx >= (
int)SrcNumElts)
4265 Idx -= SrcNumElts - PaddedMaskNumElts;
4273 if (MaskNumElts != PaddedMaskNumElts)
4275 DAG.getVectorIdxConstant(0,
DL));
4281 assert(SrcNumElts > MaskNumElts);
4285 int StartIdx[2] = {-1, -1};
4286 bool CanExtract =
true;
4287 for (
int Idx : Mask) {
4292 if (Idx >= (
int)SrcNumElts) {
4300 int NewStartIdx =
alignDown(Idx, MaskNumElts);
4301 if (NewStartIdx + MaskNumElts > SrcNumElts ||
4302 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
4306 StartIdx[Input] = NewStartIdx;
4309 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
4315 for (
unsigned Input = 0; Input < 2; ++Input) {
4316 SDValue &Src = Input == 0 ? Src1 : Src2;
4317 if (StartIdx[Input] < 0)
4318 Src =
DAG.getUNDEF(VT);
4321 DAG.getVectorIdxConstant(StartIdx[Input],
DL));
4326 SmallVector<int, 8> MappedOps(Mask);
4327 for (
int &Idx : MappedOps) {
4328 if (Idx >= (
int)SrcNumElts)
4329 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4334 setValue(&
I,
DAG.getVectorShuffle(VT,
DL, Src1, Src2, MappedOps));
4343 for (
int Idx : Mask) {
4347 Res =
DAG.getUNDEF(EltVT);
4349 SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
4350 if (Idx >= (
int)SrcNumElts) Idx -= SrcNumElts;
4353 DAG.getVectorIdxConstant(Idx,
DL));
4363 ArrayRef<unsigned> Indices =
I.getIndices();
4364 const Value *Op0 =
I.getOperand(0);
4366 Type *AggTy =
I.getType();
4373 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4379 unsigned NumAggValues = AggValueVTs.
size();
4380 unsigned NumValValues = ValValueVTs.
size();
4384 if (!NumAggValues) {
4392 for (; i != LinearIndex; ++i)
4393 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4398 for (; i != LinearIndex + NumValValues; ++i)
4399 Values[i] = FromUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4403 for (; i != NumAggValues; ++i)
4404 Values[i] = IntoUndef ?
DAG.getUNDEF(AggValueVTs[i]) :
4408 DAG.getVTList(AggValueVTs), Values));
4412 ArrayRef<unsigned> Indices =
I.getIndices();
4413 const Value *Op0 =
I.getOperand(0);
4415 Type *ValTy =
I.getType();
4420 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4424 unsigned NumValValues = ValValueVTs.
size();
4427 if (!NumValValues) {
4436 for (
unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4437 Values[i - LinearIndex] =
4443 DAG.getVTList(ValValueVTs), Values));
4446void SelectionDAGBuilder::visitGetElementPtr(
const User &
I) {
4447 Value *Op0 =
I.getOperand(0);
4453 auto &TLI =
DAG.getTargetLoweringInfo();
4458 bool IsVectorGEP =
I.getType()->isVectorTy();
4459 ElementCount VectorElementCount =
4465 const Value *Idx = GTI.getOperand();
4466 if (StructType *StTy = GTI.getStructTypeOrNull()) {
4471 DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(
Field);
4481 N =
DAG.getMemBasePlusOffset(
4482 N,
DAG.getConstant(
Offset, dl,
N.getValueType()), dl, Flags);
4488 unsigned IdxSize =
DAG.getDataLayout().getIndexSizeInBits(AS);
4490 TypeSize ElementSize =
4491 GTI.getSequentialElementStride(
DAG.getDataLayout());
4496 bool ElementScalable = ElementSize.
isScalable();
4502 C =
C->getSplatValue();
4505 if (CI && CI->isZero())
4507 if (CI && !ElementScalable) {
4508 APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4511 if (
N.getValueType().isVector())
4512 OffsVal =
DAG.getConstant(
4515 OffsVal =
DAG.getConstant(Offs, dl, IdxTy);
4522 Flags.setNoUnsignedWrap(
true);
4525 OffsVal =
DAG.getSExtOrTrunc(OffsVal, dl,
N.getValueType());
4527 N =
DAG.getMemBasePlusOffset(
N, OffsVal, dl, Flags);
4535 if (
N.getValueType().isVector()) {
4537 VectorElementCount);
4538 IdxN =
DAG.getSplat(VT, dl, IdxN);
4542 N =
DAG.getSplat(VT, dl,
N);
4548 IdxN =
DAG.getSExtOrTrunc(IdxN, dl,
N.getValueType());
4550 SDNodeFlags ScaleFlags;
4559 if (ElementScalable) {
4560 EVT VScaleTy =
N.getValueType().getScalarType();
4563 DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4564 if (
N.getValueType().isVector())
4565 VScale =
DAG.getSplatVector(
N.getValueType(), dl, VScale);
4566 IdxN =
DAG.getNode(
ISD::MUL, dl,
N.getValueType(), IdxN, VScale,
4571 if (ElementMul != 1) {
4572 if (ElementMul.isPowerOf2()) {
4573 unsigned Amt = ElementMul.logBase2();
4576 DAG.getShiftAmountConstant(Amt,
N.getValueType(), dl),
4579 SDValue Scale =
DAG.getConstant(ElementMul.getZExtValue(), dl,
4581 IdxN =
DAG.getNode(
ISD::MUL, dl,
N.getValueType(), IdxN, Scale,
4591 SDNodeFlags AddFlags;
4595 N =
DAG.getMemBasePlusOffset(
N, IdxN, dl, AddFlags);
4599 if (IsVectorGEP && !
N.getValueType().isVector()) {
4601 N =
DAG.getSplat(VT, dl,
N);
4612 N =
DAG.getPtrExtendInReg(
N, dl, PtrMemTy);
4617void SelectionDAGBuilder::visitAlloca(
const AllocaInst &
I) {
4624 Type *Ty =
I.getAllocatedType();
4625 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4626 auto &
DL =
DAG.getDataLayout();
4627 TypeSize TySize =
DL.getTypeAllocSize(Ty);
4628 MaybeAlign Alignment =
I.getAlign();
4634 AllocSize =
DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4636 AllocSize =
DAG.getNode(
4638 DAG.getZExtOrTrunc(
DAG.getTypeSize(dl, MVT::i64, TySize), dl, IntPtr));
4643 Align StackAlign =
DAG.getSubtarget().getFrameLowering()->getStackAlign();
4644 if (*Alignment <= StackAlign)
4645 Alignment = std::nullopt;
4647 const uint64_t StackAlignMask = StackAlign.
value() - 1U;
4652 DAG.getConstant(StackAlignMask, dl, IntPtr),
4657 DAG.getSignedConstant(~StackAlignMask, dl, IntPtr));
4661 DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4671 return I.getMetadata(LLVMContext::MD_range);
4676 if (std::optional<ConstantRange> CR = CB->getRange())
4680 return std::nullopt;
4685 return CB->getRetNoFPClass();
4689void SelectionDAGBuilder::visitLoad(
const LoadInst &
I) {
4691 return visitAtomicLoad(
I);
4693 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4694 const Value *SV =
I.getOperand(0);
4699 if (Arg->hasSwiftErrorAttr())
4700 return visitLoadFromSwiftError(
I);
4704 if (Alloca->isSwiftError())
4705 return visitLoadFromSwiftError(
I);
4711 Type *Ty =
I.getType();
4715 unsigned NumValues = ValueVTs.
size();
4719 Align Alignment =
I.getAlign();
4720 AAMDNodes AAInfo =
I.getAAMetadata();
4722 bool isVolatile =
I.isVolatile();
4727 bool ConstantMemory =
false;
4734 BatchAA->pointsToConstantMemory(MemoryLocation(
4739 Root =
DAG.getEntryNode();
4740 ConstantMemory =
true;
4744 Root =
DAG.getRoot();
4755 unsigned ChainI = 0;
4756 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4772 MachinePointerInfo PtrInfo =
4774 ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue())
4775 : MachinePointerInfo();
4777 SDValue A =
DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4778 SDValue L =
DAG.getLoad(MemVTs[i], dl, Root,
A, PtrInfo, Alignment,
4779 MMOFlags, AAInfo, Ranges);
4780 Chains[ChainI] =
L.getValue(1);
4782 if (MemVTs[i] != ValueVTs[i])
4783 L =
DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]);
4785 if (MDNode *NoFPClassMD =
I.getMetadata(LLVMContext::MD_nofpclass)) {
4786 uint64_t FPTestInt =
4788 cast<ConstantAsMetadata>(NoFPClassMD->getOperand(0))->getValue())
4790 if (FPTestInt != fcNone) {
4791 SDValue FPTestConst =
4792 DAG.getTargetConstant(FPTestInt, SDLoc(), MVT::i32);
4793 L = DAG.getNode(ISD::AssertNoFPClass, dl, L.getValueType(), L,
4800 if (!ConstantMemory) {
4806 PendingLoads.push_back(Chain);
4810 DAG.getVTList(ValueVTs), Values));
4813void SelectionDAGBuilder::visitStoreToSwiftError(
const StoreInst &
I) {
4814 assert(
DAG.getTargetLoweringInfo().supportSwiftError() &&
4815 "call visitStoreToSwiftError when backend supports swifterror");
4818 SmallVector<uint64_t, 4>
Offsets;
4819 const Value *SrcV =
I.getOperand(0);
4821 SrcV->
getType(), ValueVTs,
nullptr, &Offsets, 0);
4822 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4823 "expect a single EVT for swifterror");
4832 SDValue(Src.getNode(), Src.getResNo()));
4833 DAG.setRoot(CopyNode);
4836void SelectionDAGBuilder::visitLoadFromSwiftError(
const LoadInst &
I) {
4837 assert(
DAG.getTargetLoweringInfo().supportSwiftError() &&
4838 "call visitLoadFromSwiftError when backend supports swifterror");
4841 !
I.hasMetadata(LLVMContext::MD_nontemporal) &&
4842 !
I.hasMetadata(LLVMContext::MD_invariant_load) &&
4843 "Support volatile, non temporal, invariant for load_from_swift_error");
4845 const Value *SV =
I.getOperand(0);
4846 Type *Ty =
I.getType();
4849 !
BatchAA->pointsToConstantMemory(MemoryLocation(
4851 I.getAAMetadata()))) &&
4852 "load_from_swift_error should not be constant memory");
4855 SmallVector<uint64_t, 4>
Offsets;
4857 ValueVTs,
nullptr, &Offsets, 0);
4858 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4859 "expect a single EVT for swifterror");
4869void SelectionDAGBuilder::visitStore(
const StoreInst &
I) {
4871 return visitAtomicStore(
I);
4873 const Value *SrcV =
I.getOperand(0);
4874 const Value *PtrV =
I.getOperand(1);
4876 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
4881 if (Arg->hasSwiftErrorAttr())
4882 return visitStoreToSwiftError(
I);
4886 if (Alloca->isSwiftError())
4887 return visitStoreToSwiftError(
I);
4894 SrcV->
getType(), ValueVTs, &MemVTs, &Offsets);
4895 unsigned NumValues = ValueVTs.
size();
4908 Align Alignment =
I.getAlign();
4909 AAMDNodes AAInfo =
I.getAAMetadata();
4913 unsigned ChainI = 0;
4914 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4924 MachinePointerInfo PtrInfo =
4926 ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue())
4927 : MachinePointerInfo();
4931 if (MemVTs[i] != ValueVTs[i])
4932 Val =
DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4934 DAG.getStore(Root, dl, Val,
Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4935 Chains[ChainI] = St;
4941 DAG.setRoot(StoreNode);
4944void SelectionDAGBuilder::visitMaskedStore(
const CallInst &
I,
4945 bool IsCompressing) {
4948 Value *Src0Operand =
I.getArgOperand(0);
4949 Value *PtrOperand =
I.getArgOperand(1);
4950 Value *MaskOperand =
I.getArgOperand(2);
4951 Align Alignment =
I.getParamAlign(1).valueOrOne();
4961 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
4964 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
4965 MachinePointerInfo(PtrOperand), MMOFlags,
4969 const auto &TLI =
DAG.getTargetLoweringInfo();
4972 !IsCompressing &&
TTI->hasConditionalLoadStoreForType(
4973 I.getArgOperand(0)->getType(),
true)
4979 DAG.setRoot(StoreNode);
5009 C =
C->getSplatValue();
5023 if (!
GEP ||
GEP->getParent() != CurBB)
5026 if (
GEP->getNumOperands() != 2)
5029 const Value *BasePtr =
GEP->getPointerOperand();
5030 const Value *IndexVal =
GEP->getOperand(
GEP->getNumOperands() - 1);
5036 TypeSize ScaleVal =
DL.getTypeAllocSize(
GEP->getResultElementType());
5041 if (ScaleVal != 1 &&
5053void SelectionDAGBuilder::visitMaskedScatter(
const CallInst &
I) {
5057 const Value *Ptr =
I.getArgOperand(1);
5061 Align Alignment =
I.getParamAlign(1).valueOrOne();
5062 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5071 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5081 EVT IdxVT =
Index.getValueType();
5089 SDValue Scatter =
DAG.getMaskedScatter(
DAG.getVTList(MVT::Other), VT, sdl,
5091 DAG.setRoot(Scatter);
5095void SelectionDAGBuilder::visitMaskedLoad(
const CallInst &
I,
bool IsExpanding) {
5098 Value *PtrOperand =
I.getArgOperand(0);
5099 Value *MaskOperand =
I.getArgOperand(1);
5100 Value *Src0Operand =
I.getArgOperand(2);
5101 Align Alignment =
I.getParamAlign(0).valueOrOne();
5109 AAMDNodes AAInfo =
I.getAAMetadata();
5116 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
5119 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
5121 if (
I.hasMetadata(LLVMContext::MD_invariant_load))
5124 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5125 MachinePointerInfo(PtrOperand), MMOFlags,
5128 const auto &TLI =
DAG.getTargetLoweringInfo();
5135 TTI->hasConditionalLoadStoreForType(Src0Operand->
getType(),
5140 DAG.getMaskedLoad(VT, sdl, InChain, Ptr,
Offset, Mask, Src0, VT, MMO,
5147void SelectionDAGBuilder::visitMaskedGather(
const CallInst &
I) {
5151 const Value *Ptr =
I.getArgOperand(0);
5155 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5157 Align Alignment =
I.getParamAlign(0).valueOrOne();
5168 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5180 EVT IdxVT =
Index.getValueType();
5189 DAG.getMaskedGather(
DAG.getVTList(VT, MVT::Other), VT, sdl,
Ops, MMO,
5205 SDVTList VTs =
DAG.getVTList(MemVT, MVT::i1, MVT::Other);
5207 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5210 MachineFunction &MF =
DAG.getMachineFunction();
5212 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5213 DAG.getEVTAlign(MemVT), AAMDNodes(),
nullptr, SSID, SuccessOrdering,
5217 dl, MemVT, VTs, InChain,
5225 DAG.setRoot(OutChain);
5228void SelectionDAGBuilder::visitAtomicRMW(
const AtomicRMWInst &
I) {
5231 switch (
I.getOperation()) {
5279 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5282 MachineFunction &MF =
DAG.getMachineFunction();
5284 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5285 DAG.getEVTAlign(MemVT), AAMDNodes(),
nullptr, SSID, Ordering);
5288 DAG.getAtomic(NT, dl, MemVT, InChain,
5295 DAG.setRoot(OutChain);
5298void SelectionDAGBuilder::visitFence(
const FenceInst &
I) {
5300 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5303 Ops[1] =
DAG.getTargetConstant((
unsigned)
I.getOrdering(), dl,
5305 Ops[2] =
DAG.getTargetConstant(
I.getSyncScopeID(), dl,
5312void SelectionDAGBuilder::visitAtomicLoad(
const LoadInst &
I) {
5319 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5330 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
5331 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5332 I.getAlign(), AAMDNodes(), Ranges, SSID, Order);
5342 L =
DAG.getPtrExtOrTrunc(L, dl, VT);
5345 DAG.setRoot(OutChain);
5348void SelectionDAGBuilder::visitAtomicStore(
const StoreInst &
I) {
5356 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5366 MachineFunction &MF =
DAG.getMachineFunction();
5368 MachinePointerInfo(
I.getPointerOperand()), Flags, MemVT.
getStoreSize(),
5369 I.getAlign(), AAMDNodes(),
nullptr, SSID, Ordering);
5373 Val =
DAG.getPtrExtOrTrunc(Val, dl, MemVT);
5380 DAG.setRoot(OutChain);
5388std::pair<bool, bool>
5389SelectionDAGBuilder::getTargetIntrinsicCallProperties(
const CallBase &
I) {
5391 bool HasChain = !
F->doesNotAccessMemory();
5393 HasChain &&
F->onlyReadsMemory() &&
F->willReturn() &&
F->doesNotThrow();
5395 return {HasChain, OnlyLoad};
5399 const CallBase &
I,
bool HasChain,
bool OnlyLoad,
5401 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5408 Ops.push_back(
DAG.getRoot());
5421 for (
unsigned i = 0, e =
I.arg_size(); i != e; ++i) {
5422 const Value *Arg =
I.getArgOperand(i);
5423 if (!
I.paramHasAttr(i, Attribute::ImmArg)) {
5431 assert(CI->getBitWidth() <= 64 &&
5432 "large intrinsic immediates not handled");
5433 Ops.push_back(
DAG.getTargetConstant(*CI, SDLoc(), VT));
5440 if (std::optional<OperandBundleUse> Bundle =
5442 auto *Sym = Bundle->Inputs[0].get();
5445 Ops.push_back(SDSym);
5448 if (std::optional<OperandBundleUse> Bundle =
5450 Value *Token = Bundle->Inputs[0].get();
5452 assert(
Ops.back().getValueType() != MVT::Glue &&
5453 "Did not expect another glue node here.");
5456 Ops.push_back(ConvControlToken);
5464 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5472 return DAG.getVTList(ValueVTs);
5476SDValue SelectionDAGBuilder::getTargetNonMemIntrinsicNode(
5499 if (
I.getType()->isVoidTy())
5514void SelectionDAGBuilder::visitTargetIntrinsic(
const CallInst &
I,
5516 auto [HasChain, OnlyLoad] = getTargetIntrinsicCallProperties(
I);
5520 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
5523 TargetLowering::IntrinsicInfo *
Info = !Infos.
empty() ? &Infos[0] :
nullptr;
5526 getTargetIntrinsicOperands(
I, HasChain, OnlyLoad, Info);
5527 SDVTList VTs = getTargetIntrinsicVTList(
I, HasChain);
5532 Flags.copyFMF(*FPMO);
5533 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
5540 if (!Infos.
empty()) {
5543 MachineFunction &MF =
DAG.getMachineFunction();
5545 for (
const auto &Info : Infos) {
5548 MachinePointerInfo MPI;
5550 MPI = MachinePointerInfo(
Info.ptrVal,
Info.offset);
5551 else if (
Info.fallbackAddressSpace)
5552 MPI = MachinePointerInfo(*
Info.fallbackAddressSpace);
5553 EVT MemVT =
Info.memVT;
5555 if (
Size.hasValue() && !
Size.getValue())
5557 Align Alignment =
Info.align.value_or(
DAG.getEVTAlign(MemVT));
5559 MPI,
Info.flags,
Size, Alignment,
I.getAAMetadata(),
5567 Result = getTargetNonMemIntrinsicNode(*
I.getType(), HasChain,
Ops, VTs);
5570 Result = handleTargetIntrinsicRet(
I, HasChain, OnlyLoad, Result);
5627 SDValue TwoToFractionalPartOfX;
5704 if (
Op.getValueType() == MVT::f32 &&
5728 if (
Op.getValueType() == MVT::f32 &&
5827 if (
Op.getValueType() == MVT::f32 &&
5911 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5924 if (
Op.getValueType() == MVT::f32 &&
6001 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
6012 if (
Op.getValueType() == MVT::f32 &&
6025 bool IsExp10 =
false;
6026 if (
LHS.getValueType() == MVT::f32 &&
RHS.getValueType() == MVT::f32 &&
6030 IsExp10 = LHSC->isExactlyValue(Ten);
6057 unsigned Val = RHSC->getSExtValue();
6086 CurSquare, CurSquare);
6091 if (RHSC->getSExtValue() < 0)
6105 EVT VT =
LHS.getValueType();
6128 if ((ScaleInt > 0 || (Saturating &&
Signed)) &&
6132 Opcode, VT, ScaleInt);
6167 switch (
N.getOpcode()) {
6171 Op.getValueType().getSizeInBits());
6196bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
6203 MachineFunction &MF =
DAG.getMachineFunction();
6204 const TargetInstrInfo *
TII =
DAG.getSubtarget().getInstrInfo();
6208 auto MakeVRegDbgValue = [&](
Register Reg, DIExpression *FragExpr,
6213 auto &Inst =
TII->get(TargetOpcode::DBG_INSTR_REF);
6220 auto *NewDIExpr = FragExpr;
6227 return BuildMI(MF,
DL, Inst,
false, MOs, Variable, NewDIExpr);
6230 auto &Inst =
TII->get(TargetOpcode::DBG_VALUE);
6231 return BuildMI(MF,
DL, Inst, Indirect,
Reg, Variable, FragExpr);
6235 if (Kind == FuncArgumentDbgValueKind::Value) {
6240 if (!IsInEntryBlock)
6256 bool VariableIsFunctionInputArg =
Variable->isParameter() &&
6257 !
DL->getInlinedAt();
6259 if (!IsInPrologue && !VariableIsFunctionInputArg)
6293 if (VariableIsFunctionInputArg) {
6295 if (ArgNo >=
FuncInfo.DescribedArgs.size())
6296 FuncInfo.DescribedArgs.resize(ArgNo + 1,
false);
6297 else if (!IsInPrologue &&
FuncInfo.DescribedArgs.test(ArgNo))
6298 return !NodeMap[
V].getNode();
6303 bool IsIndirect =
false;
6304 std::optional<MachineOperand>
Op;
6306 int FI =
FuncInfo.getArgumentFrameIndex(Arg);
6307 if (FI != std::numeric_limits<int>::max())
6311 if (!
Op &&
N.getNode()) {
6314 if (ArgRegsAndSizes.
size() == 1)
6315 Reg = ArgRegsAndSizes.
front().first;
6318 MachineRegisterInfo &RegInfo = MF.
getRegInfo();
6325 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6329 if (!
Op &&
N.getNode()) {
6333 if (FrameIndexSDNode *FINode =
6340 auto splitMultiRegDbgValue =
6353 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
6356 if (
Offset >= ExprFragmentSizeInBits)
6360 if (
Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
6361 RegFragmentSizeInBits = ExprFragmentSizeInBits -
Offset;
6366 Expr,
Offset, RegFragmentSizeInBits);
6370 if (!FragmentExpr) {
6371 SDDbgValue *SDV =
DAG.getConstantDbgValue(
6373 DAG.AddDbgValue(SDV,
false);
6376 MachineInstr *NewMI = MakeVRegDbgValue(
6377 Reg, *FragmentExpr, Kind != FuncArgumentDbgValueKind::Value);
6378 FuncInfo.ArgDbgValues.push_back(NewMI);
6387 if (VMI !=
FuncInfo.ValueMap.end()) {
6388 const auto &TLI =
DAG.getTargetLoweringInfo();
6389 RegsForValue RFV(
V->getContext(), TLI,
DAG.getDataLayout(), VMI->second,
6390 V->getType(), std::nullopt);
6391 if (RFV.occupiesMultipleRegs())
6392 return splitMultiRegDbgValue(RFV.getRegsAndSizes());
6395 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6396 }
else if (ArgRegsAndSizes.
size() > 1) {
6399 return splitMultiRegDbgValue(ArgRegsAndSizes);
6407 "Expected inlined-at fields to agree");
6408 MachineInstr *NewMI =
nullptr;
6411 NewMI = MakeVRegDbgValue(
Op->getReg(), Expr, IsIndirect);
6413 NewMI =
BuildMI(MF,
DL,
TII->get(TargetOpcode::DBG_VALUE),
true, *
Op,
6417 FuncInfo.ArgDbgValues.push_back(NewMI);
6426 unsigned DbgSDNodeOrder) {
6438 return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
6439 false, dl, DbgSDNodeOrder);
6441 return DAG.getDbgValue(Variable, Expr,
N.getNode(),
N.getResNo(),
6442 false, dl, DbgSDNodeOrder);
6447 case Intrinsic::smul_fix:
6449 case Intrinsic::umul_fix:
6451 case Intrinsic::smul_fix_sat:
6453 case Intrinsic::umul_fix_sat:
6455 case Intrinsic::sdiv_fix:
6457 case Intrinsic::udiv_fix:
6459 case Intrinsic::sdiv_fix_sat:
6461 case Intrinsic::udiv_fix_sat:
6474 "expected call_preallocated_setup Value");
6475 for (
const auto *U : PreallocatedSetup->
users()) {
6477 const Function *Fn = UseCall->getCalledFunction();
6478 if (!Fn || Fn->
getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6488bool SelectionDAGBuilder::visitEntryValueDbgValue(
6498 auto ArgIt =
FuncInfo.ValueMap.find(Arg);
6499 if (ArgIt ==
FuncInfo.ValueMap.end()) {
6501 dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6502 "couldn't find an associated register for the Argument\n");
6505 Register ArgVReg = ArgIt->getSecond();
6507 for (
auto [PhysReg, VirtReg] :
FuncInfo.RegInfo->liveins())
6508 if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6509 SDDbgValue *SDV =
DAG.getVRegDbgValue(
6510 Variable, Expr, PhysReg,
false , DbgLoc, SDNodeOrder);
6511 DAG.AddDbgValue(SDV,
false );
6514 LLVM_DEBUG(
dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6515 "couldn't find a physical register\n");
6520void SelectionDAGBuilder::visitConvergenceControl(
const CallInst &
I,
6523 switch (Intrinsic) {
6524 case Intrinsic::experimental_convergence_anchor:
6527 case Intrinsic::experimental_convergence_entry:
6530 case Intrinsic::experimental_convergence_loop: {
6532 auto *Token = Bundle->Inputs[0].get();
6540void SelectionDAGBuilder::visitVectorHistogram(
const CallInst &
I,
6541 unsigned IntrinsicID) {
6544 assert(IntrinsicID == Intrinsic::experimental_vector_histogram_add &&
6545 "Tried to lower unsupported histogram type");
6551 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6552 DataLayout TargetDL =
DAG.getDataLayout();
6554 Align Alignment =
DAG.getEVTAlign(VT);
6567 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
6568 MachinePointerInfo(AS),
6579 EVT IdxVT =
Index.getValueType();
6590 SDValue ID =
DAG.getTargetConstant(IntrinsicID, sdl, MVT::i32);
6593 SDValue Histogram =
DAG.getMaskedHistogram(
DAG.getVTList(MVT::Other), VT, sdl,
6597 DAG.setRoot(Histogram);
6600void SelectionDAGBuilder::visitVectorExtractLastActive(
const CallInst &
I,
6602 assert(Intrinsic == Intrinsic::experimental_vector_extract_last_active &&
6603 "Tried lowering invalid vector extract last");
6605 const DataLayout &Layout =
DAG.getDataLayout();
6609 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6619 EVT BoolVT =
Mask.getValueType().getScalarType();
6621 Result =
DAG.getSelect(sdl, ResVT, AnyActive, Result, PassThru);
6628void SelectionDAGBuilder::visitIntrinsicCall(
const CallInst &
I,
6630 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
6637 Flags.copyFMF(*FPOp);
6639 switch (Intrinsic) {
6642 visitTargetIntrinsic(
I, Intrinsic);
6644 case Intrinsic::vscale: {
6649 case Intrinsic::vastart: visitVAStart(
I);
return;
6650 case Intrinsic::vaend: visitVAEnd(
I);
return;
6651 case Intrinsic::vacopy: visitVACopy(
I);
return;
6652 case Intrinsic::returnaddress:
6657 case Intrinsic::addressofreturnaddress:
6662 case Intrinsic::sponentry:
6667 case Intrinsic::frameaddress:
6672 case Intrinsic::read_volatile_register:
6673 case Intrinsic::read_register: {
6674 Value *
Reg =
I.getArgOperand(0);
6680 DAG.getVTList(VT, MVT::Other), Chain,
RegName);
6685 case Intrinsic::write_register: {
6686 Value *
Reg =
I.getArgOperand(0);
6687 Value *RegValue =
I.getArgOperand(1);
6695 case Intrinsic::memcpy:
6696 case Intrinsic::memcpy_inline: {
6702 "memcpy_inline needs constant size");
6704 Align DstAlign = MCI.getDestAlign().valueOrOne();
6705 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6706 Align Alignment = std::min(DstAlign, SrcAlign);
6707 bool isVol = MCI.isVolatile();
6711 SDValue MC =
DAG.getMemcpy(Root, sdl, Dst, Src,
Size, Alignment, isVol,
6712 MCI.isForceInlined(), &
I, std::nullopt,
6713 MachinePointerInfo(
I.getArgOperand(0)),
6714 MachinePointerInfo(
I.getArgOperand(1)),
6716 updateDAGForMaybeTailCall(MC);
6719 case Intrinsic::memset:
6720 case Intrinsic::memset_inline: {
6726 "memset_inline needs constant size");
6728 Align DstAlign = MSII.getDestAlign().valueOrOne();
6729 bool isVol = MSII.isVolatile();
6732 Root, sdl, Dst, Value,
Size, DstAlign, isVol, MSII.isForceInlined(),
6733 &
I, MachinePointerInfo(
I.getArgOperand(0)),
I.getAAMetadata());
6734 updateDAGForMaybeTailCall(MC);
6737 case Intrinsic::memmove: {
6743 Align DstAlign = MMI.getDestAlign().valueOrOne();
6744 Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6745 Align Alignment = std::min(DstAlign, SrcAlign);
6746 bool isVol = MMI.isVolatile();
6750 SDValue MM =
DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol, &
I,
6752 MachinePointerInfo(
I.getArgOperand(0)),
6753 MachinePointerInfo(
I.getArgOperand(1)),
6755 updateDAGForMaybeTailCall(MM);
6758 case Intrinsic::memcpy_element_unordered_atomic: {
6764 Type *LengthTy =
MI.getLength()->getType();
6765 unsigned ElemSz =
MI.getElementSizeInBytes();
6769 isTC, MachinePointerInfo(
MI.getRawDest()),
6770 MachinePointerInfo(
MI.getRawSource()));
6771 updateDAGForMaybeTailCall(MC);
6774 case Intrinsic::memmove_element_unordered_atomic: {
6780 Type *LengthTy =
MI.getLength()->getType();
6781 unsigned ElemSz =
MI.getElementSizeInBytes();
6785 isTC, MachinePointerInfo(
MI.getRawDest()),
6786 MachinePointerInfo(
MI.getRawSource()));
6787 updateDAGForMaybeTailCall(MC);
6790 case Intrinsic::memset_element_unordered_atomic: {
6796 Type *LengthTy =
MI.getLength()->getType();
6797 unsigned ElemSz =
MI.getElementSizeInBytes();
6801 isTC, MachinePointerInfo(
MI.getRawDest()));
6802 updateDAGForMaybeTailCall(MC);
6805 case Intrinsic::call_preallocated_setup: {
6807 SDValue SrcValue =
DAG.getSrcValue(PreallocatedCall);
6814 case Intrinsic::call_preallocated_arg: {
6816 SDValue SrcValue =
DAG.getSrcValue(PreallocatedCall);
6830 case Intrinsic::eh_typeid_for: {
6833 unsigned TypeID =
DAG.getMachineFunction().getTypeIDFor(GV);
6834 Res =
DAG.getConstant(
TypeID, sdl, MVT::i32);
6839 case Intrinsic::eh_return_i32:
6840 case Intrinsic::eh_return_i64:
6841 DAG.getMachineFunction().setCallsEHReturn(
true);
6848 case Intrinsic::eh_unwind_init:
6849 DAG.getMachineFunction().setCallsUnwindInit(
true);
6851 case Intrinsic::eh_dwarf_cfa:
6856 case Intrinsic::eh_sjlj_callsite: {
6858 assert(
FuncInfo.getCurrentCallSite() == 0 &&
"Overlapping call sites!");
6863 case Intrinsic::eh_sjlj_functioncontext: {
6865 MachineFrameInfo &MFI =
DAG.getMachineFunction().getFrameInfo();
6868 int FI =
FuncInfo.StaticAllocaMap[FnCtx];
6872 case Intrinsic::eh_sjlj_setjmp: {
6877 DAG.getVTList(MVT::i32, MVT::Other),
Ops);
6879 DAG.setRoot(
Op.getValue(1));
6882 case Intrinsic::eh_sjlj_longjmp:
6886 case Intrinsic::eh_sjlj_setup_dispatch:
6890 case Intrinsic::masked_gather:
6891 visitMaskedGather(
I);
6893 case Intrinsic::masked_load:
6896 case Intrinsic::masked_scatter:
6897 visitMaskedScatter(
I);
6899 case Intrinsic::masked_store:
6900 visitMaskedStore(
I);
6902 case Intrinsic::masked_expandload:
6903 visitMaskedLoad(
I,
true );
6905 case Intrinsic::masked_compressstore:
6906 visitMaskedStore(
I,
true );
6908 case Intrinsic::powi:
6912 case Intrinsic::log:
6915 case Intrinsic::log2:
6919 case Intrinsic::log10:
6923 case Intrinsic::exp:
6926 case Intrinsic::exp2:
6930 case Intrinsic::pow:
6934 case Intrinsic::sqrt:
6935 case Intrinsic::fabs:
6936 case Intrinsic::sin:
6937 case Intrinsic::cos:
6938 case Intrinsic::tan:
6939 case Intrinsic::asin:
6940 case Intrinsic::acos:
6941 case Intrinsic::atan:
6942 case Intrinsic::sinh:
6943 case Intrinsic::cosh:
6944 case Intrinsic::tanh:
6945 case Intrinsic::exp10:
6946 case Intrinsic::floor:
6947 case Intrinsic::ceil:
6948 case Intrinsic::trunc:
6949 case Intrinsic::rint:
6950 case Intrinsic::nearbyint:
6951 case Intrinsic::round:
6952 case Intrinsic::roundeven:
6953 case Intrinsic::canonicalize: {
6956 switch (Intrinsic) {
6958 case Intrinsic::sqrt: Opcode =
ISD::FSQRT;
break;
6959 case Intrinsic::fabs: Opcode =
ISD::FABS;
break;
6960 case Intrinsic::sin: Opcode =
ISD::FSIN;
break;
6961 case Intrinsic::cos: Opcode =
ISD::FCOS;
break;
6962 case Intrinsic::tan: Opcode =
ISD::FTAN;
break;
6963 case Intrinsic::asin: Opcode =
ISD::FASIN;
break;
6964 case Intrinsic::acos: Opcode =
ISD::FACOS;
break;
6965 case Intrinsic::atan: Opcode =
ISD::FATAN;
break;
6966 case Intrinsic::sinh: Opcode =
ISD::FSINH;
break;
6967 case Intrinsic::cosh: Opcode =
ISD::FCOSH;
break;
6968 case Intrinsic::tanh: Opcode =
ISD::FTANH;
break;
6969 case Intrinsic::exp10: Opcode =
ISD::FEXP10;
break;
6970 case Intrinsic::floor: Opcode =
ISD::FFLOOR;
break;
6971 case Intrinsic::ceil: Opcode =
ISD::FCEIL;
break;
6972 case Intrinsic::trunc: Opcode =
ISD::FTRUNC;
break;
6973 case Intrinsic::rint: Opcode =
ISD::FRINT;
break;
6975 case Intrinsic::round: Opcode =
ISD::FROUND;
break;
6982 getValue(
I.getArgOperand(0)).getValueType(),
6986 case Intrinsic::atan2:
6988 getValue(
I.getArgOperand(0)).getValueType(),
6992 case Intrinsic::lround:
6993 case Intrinsic::llround:
6994 case Intrinsic::lrint:
6995 case Intrinsic::llrint: {
6998 switch (Intrinsic) {
7000 case Intrinsic::lround: Opcode =
ISD::LROUND;
break;
7002 case Intrinsic::lrint: Opcode =
ISD::LRINT;
break;
7003 case Intrinsic::llrint: Opcode =
ISD::LLRINT;
break;
7012 case Intrinsic::minnum:
7014 getValue(
I.getArgOperand(0)).getValueType(),
7018 case Intrinsic::maxnum:
7020 getValue(
I.getArgOperand(0)).getValueType(),
7024 case Intrinsic::minimum:
7026 getValue(
I.getArgOperand(0)).getValueType(),
7030 case Intrinsic::maximum:
7032 getValue(
I.getArgOperand(0)).getValueType(),
7036 case Intrinsic::minimumnum:
7038 getValue(
I.getArgOperand(0)).getValueType(),
7042 case Intrinsic::maximumnum:
7044 getValue(
I.getArgOperand(0)).getValueType(),
7048 case Intrinsic::copysign:
7050 getValue(
I.getArgOperand(0)).getValueType(),
7054 case Intrinsic::ldexp:
7056 getValue(
I.getArgOperand(0)).getValueType(),
7060 case Intrinsic::modf:
7061 case Intrinsic::sincos:
7062 case Intrinsic::sincospi:
7063 case Intrinsic::frexp: {
7065 switch (Intrinsic) {
7068 case Intrinsic::sincos:
7071 case Intrinsic::sincospi:
7074 case Intrinsic::modf:
7077 case Intrinsic::frexp:
7083 SDVTList VTs =
DAG.getVTList(ValueVTs);
7085 &
I,
DAG.getNode(Opcode, sdl, VTs,
getValue(
I.getArgOperand(0)), Flags));
7088 case Intrinsic::arithmetic_fence: {
7090 getValue(
I.getArgOperand(0)).getValueType(),
7094 case Intrinsic::fma:
7100#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
7101 case Intrinsic::INTRINSIC:
7102#include "llvm/IR/ConstrainedOps.def"
7105#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
7106#include "llvm/IR/VPIntrinsics.def"
7109 case Intrinsic::fptrunc_round: {
7113 std::optional<RoundingMode> RoundMode =
7121 SelectionDAG::FlagInserter FlagsInserter(
DAG, Flags);
7126 DAG.getTargetConstant((
int)*RoundMode, sdl, MVT::i32));
7131 case Intrinsic::fmuladd: {
7136 getValue(
I.getArgOperand(0)).getValueType(),
7143 getValue(
I.getArgOperand(0)).getValueType(),
7159 case Intrinsic::fptosi_sat: {
7166 case Intrinsic::fptoui_sat: {
7173 case Intrinsic::convert_from_arbitrary_fp: {
7178 const fltSemantics *SrcSem =
7181 DAG.getContext()->emitError(
7182 "convert_from_arbitrary_fp: not implemented format '" + FormatStr +
7193 DAG.getTargetConstant(
static_cast<int>(SemEnum), sdl, MVT::i32);
7198 case Intrinsic::set_rounding:
7204 case Intrinsic::is_fpclass: {
7205 const DataLayout DLayout =
DAG.getDataLayout();
7207 EVT ArgVT = TLI.
getValueType(DLayout,
I.getArgOperand(0)->getType());
7210 MachineFunction &MF =
DAG.getMachineFunction();
7214 Flags.setNoFPExcept(
7215 !
F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
7231 case Intrinsic::get_fpenv: {
7232 const DataLayout DLayout =
DAG.getDataLayout();
7234 Align TempAlign =
DAG.getEVTAlign(EnvVT);
7249 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
7252 Chain =
DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7253 Res =
DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
7259 case Intrinsic::set_fpenv: {
7260 const DataLayout DLayout =
DAG.getDataLayout();
7263 Align TempAlign =
DAG.getEVTAlign(EnvVT);
7276 Chain =
DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
7278 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
7281 Chain =
DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
7286 case Intrinsic::reset_fpenv:
7289 case Intrinsic::get_fpmode:
7298 case Intrinsic::set_fpmode:
7303 case Intrinsic::reset_fpmode: {
7308 case Intrinsic::pcmarker: {
7313 case Intrinsic::readcyclecounter: {
7316 DAG.getVTList(MVT::i64, MVT::Other),
Op);
7321 case Intrinsic::readsteadycounter: {
7324 DAG.getVTList(MVT::i64, MVT::Other),
Op);
7329 case Intrinsic::bitreverse:
7331 getValue(
I.getArgOperand(0)).getValueType(),
7334 case Intrinsic::bswap:
7336 getValue(
I.getArgOperand(0)).getValueType(),
7339 case Intrinsic::cttz: {
7347 case Intrinsic::ctlz: {
7355 case Intrinsic::ctpop: {
7361 case Intrinsic::fshl:
7362 case Intrinsic::fshr: {
7363 bool IsFSHL =
Intrinsic == Intrinsic::fshl;
7367 EVT VT =
X.getValueType();
7378 case Intrinsic::clmul: {
7384 case Intrinsic::sadd_sat: {
7390 case Intrinsic::uadd_sat: {
7396 case Intrinsic::ssub_sat: {
7402 case Intrinsic::usub_sat: {
7408 case Intrinsic::sshl_sat:
7409 case Intrinsic::ushl_sat: {
7413 EVT ShiftTy =
DAG.getTargetLoweringInfo().getShiftAmountTy(
7418 if (!
I.getType()->isVectorTy() && Op2.
getValueType() != ShiftTy) {
7421 "Unexpected shift type");
7430 case Intrinsic::smul_fix:
7431 case Intrinsic::umul_fix:
7432 case Intrinsic::smul_fix_sat:
7433 case Intrinsic::umul_fix_sat: {
7441 case Intrinsic::sdiv_fix:
7442 case Intrinsic::udiv_fix:
7443 case Intrinsic::sdiv_fix_sat:
7444 case Intrinsic::udiv_fix_sat: {
7449 Op1, Op2, Op3,
DAG, TLI));
7452 case Intrinsic::smax: {
7458 case Intrinsic::smin: {
7464 case Intrinsic::umax: {
7470 case Intrinsic::umin: {
7476 case Intrinsic::abs: {
7482 case Intrinsic::scmp: {
7489 case Intrinsic::ucmp: {
7496 case Intrinsic::stackaddress:
7497 case Intrinsic::stacksave: {
7502 Res =
DAG.getNode(SDOpcode, sdl,
DAG.getVTList(VT, MVT::Other),
Op);
7507 case Intrinsic::stackrestore:
7511 case Intrinsic::get_dynamic_area_offset: {
7520 case Intrinsic::stackguard: {
7521 MachineFunction &MF =
DAG.getMachineFunction();
7527 Res =
DAG.getPtrExtOrTrunc(Res, sdl, PtrTy);
7531 LLVMContext &Ctx = *
DAG.getContext();
7532 Ctx.
diagnose(DiagnosticInfoGeneric(
"unable to lower stackguard"));
7539 MachinePointerInfo(
Global, 0), Align,
7548 case Intrinsic::stackprotector: {
7550 MachineFunction &MF =
DAG.getMachineFunction();
7570 Chain, sdl, Src, FIN,
7577 case Intrinsic::objectsize:
7580 case Intrinsic::is_constant:
7583 case Intrinsic::annotation:
7584 case Intrinsic::ptr_annotation:
7585 case Intrinsic::launder_invariant_group:
7586 case Intrinsic::strip_invariant_group:
7591 case Intrinsic::type_test:
7592 case Intrinsic::public_type_test:
7594 "LowerTypeTests pass before code generation");
7597 case Intrinsic::assume:
7598 case Intrinsic::experimental_noalias_scope_decl:
7599 case Intrinsic::var_annotation:
7600 case Intrinsic::sideeffect:
7605 case Intrinsic::codeview_annotation: {
7607 MachineFunction &MF =
DAG.getMachineFunction();
7616 case Intrinsic::init_trampoline: {
7624 Ops[4] =
DAG.getSrcValue(
I.getArgOperand(0));
7632 case Intrinsic::adjust_trampoline:
7637 case Intrinsic::gcroot: {
7638 assert(
DAG.getMachineFunction().getFunction().hasGC() &&
7639 "only valid in functions with gc specified, enforced by Verifier");
7641 const Value *Alloca =
I.getArgOperand(0)->stripPointerCasts();
7648 case Intrinsic::gcread:
7649 case Intrinsic::gcwrite:
7651 case Intrinsic::get_rounding:
7657 case Intrinsic::expect:
7658 case Intrinsic::expect_with_probability:
7664 case Intrinsic::ubsantrap:
7665 case Intrinsic::debugtrap:
7666 case Intrinsic::trap: {
7667 StringRef TrapFuncName =
7668 I.getAttributes().getFnAttr(
"trap-func-name").getValueAsString();
7669 if (TrapFuncName.
empty()) {
7670 switch (Intrinsic) {
7671 case Intrinsic::trap:
7674 case Intrinsic::debugtrap:
7677 case Intrinsic::ubsantrap:
7680 DAG.getTargetConstant(
7686 DAG.addNoMergeSiteInfo(
DAG.getRoot().getNode(),
7687 I.hasFnAttr(Attribute::NoMerge));
7691 if (Intrinsic == Intrinsic::ubsantrap) {
7692 Value *Arg =
I.getArgOperand(0);
7696 TargetLowering::CallLoweringInfo CLI(
DAG);
7697 CLI.setDebugLoc(sdl).setChain(
getRoot()).setLibCallee(
7699 DAG.getExternalSymbol(TrapFuncName.
data(),
7702 CLI.NoMerge =
I.hasFnAttr(Attribute::NoMerge);
7708 case Intrinsic::allow_runtime_check:
7709 case Intrinsic::allow_ubsan_check:
7713 case Intrinsic::uadd_with_overflow:
7714 case Intrinsic::sadd_with_overflow:
7715 case Intrinsic::usub_with_overflow:
7716 case Intrinsic::ssub_with_overflow:
7717 case Intrinsic::umul_with_overflow:
7718 case Intrinsic::smul_with_overflow: {
7720 switch (Intrinsic) {
7722 case Intrinsic::uadd_with_overflow:
Op =
ISD::UADDO;
break;
7723 case Intrinsic::sadd_with_overflow:
Op =
ISD::SADDO;
break;
7724 case Intrinsic::usub_with_overflow:
Op =
ISD::USUBO;
break;
7725 case Intrinsic::ssub_with_overflow:
Op =
ISD::SSUBO;
break;
7726 case Intrinsic::umul_with_overflow:
Op =
ISD::UMULO;
break;
7727 case Intrinsic::smul_with_overflow:
Op =
ISD::SMULO;
break;
7735 SDVTList VTs =
DAG.getVTList(ResultVT, OverflowVT);
7739 case Intrinsic::prefetch: {
7754 std::nullopt, Flags);
7760 DAG.setRoot(Result);
7763 case Intrinsic::lifetime_start:
7764 case Intrinsic::lifetime_end: {
7765 bool IsStart = (
Intrinsic == Intrinsic::lifetime_start);
7771 if (!LifetimeObject)
7776 auto SI =
FuncInfo.StaticAllocaMap.find(LifetimeObject);
7777 if (SI ==
FuncInfo.StaticAllocaMap.end())
7781 Res =
DAG.getLifetimeNode(IsStart, sdl,
getRoot(), FrameIndex);
7785 case Intrinsic::pseudoprobe: {
7793 case Intrinsic::invariant_start:
7798 case Intrinsic::invariant_end:
7801 case Intrinsic::clear_cache: {
7806 {InputChain, StartVal, EndVal});
7811 case Intrinsic::donothing:
7812 case Intrinsic::seh_try_begin:
7813 case Intrinsic::seh_scope_begin:
7814 case Intrinsic::seh_try_end:
7815 case Intrinsic::seh_scope_end:
7818 case Intrinsic::experimental_stackmap:
7821 case Intrinsic::experimental_patchpoint_void:
7822 case Intrinsic::experimental_patchpoint:
7825 case Intrinsic::experimental_gc_statepoint:
7828 case Intrinsic::experimental_gc_result:
7831 case Intrinsic::experimental_gc_relocate:
7834 case Intrinsic::instrprof_cover:
7836 case Intrinsic::instrprof_increment:
7838 case Intrinsic::instrprof_timestamp:
7840 case Intrinsic::instrprof_value_profile:
7842 case Intrinsic::instrprof_mcdc_parameters:
7844 case Intrinsic::instrprof_mcdc_tvbitmap_update:
7846 case Intrinsic::localescape: {
7847 MachineFunction &MF =
DAG.getMachineFunction();
7848 const TargetInstrInfo *
TII =
DAG.getSubtarget().getInstrInfo();
7852 for (
unsigned Idx = 0,
E =
I.arg_size(); Idx <
E; ++Idx) {
7858 "can only escape static allocas");
7863 TII->get(TargetOpcode::LOCAL_ESCAPE))
7871 case Intrinsic::localrecover: {
7873 MachineFunction &MF =
DAG.getMachineFunction();
7879 unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7883 Value *
FP =
I.getArgOperand(1);
7889 SDValue OffsetSym =
DAG.getMCSymbol(FrameAllocSym, PtrVT);
7894 SDValue Add =
DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
7900 case Intrinsic::fake_use: {
7901 Value *
V =
I.getArgOperand(0);
7906 auto FakeUseValue = [&]() ->
SDValue {
7920 if (!FakeUseValue || FakeUseValue.isUndef())
7923 Ops[1] = FakeUseValue;
7932 case Intrinsic::reloc_none: {
7937 DAG.getTargetExternalSymbol(
7943 case Intrinsic::cond_loop: {
7953 case Intrinsic::eh_exceptionpointer:
7954 case Intrinsic::eh_exceptioncode: {
7960 SDValue N =
DAG.getCopyFromReg(
DAG.getEntryNode(), sdl, VReg, PtrVT);
7961 if (Intrinsic == Intrinsic::eh_exceptioncode)
7962 N =
DAG.getZExtOrTrunc(
N, sdl, MVT::i32);
7966 case Intrinsic::xray_customevent: {
7969 const auto &Triple =
DAG.getTarget().getTargetTriple();
7978 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
7980 Ops.push_back(LogEntryVal);
7981 Ops.push_back(StrSizeVal);
7982 Ops.push_back(Chain);
7988 MachineSDNode *MN =
DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
7991 DAG.setRoot(patchableNode);
7995 case Intrinsic::xray_typedevent: {
7998 const auto &Triple =
DAG.getTarget().getTargetTriple();
8010 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
8012 Ops.push_back(LogTypeId);
8013 Ops.push_back(LogEntryVal);
8014 Ops.push_back(StrSizeVal);
8015 Ops.push_back(Chain);
8021 MachineSDNode *MN =
DAG.getMachineNode(
8022 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys,
Ops);
8024 DAG.setRoot(patchableNode);
8028 case Intrinsic::experimental_deoptimize:
8031 case Intrinsic::stepvector:
8034 case Intrinsic::vector_reduce_fadd:
8035 case Intrinsic::vector_reduce_fmul:
8036 case Intrinsic::vector_reduce_add:
8037 case Intrinsic::vector_reduce_mul:
8038 case Intrinsic::vector_reduce_and:
8039 case Intrinsic::vector_reduce_or:
8040 case Intrinsic::vector_reduce_xor:
8041 case Intrinsic::vector_reduce_smax:
8042 case Intrinsic::vector_reduce_smin:
8043 case Intrinsic::vector_reduce_umax:
8044 case Intrinsic::vector_reduce_umin:
8045 case Intrinsic::vector_reduce_fmax:
8046 case Intrinsic::vector_reduce_fmin:
8047 case Intrinsic::vector_reduce_fmaximum:
8048 case Intrinsic::vector_reduce_fminimum:
8049 visitVectorReduce(
I, Intrinsic);
8052 case Intrinsic::icall_branch_funnel: {
8058 I.getArgOperand(1),
Offset,
DAG.getDataLayout()));
8061 "llvm.icall.branch.funnel operand must be a GlobalValue");
8062 Ops.push_back(
DAG.getTargetGlobalAddress(
Base, sdl, MVT::i64, 0));
8064 struct BranchFunnelTarget {
8070 for (
unsigned Op = 1,
N =
I.arg_size();
Op !=
N;
Op += 2) {
8073 if (ElemBase !=
Base)
8075 "to the same GlobalValue");
8081 "llvm.icall.branch.funnel operand must be a GlobalValue");
8087 [](
const BranchFunnelTarget &
T1,
const BranchFunnelTarget &T2) {
8088 return T1.Offset < T2.Offset;
8091 for (
auto &
T : Targets) {
8092 Ops.push_back(
DAG.getTargetConstant(
T.Offset, sdl, MVT::i32));
8093 Ops.push_back(
T.Target);
8096 Ops.push_back(
DAG.getRoot());
8097 SDValue N(
DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl,
8106 case Intrinsic::wasm_landingpad_index:
8112 case Intrinsic::aarch64_settag:
8113 case Intrinsic::aarch64_settag_zero: {
8114 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
8115 bool ZeroMemory =
Intrinsic == Intrinsic::aarch64_settag_zero;
8118 getValue(
I.getArgOperand(1)), MachinePointerInfo(
I.getArgOperand(0)),
8124 case Intrinsic::amdgcn_cs_chain: {
8129 Type *RetTy =
I.getType();
8139 for (
unsigned Idx : {2, 3, 1}) {
8140 TargetLowering::ArgListEntry Arg(
getValue(
I.getOperand(Idx)),
8142 Arg.setAttributes(&
I, Idx);
8143 Args.push_back(Arg);
8146 assert(Args[0].IsInReg &&
"SGPR args should be marked inreg");
8147 assert(!Args[1].IsInReg &&
"VGPR args should not be marked inreg");
8148 Args[2].IsInReg =
true;
8151 for (
unsigned Idx = 4; Idx <
I.arg_size(); ++Idx) {
8152 TargetLowering::ArgListEntry Arg(
getValue(
I.getOperand(Idx)),
8154 Arg.setAttributes(&
I, Idx);
8155 Args.push_back(Arg);
8158 TargetLowering::CallLoweringInfo CLI(
DAG);
8161 .setCallee(CC, RetTy, Callee, std::move(Args))
8164 .setConvergent(
I.isConvergent());
8166 std::pair<SDValue, SDValue>
Result =
8170 "Should've lowered as tail call");
8175 case Intrinsic::amdgcn_call_whole_wave: {
8177 bool isTailCall =
I.isTailCall();
8180 for (
unsigned Idx = 1; Idx <
I.arg_size(); ++Idx) {
8181 TargetLowering::ArgListEntry Arg(
getValue(
I.getArgOperand(Idx)),
8182 I.getArgOperand(Idx)->getType());
8183 Arg.setAttributes(&
I, Idx);
8190 Args.push_back(Arg);
8195 auto *Token = Bundle->Inputs[0].get();
8196 ConvControlToken =
getValue(Token);
8199 TargetLowering::CallLoweringInfo CLI(
DAG);
8203 getValue(
I.getArgOperand(0)), std::move(Args))
8207 .setConvergent(
I.isConvergent())
8208 .setConvergenceControlToken(ConvControlToken);
8211 std::pair<SDValue, SDValue>
Result =
8214 if (
Result.first.getNode())
8218 case Intrinsic::ptrmask: {
8234 auto HighOnes =
DAG.getNode(
8235 ISD::SHL, sdl, PtrVT,
DAG.getAllOnesConstant(sdl, PtrVT),
8236 DAG.getShiftAmountConstant(
Mask.getValueType().getFixedSizeInBits(),
8239 DAG.getZExtOrTrunc(Mask, sdl, PtrVT), HighOnes);
8240 }
else if (
Mask.getValueType() != PtrVT)
8241 Mask =
DAG.getPtrExtOrTrunc(Mask, sdl, PtrVT);
8247 case Intrinsic::threadlocal_address: {
8251 case Intrinsic::get_active_lane_mask: {
8255 EVT ElementVT =
Index.getValueType();
8266 SDValue VectorIndex =
DAG.getSplat(VecTy, sdl, Index);
8267 SDValue VectorTripCount =
DAG.getSplat(VecTy, sdl, TripCount);
8268 SDValue VectorStep =
DAG.getStepVector(sdl, VecTy);
8271 SDValue SetCC =
DAG.getSetCC(sdl, CCVT, VectorInduction,
8276 case Intrinsic::experimental_get_vector_length: {
8278 "Expected positive VF");
8283 EVT CountVT =
Count.getValueType();
8286 visitTargetIntrinsic(
I, Intrinsic);
8295 if (CountVT.
bitsLT(VT)) {
8300 SDValue MaxEVL =
DAG.getElementCount(sdl, CountVT,
8310 case Intrinsic::vector_partial_reduce_add: {
8318 case Intrinsic::vector_partial_reduce_fadd: {
8326 case Intrinsic::experimental_cttz_elts: {
8328 EVT OpVT =
Op.getValueType();
8335 SDValue AllZero =
DAG.getConstant(0, sdl, OpVT);
8344 case Intrinsic::vector_insert: {
8352 if (
Index.getValueType() != VectorIdxTy)
8353 Index =
DAG.getVectorIdxConstant(
Index->getAsZExtVal(), sdl);
8360 case Intrinsic::vector_extract: {
8368 if (
Index.getValueType() != VectorIdxTy)
8369 Index =
DAG.getVectorIdxConstant(
Index->getAsZExtVal(), sdl);
8375 case Intrinsic::experimental_vector_match: {
8381 EVT ResVT =
Mask.getValueType();
8387 visitTargetIntrinsic(
I, Intrinsic);
8391 SDValue Ret =
DAG.getConstant(0, sdl, ResVT);
8393 for (
unsigned i = 0; i < SearchSize; ++i) {
8396 DAG.getVectorIdxConstant(i, sdl));
8399 Ret =
DAG.getNode(
ISD::OR, sdl, ResVT, Ret, Cmp);
8405 case Intrinsic::vector_reverse:
8406 visitVectorReverse(
I);
8408 case Intrinsic::vector_splice_left:
8409 case Intrinsic::vector_splice_right:
8410 visitVectorSplice(
I);
8412 case Intrinsic::callbr_landingpad:
8413 visitCallBrLandingPad(
I);
8415 case Intrinsic::vector_interleave2:
8416 visitVectorInterleave(
I, 2);
8418 case Intrinsic::vector_interleave3:
8419 visitVectorInterleave(
I, 3);
8421 case Intrinsic::vector_interleave4:
8422 visitVectorInterleave(
I, 4);
8424 case Intrinsic::vector_interleave5:
8425 visitVectorInterleave(
I, 5);
8427 case Intrinsic::vector_interleave6:
8428 visitVectorInterleave(
I, 6);
8430 case Intrinsic::vector_interleave7:
8431 visitVectorInterleave(
I, 7);
8433 case Intrinsic::vector_interleave8:
8434 visitVectorInterleave(
I, 8);
8436 case Intrinsic::vector_deinterleave2:
8437 visitVectorDeinterleave(
I, 2);
8439 case Intrinsic::vector_deinterleave3:
8440 visitVectorDeinterleave(
I, 3);
8442 case Intrinsic::vector_deinterleave4:
8443 visitVectorDeinterleave(
I, 4);
8445 case Intrinsic::vector_deinterleave5:
8446 visitVectorDeinterleave(
I, 5);
8448 case Intrinsic::vector_deinterleave6:
8449 visitVectorDeinterleave(
I, 6);
8451 case Intrinsic::vector_deinterleave7:
8452 visitVectorDeinterleave(
I, 7);
8454 case Intrinsic::vector_deinterleave8:
8455 visitVectorDeinterleave(
I, 8);
8457 case Intrinsic::experimental_vector_compress:
8459 getValue(
I.getArgOperand(0)).getValueType(),
8464 case Intrinsic::experimental_convergence_anchor:
8465 case Intrinsic::experimental_convergence_entry:
8466 case Intrinsic::experimental_convergence_loop:
8467 visitConvergenceControl(
I, Intrinsic);
8469 case Intrinsic::experimental_vector_histogram_add: {
8470 visitVectorHistogram(
I, Intrinsic);
8473 case Intrinsic::experimental_vector_extract_last_active: {
8474 visitVectorExtractLastActive(
I, Intrinsic);
8477 case Intrinsic::loop_dependence_war_mask:
8482 DAG.getConstant(0, sdl, MVT::i64)));
8484 case Intrinsic::loop_dependence_raw_mask:
8489 DAG.getConstant(0, sdl, MVT::i64)));
8491 case Intrinsic::masked_udiv:
8497 case Intrinsic::masked_sdiv:
8503 case Intrinsic::masked_urem:
8509 case Intrinsic::masked_srem:
8518void SelectionDAGBuilder::pushFPOpOutChain(
SDValue Result,
8534 PendingConstrainedFP.push_back(OutChain);
8537 PendingConstrainedFPStrict.push_back(OutChain);
8542void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
8556 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8558 SDVTList VTs =
DAG.getVTList(VT, MVT::Other);
8562 Flags.setNoFPExcept(
true);
8565 Flags.copyFMF(*FPOp);
8570#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
8571 case Intrinsic::INTRINSIC: \
8572 Opcode = ISD::STRICT_##DAGN; \
8574#include "llvm/IR/ConstrainedOps.def"
8575 case Intrinsic::experimental_constrained_fmuladd: {
8582 pushFPOpOutChain(
Mul, EB);
8605 if (
DAG.isKnownNeverNaN(Opers[1]) &&
DAG.isKnownNeverNaN(Opers[2]))
8613 pushFPOpOutChain(Result, EB);
8620 std::optional<unsigned> ResOPC;
8622 case Intrinsic::vp_ctlz: {
8624 ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
8627 case Intrinsic::vp_cttz: {
8629 ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
8632 case Intrinsic::vp_cttz_elts: {
8634 ResOPC = IsZeroPoison ? ISD::VP_CTTZ_ELTS_ZERO_UNDEF : ISD::VP_CTTZ_ELTS;
8637#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \
8638 case Intrinsic::VPID: \
8639 ResOPC = ISD::VPSD; \
8641#include "llvm/IR/VPIntrinsics.def"
8646 "Inconsistency: no SDNode available for this VPIntrinsic!");
8648 if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
8649 *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
8651 return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
8652 : ISD::VP_REDUCE_FMUL;
8658void SelectionDAGBuilder::visitVPLoad(
8670 Alignment =
DAG.getEVTAlign(VT);
8673 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8674 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8677 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8678 MachinePointerInfo(PtrOperand), MMOFlags,
8680 LD =
DAG.getLoadVP(VT,
DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8687void SelectionDAGBuilder::visitVPLoadFF(
8690 assert(OpValues.
size() == 3 &&
"Unexpected number of operands");
8700 Alignment =
DAG.getEVTAlign(VT);
8703 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8704 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8707 LD =
DAG.getLoadFFVP(VT,
DL, InChain, OpValues[0], OpValues[1], OpValues[2],
8712 setValue(&VPIntrin,
DAG.getMergeValues({LD.getValue(0), Trunc},
DL));
8715void SelectionDAGBuilder::visitVPGather(
8719 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8731 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8733 *Alignment, AAInfo, Ranges);
8743 EVT IdxVT =
Index.getValueType();
8749 LD =
DAG.getGatherVP(
8750 DAG.getVTList(VT, MVT::Other), VT,
DL,
8751 {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
8757void SelectionDAGBuilder::visitVPStore(
8761 EVT VT = OpValues[0].getValueType();
8766 Alignment =
DAG.getEVTAlign(VT);
8769 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8772 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8773 MachinePointerInfo(PtrOperand), MMOFlags,
8782void SelectionDAGBuilder::visitVPScatter(
8785 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8787 EVT VT = OpValues[0].getValueType();
8797 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8799 *Alignment, AAInfo);
8809 EVT IdxVT =
Index.getValueType();
8815 ST =
DAG.getScatterVP(
DAG.getVTList(MVT::Other), VT,
DL,
8816 {getMemoryRoot(), OpValues[0], Base, Index, Scale,
8817 OpValues[2], OpValues[3]},
8823void SelectionDAGBuilder::visitVPStridedLoad(
8835 SDValue InChain = AddToChain ?
DAG.getRoot() :
DAG.getEntryNode();
8837 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8840 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8842 *Alignment, AAInfo, Ranges);
8844 SDValue LD =
DAG.getStridedLoadVP(VT,
DL, InChain, OpValues[0], OpValues[1],
8845 OpValues[2], OpValues[3], MMO,
8853void SelectionDAGBuilder::visitVPStridedStore(
8857 EVT VT = OpValues[0].getValueType();
8863 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8866 MachineMemOperand *MMO =
DAG.getMachineFunction().getMachineMemOperand(
8868 *Alignment, AAInfo);
8872 DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8880void SelectionDAGBuilder::visitVPCmp(
const VPCmpIntrinsic &VPIntrin) {
8881 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8894 "Unexpected target EVL type");
8899 SimplifyQuery SQ(
DAG.getDataLayout(), &VPIntrin);
8906 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
8909 Condition, MaskOp, EVL));
8912void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8920 return visitVPCmp(*CmpI);
8923 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
8925 SDVTList VTs =
DAG.getVTList(ValueVTs);
8931 "Unexpected target EVL type");
8935 for (
unsigned I = 0;
I < VPIntrin.
arg_size(); ++
I) {
8937 if (
I == EVLParamPos)
8944 SDNodeFlags SDFlags;
8952 visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8954 case ISD::VP_LOAD_FF:
8955 visitVPLoadFF(VPIntrin, ValueVTs[0], ValueVTs[1], OpValues);
8957 case ISD::VP_GATHER:
8958 visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8960 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8961 visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8964 visitVPStore(VPIntrin, OpValues);
8966 case ISD::VP_SCATTER:
8967 visitVPScatter(VPIntrin, OpValues);
8969 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8970 visitVPStridedStore(VPIntrin, OpValues);
8972 case ISD::VP_FMULADD: {
8973 assert(OpValues.
size() == 5 &&
"Unexpected number of operands");
8974 SDNodeFlags SDFlags;
8979 setValue(&VPIntrin,
DAG.getNode(ISD::VP_FMA,
DL, VTs, OpValues, SDFlags));
8982 ISD::VP_FMUL,
DL, VTs,
8983 {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8985 DAG.getNode(ISD::VP_FADD,
DL, VTs,
8986 {
Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8991 case ISD::VP_IS_FPCLASS: {
8992 const DataLayout DLayout =
DAG.getDataLayout();
8994 auto Constant = OpValues[1]->getAsZExtVal();
8997 {OpValues[0],
Check, OpValues[2], OpValues[3]});
9001 case ISD::VP_INTTOPTR: {
9012 case ISD::VP_PTRTOINT: {
9014 EVT DestVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9027 case ISD::VP_CTLZ_ZERO_UNDEF:
9029 case ISD::VP_CTTZ_ZERO_UNDEF:
9030 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
9031 case ISD::VP_CTTZ_ELTS: {
9033 DAG.getNode(Opcode,
DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
9051 unsigned CallSiteIndex =
FuncInfo.getCurrentCallSite();
9052 if (CallSiteIndex) {
9066 assert(BeginLabel &&
"BeginLabel should've been set");
9080 assert(
II &&
"II should've been set");
9091std::pair<SDValue, SDValue>
9105 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
9108 "Non-null chain expected with non-tail call!");
9109 assert((Result.second.getNode() || !Result.first.getNode()) &&
9110 "Null value expected with tail call!");
9112 if (!Result.second.getNode()) {
9119 PendingExports.clear();
9121 DAG.setRoot(Result.second);
9139 if (!isMustTailCall &&
9140 Caller->getFnAttribute(
"disable-tail-calls").getValueAsBool())
9146 if (
DAG.getTargetLoweringInfo().supportSwiftError() &&
9147 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
9156 bool isTailCall,
bool isMustTailCall,
9159 auto &
DL =
DAG.getDataLayout();
9166 const Value *SwiftErrorVal =
nullptr;
9173 const Value *V = *
I;
9176 if (V->getType()->isEmptyTy())
9181 Entry.setAttributes(&CB,
I - CB.
arg_begin());
9193 Args.push_back(Entry);
9204 Value *V = Bundle->Inputs[0];
9206 Entry.IsCFGuardTarget =
true;
9207 Args.push_back(Entry);
9220 "Target doesn't support calls with kcfi operand bundles.");
9228 auto *Token = Bundle->Inputs[0].get();
9229 ConvControlToken =
getValue(Token);
9240 .
setCallee(RetTy, FTy, Callee, std::move(Args), CB)
9253 "This target doesn't support calls with ptrauth operand bundles.");
9257 std::pair<SDValue, SDValue> Result =
lowerInvokable(CLI, EHPadBB);
9259 if (Result.first.getNode()) {
9274 DAG.setRoot(CopyNode);
9290 LoadTy, Builder.DAG.getDataLayout()))
9291 return Builder.getValue(LoadCst);
9297 bool ConstantMemory =
false;
9300 if (Builder.BatchAA && Builder.BatchAA->pointsToConstantMemory(PtrVal)) {
9301 Root = Builder.DAG.getEntryNode();
9302 ConstantMemory =
true;
9305 Root = Builder.DAG.getRoot();
9310 Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr,
9313 if (!ConstantMemory)
9314 Builder.PendingLoads.push_back(LoadVal.
getValue(1));
9320void SelectionDAGBuilder::processIntegerCallValue(
const Instruction &
I,
9323 EVT VT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9334bool SelectionDAGBuilder::visitMemCmpBCmpCall(
const CallInst &
I) {
9335 const Value *
LHS =
I.getArgOperand(0), *
RHS =
I.getArgOperand(1);
9336 const Value *
Size =
I.getArgOperand(2);
9339 EVT CallVT =
DAG.getTargetLoweringInfo().getValueType(
DAG.getDataLayout(),
9345 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9349 if (Res.first.getNode()) {
9350 processIntegerCallValue(
I, Res.first,
true);
9364 auto hasFastLoadsAndCompare = [&](
unsigned NumBits) {
9365 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
9387 switch (NumBitsToCompare) {
9399 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
9412 LoadL =
DAG.getBitcast(CmpVT, LoadL);
9413 LoadR =
DAG.getBitcast(CmpVT, LoadR);
9417 processIntegerCallValue(
I, Cmp,
false);
9426bool SelectionDAGBuilder::visitMemChrCall(
const CallInst &
I) {
9427 const Value *Src =
I.getArgOperand(0);
9428 const Value *
Char =
I.getArgOperand(1);
9429 const Value *
Length =
I.getArgOperand(2);
9431 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9432 std::pair<SDValue, SDValue> Res =
9435 MachinePointerInfo(Src));
9436 if (Res.first.getNode()) {
9450bool SelectionDAGBuilder::visitMemCCpyCall(
const CallInst &
I) {
9451 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9458 processIntegerCallValue(
I, Res.first,
true);
9470bool SelectionDAGBuilder::visitMemPCpyCall(
const CallInst &
I) {
9475 Align DstAlign =
DAG.InferPtrAlign(Dst).valueOrOne();
9476 Align SrcAlign =
DAG.InferPtrAlign(Src).valueOrOne();
9478 Align Alignment = std::min(DstAlign, SrcAlign);
9487 Root, sdl, Dst, Src,
Size, Alignment,
false,
false,
nullptr,
9488 std::nullopt, MachinePointerInfo(
I.getArgOperand(0)),
9489 MachinePointerInfo(
I.getArgOperand(1)),
I.getAAMetadata());
9491 "** memcpy should not be lowered as TailCall in mempcpy context **");
9495 Size =
DAG.getSExtOrTrunc(
Size, sdl, Dst.getValueType());
9508bool SelectionDAGBuilder::visitStrCpyCall(
const CallInst &
I,
bool isStpcpy) {
9509 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9511 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9514 MachinePointerInfo(Arg0), MachinePointerInfo(Arg1), isStpcpy, &
I);
9515 if (Res.first.getNode()) {
9517 DAG.setRoot(Res.second);
9529bool SelectionDAGBuilder::visitStrCmpCall(
const CallInst &
I) {
9530 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9532 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9535 MachinePointerInfo(Arg0), MachinePointerInfo(Arg1), &
I);
9536 if (Res.first.getNode()) {
9537 processIntegerCallValue(
I, Res.first,
true);
9550bool SelectionDAGBuilder::visitStrLenCall(
const CallInst &
I) {
9551 const Value *Arg0 =
I.getArgOperand(0);
9553 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9556 if (Res.first.getNode()) {
9557 processIntegerCallValue(
I, Res.first,
false);
9570bool SelectionDAGBuilder::visitStrNLenCall(
const CallInst &
I) {
9571 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9573 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9574 std::pair<SDValue, SDValue> Res =
9577 MachinePointerInfo(Arg0));
9578 if (Res.first.getNode()) {
9579 processIntegerCallValue(
I, Res.first,
false);
9592bool SelectionDAGBuilder::visitStrstrCall(
const CallInst &
I) {
9593 const SelectionDAGTargetInfo &TSI =
DAG.getSelectionDAGInfo();
9594 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9598 processIntegerCallValue(
I, Res.first,
false);
9610bool SelectionDAGBuilder::visitUnaryFloatCall(
const CallInst &
I,
9615 if (!
I.onlyReadsMemory() ||
I.isStrictFP())
9632bool SelectionDAGBuilder::visitBinaryFloatCall(
const CallInst &
I,
9637 if (!
I.onlyReadsMemory() ||
I.isStrictFP())
9650void SelectionDAGBuilder::visitCall(
const CallInst &
I) {
9652 if (
I.isInlineAsm()) {
9659 if (Function *
F =
I.getCalledFunction()) {
9660 if (
F->isDeclaration()) {
9662 if (
unsigned IID =
F->getIntrinsicID()) {
9663 visitIntrinsicCall(
I, IID);
9674 if (!
I.isNoBuiltin() && !
F->hasLocalLinkage() &&
F->hasName() &&
9675 LibInfo->getLibFunc(*
F, Func) &&
LibInfo->hasOptimizedCodeGen(Func)) {
9679 if (visitMemCmpBCmpCall(
I))
9682 case LibFunc_copysign:
9683 case LibFunc_copysignf:
9684 case LibFunc_copysignl:
9687 if (
I.onlyReadsMemory()) {
9732 case LibFunc_atan2f:
9733 case LibFunc_atan2l:
9758 case LibFunc_sqrt_finite:
9759 case LibFunc_sqrtf_finite:
9760 case LibFunc_sqrtl_finite:
9777 case LibFunc_exp10f:
9778 case LibFunc_exp10l:
9783 case LibFunc_ldexpf:
9784 case LibFunc_ldexpl:
9788 case LibFunc_strstr:
9789 if (visitStrstrCall(
I))
9792 case LibFunc_memcmp:
9793 if (visitMemCmpBCmpCall(
I))
9796 case LibFunc_memccpy:
9797 if (visitMemCCpyCall(
I))
9800 case LibFunc_mempcpy:
9801 if (visitMemPCpyCall(
I))
9804 case LibFunc_memchr:
9805 if (visitMemChrCall(
I))
9808 case LibFunc_strcpy:
9809 if (visitStrCpyCall(
I,
false))
9812 case LibFunc_stpcpy:
9813 if (visitStrCpyCall(
I,
true))
9816 case LibFunc_strcmp:
9817 if (visitStrCmpCall(
I))
9820 case LibFunc_strlen:
9821 if (visitStrLenCall(
I))
9824 case LibFunc_strnlen:
9825 if (visitStrNLenCall(
I))
9849 if (
I.hasDeoptState())
9866 const Value *Discriminator = PAB->Inputs[1];
9868 assert(
Key->getType()->isIntegerTy(32) &&
"Invalid ptrauth key");
9869 assert(Discriminator->getType()->isIntegerTy(64) &&
9870 "Invalid ptrauth discriminator");
9875 if (CalleeCPA->isKnownCompatibleWith(
Key, Discriminator,
9876 DAG.getDataLayout()))
9916 for (
const auto &Code : Codes)
9931 SDISelAsmOperandInfo &MatchingOpInfo,
9933 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
9939 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9941 OpInfo.ConstraintVT);
9942 std::pair<unsigned, const TargetRegisterClass *> InputRC =
9944 MatchingOpInfo.ConstraintVT);
9945 const bool OutOpIsIntOrFP =
9946 OpInfo.ConstraintVT.isInteger() || OpInfo.ConstraintVT.isFloatingPoint();
9947 const bool InOpIsIntOrFP = MatchingOpInfo.ConstraintVT.isInteger() ||
9948 MatchingOpInfo.ConstraintVT.isFloatingPoint();
9949 if ((OutOpIsIntOrFP != InOpIsIntOrFP) || (MatchRC.second != InputRC.second)) {
9952 " with a matching output constraint of"
9953 " incompatible type!");
9955 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9962 SDISelAsmOperandInfo &OpInfo,
9975 const Value *OpVal = OpInfo.CallOperandVal;
9993 DL.getPrefTypeAlign(Ty),
false,
9996 Chain = DAG.
getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9999 OpInfo.CallOperand = StackSlot;
10012static std::optional<unsigned>
10014 SDISelAsmOperandInfo &OpInfo,
10015 SDISelAsmOperandInfo &RefOpInfo) {
10026 return std::nullopt;
10030 unsigned AssignedReg;
10033 &
TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
10036 return std::nullopt;
10041 const MVT RegVT = *
TRI.legalclasstypes_begin(*RC);
10043 if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
10052 !
TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
10057 if (RegVT.
getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
10062 OpInfo.CallOperand =
10064 OpInfo.ConstraintVT = RegVT;
10068 }
else if (RegVT.
isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
10071 OpInfo.CallOperand =
10073 OpInfo.ConstraintVT = VT;
10080 if (OpInfo.isMatchingInputConstraint())
10081 return std::nullopt;
10083 EVT ValueVT = OpInfo.ConstraintVT;
10084 if (OpInfo.ConstraintVT == MVT::Other)
10088 unsigned NumRegs = 1;
10089 if (OpInfo.ConstraintVT != MVT::Other)
10104 I = std::find(
I, RC->
end(), AssignedReg);
10105 if (
I == RC->
end()) {
10108 return {AssignedReg};
10112 for (; NumRegs; --NumRegs, ++
I) {
10113 assert(
I != RC->
end() &&
"Ran out of registers to allocate!");
10118 OpInfo.AssignedRegs =
RegsForValue(Regs, RegVT, ValueVT);
10119 return std::nullopt;
10124 const std::vector<SDValue> &AsmNodeOperands) {
10127 for (; OperandNo; --OperandNo) {
10129 unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
10132 (
F.isRegDefKind() ||
F.isRegDefEarlyClobberKind() ||
F.isMemKind()) &&
10133 "Skipped past definitions?");
10134 CurOp +=
F.getNumOperandRegisters() + 1;
10142 unsigned Flags = 0;
10145 explicit ExtraFlags(
const CallBase &
Call) {
10147 if (
IA->hasSideEffects())
10149 if (
IA->isAlignStack())
10151 if (
IA->canThrow())
10158 void update(
const TargetLowering::AsmOperandInfo &OpInfo) {
10174 unsigned get()
const {
return Flags; }
10198struct ConstraintDecisionInfo {
10200 std::vector<SDValue> AsmNodeOperands;
10202 bool HasSideEffect =
false;
10205 SmallVector<char> Buffer;
10206 raw_svector_ostream ErrorMsg;
10208 ConstraintDecisionInfo() : ErrorMsg(Buffer) {}
10218 ExtraFlags &ExtraInfo) {
10219 for (
auto &
T : TargetConstraints) {
10220 Info.ConstraintOperands.push_back(SDISelAsmOperandInfo(
T));
10221 SDISelAsmOperandInfo &OpInfo = Info.ConstraintOperands.back();
10223 if (OpInfo.CallOperandVal)
10224 OpInfo.CallOperand = Builder.getValue(OpInfo.CallOperandVal);
10226 if (!Info.HasSideEffect)
10227 Info.HasSideEffect = OpInfo.hasMemory(TLI);
10239 Info.ErrorMsg <<
"constraint '" <<
T.ConstraintCode
10240 <<
"' expects an integer constant expression";
10244 ExtraInfo.update(
T);
10258 IA->collectAsmStrs(AsmStrs);
10261 for (SDISelAsmOperandInfo &OpInfo : Info.ConstraintOperands) {
10269 if (OpInfo.hasMatchingInput()) {
10270 SDISelAsmOperandInfo &
Input =
10271 Info.ConstraintOperands[OpInfo.MatchingInput];
10302 if (OpInfo.isIndirect &&
isFunction(OpInfo.CallOperand) &&
10305 OpInfo.isIndirect =
false;
10312 !OpInfo.isIndirect) {
10313 assert((OpInfo.isMultipleAlternative ||
10315 "Can only indirectify direct input operands!");
10322 OpInfo.CallOperandVal =
nullptr;
10325 OpInfo.isIndirect =
true;
10337 SDLoc DL = Builder.getCurSDLoc();
10338 for (SDISelAsmOperandInfo &OpInfo : Info.ConstraintOperands) {
10340 SDISelAsmOperandInfo &RefOpInfo =
10341 OpInfo.isMatchingInputConstraint()
10342 ? Info.ConstraintOperands[OpInfo.getMatchedOperand()]
10348 const char *
RegName =
TRI.getName(*RegError);
10349 Info.ErrorMsg <<
"register '" <<
RegName <<
"' allocated for constraint '"
10350 << OpInfo.ConstraintCode
10351 <<
"' does not match required type";
10355 auto DetectWriteToReservedRegister = [&]() {
10360 if (
Reg.isPhysical() &&
TRI.isInlineAsmReadOnlyReg(MF,
Reg)) {
10361 Info.ErrorMsg <<
"write to reserved register '"
10362 <<
TRI.getRegAsmName(
Reg) <<
"'";
10371 !OpInfo.isMatchingInputConstraint())) &&
10372 "Only address as input operand is allowed.");
10374 switch (OpInfo.Type) {
10380 "Failed to convert memory constraint code to constraint id.");
10385 Info.AsmNodeOperands.push_back(
10387 Info.AsmNodeOperands.push_back(OpInfo.CallOperand);
10392 if (OpInfo.AssignedRegs.Regs.empty()) {
10393 Info.ErrorMsg <<
"could not allocate output register for "
10394 <<
"constraint '" << OpInfo.ConstraintCode <<
"'";
10398 if (DetectWriteToReservedRegister())
10403 OpInfo.AssignedRegs.AddInlineAsmOperands(
10406 false, 0,
DL, DAG, Info.AsmNodeOperands);
10412 SDValue InOperandVal = OpInfo.CallOperand;
10414 if (OpInfo.isMatchingInputConstraint()) {
10418 Info.AsmNodeOperands);
10420 if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) {
10421 if (OpInfo.isIndirect) {
10423 Info.ErrorMsg <<
"inline asm not supported yet: cannot handle "
10424 <<
"tied indirect register inputs";
10434 MVT RegVT = R->getSimpleValueType(0);
10438 :
TRI.getMinimalPhysRegClass(TiedReg);
10439 for (
unsigned I = 0,
E = Flag.getNumOperandRegisters();
I !=
E; ++
I)
10446 &Info.Glue, &
Call);
10448 OpInfo.getMatchedOperand(),
DL, DAG,
10449 Info.AsmNodeOperands);
10453 assert(Flag.isMemKind() &&
"Unknown matching constraint!");
10454 assert(Flag.getNumOperandRegisters() == 1 &&
10455 "Unexpected number of operands");
10459 Flag.clearMemConstraint();
10460 Flag.setMatchingOp(OpInfo.getMatchedOperand());
10463 Info.AsmNodeOperands.push_back(Info.AsmNodeOperands[CurOp + 1]);
10474 std::vector<SDValue>
Ops;
10480 Info.ErrorMsg <<
"value out of range for constraint '"
10481 << OpInfo.ConstraintCode <<
"'";
10485 Info.ErrorMsg <<
"invalid operand for inline asm constraint '"
10486 << OpInfo.ConstraintCode <<
"'";
10499 assert((OpInfo.isIndirect ||
10501 "Operand must be indirect to be a mem!");
10504 "Memory operands expect pointer values");
10509 "Failed to convert memory constraint code to constraint id.");
10514 Info.AsmNodeOperands.push_back(
10516 Info.AsmNodeOperands.push_back(InOperandVal);
10524 "Failed to convert memory constraint code to constraint id.");
10528 SDValue AsmOp = InOperandVal;
10540 Info.AsmNodeOperands.push_back(
10542 Info.AsmNodeOperands.push_back(AsmOp);
10548 Info.ErrorMsg <<
"unknown asm constraint '" << OpInfo.ConstraintCode
10554 if (OpInfo.isIndirect) {
10555 Info.ErrorMsg <<
"cannot handle indirect register inputs yet for "
10556 <<
"constraint '" << OpInfo.ConstraintCode <<
"'";
10561 if (OpInfo.AssignedRegs.Regs.empty()) {
10562 Info.ErrorMsg <<
"could not allocate input reg for constraint '"
10563 << OpInfo.ConstraintCode <<
"'";
10567 if (DetectWriteToReservedRegister())
10570 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG,
DL, Info.Chain,
10571 &Info.Glue, &
Call);
10572 OpInfo.AssignedRegs.AddInlineAsmOperands(
10580 if (!OpInfo.AssignedRegs.Regs.empty())
10581 OpInfo.AssignedRegs.AddInlineAsmOperands(
10598 ExtraFlags ExtraInfo(
Call);
10601 Info.HasSideEffect = IA->hasSideEffects();
10607 Info.Chain = Info.HasSideEffect ? Builder.getRoot() : DAG.
getRoot();
10611 if (IsCallBr || EmitEHLabels)
10615 Info.Chain = Builder.getControlRoot();
10618 Info.Chain = Builder.lowerStartEH(Info.Chain, EHPadBB, Info.BeginLabel);
10624 Info.AsmNodeOperands.push_back(
SDValue());
10631 const MDNode *SrcLoc =
Call.getMetadata(
"srcloc");
10632 Info.AsmNodeOperands.push_back(DAG.
getMDNode(SrcLoc));
10636 Info.AsmNodeOperands.push_back(
10645void SelectionDAGBuilder::visitInlineAsm(
const CallBase &
Call,
10647 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10649 DAG.getDataLayout(),
DAG.getSubtarget().getRegisterInfo(),
Call);
10652 "InvokeInst must have an EHPadBB");
10654 ConstraintDecisionInfo
Info;
10657 return emitInlineAsmError(
Call,
Info.ErrorMsg.str());
10665 Info.AsmNodeOperands.push_back(Glue);
10671 Info.AsmNodeOperands);
10683 ResultTypes = StructResult->elements();
10684 else if (!CallResultType->
isVoidTy())
10685 ResultTypes =
ArrayRef(CallResultType);
10687 auto CurResultType = ResultTypes.
begin();
10688 auto handleRegAssign = [&](
SDValue V) {
10689 assert(CurResultType != ResultTypes.
end() &&
"Unexpected value");
10690 assert((*CurResultType)->isSized() &&
"Unexpected unsized type");
10691 EVT ResultVT = TLI.
getValueType(
DAG.getDataLayout(), *CurResultType);
10703 if (ResultVT !=
V.getValueType() &&
10706 else if (ResultVT !=
V.getValueType() && ResultVT.
isInteger() &&
10707 V.getValueType().isInteger()) {
10713 assert(ResultVT ==
V.getValueType() &&
"Asm result value mismatch!");
10719 for (SDISelAsmOperandInfo &OpInfo :
Info.ConstraintOperands) {
10723 if (OpInfo.AssignedRegs.
Regs.empty())
10726 switch (OpInfo.ConstraintType) {
10730 Chain, &Glue, &
Call);
10742 assert(
false &&
"Unexpected unknown constraint");
10746 if (OpInfo.isIndirect) {
10747 const Value *Ptr = OpInfo.CallOperandVal;
10748 assert(Ptr &&
"Expected value CallOperandVal for indirect asm operand");
10750 MachinePointerInfo(Ptr));
10757 handleRegAssign(V);
10759 handleRegAssign(Val);
10765 if (!ResultValues.
empty()) {
10766 assert(CurResultType == ResultTypes.
end() &&
10767 "Mismatch in number of ResultTypes");
10769 "Mismatch in number of output operands in asm result");
10772 DAG.getVTList(ResultVTs), ResultValues);
10777 if (!OutChains.
empty())
10781 Chain = lowerEndEH(Chain,
II, EHPadBB,
Info.BeginLabel);
10784 if (ResultValues.
empty() ||
Info.HasSideEffect || !OutChains.
empty() ||
10786 DAG.setRoot(Chain);
10789void SelectionDAGBuilder::emitInlineAsmError(
const CallBase &
Call,
10790 const Twine &Message) {
10791 LLVMContext &Ctx = *
DAG.getContext();
10795 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10799 if (ValueVTs.
empty())
10803 for (
const EVT &VT : ValueVTs)
10804 Ops.push_back(
DAG.getUNDEF(VT));
10809void SelectionDAGBuilder::visitVAStart(
const CallInst &
I) {
10813 DAG.getSrcValue(
I.getArgOperand(0))));
10816void SelectionDAGBuilder::visitVAArg(
const VAArgInst &
I) {
10817 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
10818 const DataLayout &
DL =
DAG.getDataLayout();
10822 DL.getABITypeAlign(
I.getType()).value());
10823 DAG.setRoot(
V.getValue(1));
10825 if (
I.getType()->isPointerTy())
10826 V =
DAG.getPtrExtOrTrunc(
10831void SelectionDAGBuilder::visitVAEnd(
const CallInst &
I) {
10835 DAG.getSrcValue(
I.getArgOperand(0))));
10838void SelectionDAGBuilder::visitVACopy(
const CallInst &
I) {
10843 DAG.getSrcValue(
I.getArgOperand(0)),
10844 DAG.getSrcValue(
I.getArgOperand(1))));
10850 std::optional<ConstantRange> CR =
getRange(
I);
10852 if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped())
10855 APInt Lo = CR->getUnsignedMin();
10856 if (!
Lo.isMinValue())
10859 APInt Hi = CR->getUnsignedMax();
10860 unsigned Bits = std::max(
Hi.getActiveBits(),
10868 DAG.getValueType(SmallVT));
10869 unsigned NumVals =
Op.getNode()->getNumValues();
10875 Ops.push_back(ZExt);
10876 for (
unsigned I = 1;
I != NumVals; ++
I)
10877 Ops.push_back(
Op.getValue(
I));
10879 return DAG.getMergeValues(
Ops,
SL);
10889 SDValue TestConst =
DAG.getTargetConstant(Classes,
SDLoc(), MVT::i32);
10897 for (
unsigned I = 0, E =
Ops.size();
I != E; ++
I) {
10900 MergeOp, TestConst);
10903 return DAG.getMergeValues(
Ops,
SL);
10914 unsigned ArgIdx,
unsigned NumArgs,
SDValue Callee,
Type *ReturnTy,
10917 Args.reserve(NumArgs);
10921 for (
unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
10922 ArgI != ArgE; ++ArgI) {
10923 const Value *V =
Call->getOperand(ArgI);
10925 assert(!V->getType()->isEmptyTy() &&
"Empty type passed to intrinsic.");
10928 Entry.setAttributes(
Call, ArgI);
10929 Args.push_back(Entry);
10934 .
setCallee(
Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
10963 for (
unsigned I = StartIdx;
I <
Call.arg_size();
I++) {
10972 Ops.push_back(Builder.getValue(
Call.getArgOperand(
I)));
10978void SelectionDAGBuilder::visitStackmap(
const CallInst &CI) {
11004 Ops.push_back(Chain);
11005 Ops.push_back(InGlue);
11012 assert(
ID.getValueType() == MVT::i64);
11014 DAG.getTargetConstant(
ID->getAsZExtVal(),
DL,
ID.getValueType());
11015 Ops.push_back(IDConst);
11021 Ops.push_back(ShadConst);
11027 SDVTList NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
11031 Chain =
DAG.getCALLSEQ_END(Chain, 0, 0, InGlue,
DL);
11036 DAG.setRoot(Chain);
11039 FuncInfo.MF->getFrameInfo().setHasStackMap();
11043void SelectionDAGBuilder::visitPatchpoint(
const CallBase &CB,
11060 Callee =
DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
11063 Callee =
DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
11064 SDLoc(SymbolicCallee),
11065 SymbolicCallee->getValueType(0));
11075 "Not enough arguments provided to the patchpoint intrinsic");
11078 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
11082 TargetLowering::CallLoweringInfo CLI(
DAG);
11087 SDNode *CallEnd =
Result.second.getNode();
11096 "Expected a callseq node.");
11098 bool HasGlue =
Call->getGluedNode();
11123 Ops.push_back(Callee);
11129 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
11130 Ops.push_back(
DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
11133 Ops.push_back(
DAG.getTargetConstant((
unsigned)CC, dl, MVT::i32));
11138 for (
unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i !=
e; ++i)
11149 if (IsAnyRegCC && HasDef) {
11151 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
11154 assert(ValueVTs.
size() == 1 &&
"Expected only one return value type.");
11159 NodeTys =
DAG.getVTList(ValueVTs);
11161 NodeTys =
DAG.getVTList(MVT::Other, MVT::Glue);
11178 if (IsAnyRegCC && HasDef) {
11181 DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
11187 FuncInfo.MF->getFrameInfo().setHasPatchPoint();
11190void SelectionDAGBuilder::visitVectorReduce(
const CallInst &
I,
11192 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
11195 if (
I.arg_size() > 1)
11200 SDNodeFlags SDFlags;
11204 switch (Intrinsic) {
11205 case Intrinsic::vector_reduce_fadd:
11213 case Intrinsic::vector_reduce_fmul:
11221 case Intrinsic::vector_reduce_add:
11224 case Intrinsic::vector_reduce_mul:
11227 case Intrinsic::vector_reduce_and:
11230 case Intrinsic::vector_reduce_or:
11233 case Intrinsic::vector_reduce_xor:
11236 case Intrinsic::vector_reduce_smax:
11239 case Intrinsic::vector_reduce_smin:
11242 case Intrinsic::vector_reduce_umax:
11245 case Intrinsic::vector_reduce_umin:
11248 case Intrinsic::vector_reduce_fmax:
11251 case Intrinsic::vector_reduce_fmin:
11254 case Intrinsic::vector_reduce_fmaximum:
11257 case Intrinsic::vector_reduce_fminimum:
11271 Attrs.push_back(Attribute::SExt);
11273 Attrs.push_back(Attribute::ZExt);
11275 Attrs.push_back(Attribute::InReg);
11277 return AttributeList::get(CLI.
RetTy->
getContext(), AttributeList::ReturnIndex,
11285std::pair<SDValue, SDValue>
11299 "Only supported for non-aggregate returns");
11302 for (
Type *Ty : RetOrigTys)
11311 RetOrigTys.
swap(OldRetOrigTys);
11312 RetVTs.
swap(OldRetVTs);
11313 Offsets.swap(OldOffsets);
11315 for (
size_t i = 0, e = OldRetVTs.
size(); i != e; ++i) {
11316 EVT RetVT = OldRetVTs[i];
11320 unsigned RegisterVTByteSZ = RegisterVT.
getSizeInBits() / 8;
11321 RetOrigTys.
append(NumRegs, OldRetOrigTys[i]);
11322 RetVTs.
append(NumRegs, RegisterVT);
11323 for (
unsigned j = 0; j != NumRegs; ++j)
11336 int DemoteStackIdx = -100;
11349 ArgListEntry Entry(DemoteStackSlot, StackSlotPtrType);
11350 Entry.IsSRet =
true;
11351 Entry.Alignment = Alignment;
11363 for (
unsigned I = 0, E = RetVTs.
size();
I != E; ++
I) {
11365 if (NeedsRegBlock) {
11366 Flags.setInConsecutiveRegs();
11367 if (
I == RetVTs.
size() - 1)
11368 Flags.setInConsecutiveRegsLast();
11370 EVT VT = RetVTs[
I];
11374 for (
unsigned i = 0; i != NumRegs; ++i) {
11388 CLI.
Ins.push_back(Ret);
11397 if (Arg.IsSwiftError) {
11403 CLI.
Ins.push_back(Ret);
11411 for (
unsigned i = 0, e = Args.size(); i != e; ++i) {
11415 Type *FinalType = Args[i].Ty;
11416 if (Args[i].IsByVal)
11417 FinalType = Args[i].IndirectType;
11420 for (
unsigned Value = 0, NumValues = OrigArgTys.
size();
Value != NumValues;
11423 Type *ArgTy = OrigArgTy;
11424 if (Args[i].Ty != Args[i].OrigTy) {
11425 assert(
Value == 0 &&
"Only supported for non-aggregate arguments");
11426 ArgTy = Args[i].Ty;
11431 Args[i].Node.getResNo() +
Value);
11438 Flags.setOrigAlign(OriginalAlignment);
11443 Flags.setPointer();
11446 if (Args[i].IsZExt)
11448 if (Args[i].IsSExt)
11450 if (Args[i].IsNoExt)
11452 if (Args[i].IsInReg) {
11459 Flags.setHvaStart();
11465 if (Args[i].IsSRet)
11467 if (Args[i].IsSwiftSelf)
11468 Flags.setSwiftSelf();
11469 if (Args[i].IsSwiftAsync)
11470 Flags.setSwiftAsync();
11471 if (Args[i].IsSwiftError)
11472 Flags.setSwiftError();
11473 if (Args[i].IsCFGuardTarget)
11474 Flags.setCFGuardTarget();
11475 if (Args[i].IsByVal)
11477 if (Args[i].IsByRef)
11479 if (Args[i].IsPreallocated) {
11480 Flags.setPreallocated();
11488 if (Args[i].IsInAlloca) {
11489 Flags.setInAlloca();
11498 if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
11499 unsigned FrameSize =
DL.getTypeAllocSize(Args[i].IndirectType);
11500 Flags.setByValSize(FrameSize);
11503 if (
auto MA = Args[i].Alignment)
11507 }
else if (
auto MA = Args[i].Alignment) {
11510 MemAlign = OriginalAlignment;
11512 Flags.setMemAlign(MemAlign);
11513 if (Args[i].IsNest)
11516 Flags.setInConsecutiveRegs();
11519 unsigned NumParts =
11524 if (Args[i].IsSExt)
11526 else if (Args[i].IsZExt)
11531 if (Args[i].IsReturned && !
Op.getValueType().isVector() &&
11536 Args[i].Ty->getPointerAddressSpace())) &&
11537 RetVTs.
size() == NumValues &&
"unexpected use of 'returned'");
11550 CLI.
RetZExt == Args[i].IsZExt))
11551 Flags.setReturned();
11557 for (
unsigned j = 0; j != NumParts; ++j) {
11563 j * Parts[j].
getValueType().getStoreSize().getKnownMinValue());
11564 if (NumParts > 1 && j == 0)
11568 if (j == NumParts - 1)
11572 CLI.
Outs.push_back(MyFlags);
11573 CLI.
OutVals.push_back(Parts[j]);
11576 if (NeedsRegBlock &&
Value == NumValues - 1)
11577 CLI.
Outs[CLI.
Outs.size() - 1].Flags.setInConsecutiveRegsLast();
11589 "LowerCall didn't return a valid chain!");
11591 "LowerCall emitted a return value for a tail call!");
11593 "LowerCall didn't emit the correct number of values!");
11605 for (
unsigned i = 0, e = CLI.
Ins.size(); i != e; ++i) {
11606 assert(InVals[i].
getNode() &&
"LowerCall emitted a null value!");
11607 assert(
EVT(CLI.
Ins[i].VT) == InVals[i].getValueType() &&
11608 "LowerCall emitted a value with the wrong type!");
11618 unsigned NumValues = RetVTs.
size();
11619 ReturnValues.
resize(NumValues);
11626 for (
unsigned i = 0; i < NumValues; ++i) {
11633 DemoteStackIdx, Offsets[i]),
11635 ReturnValues[i] = L;
11636 Chains[i] = L.getValue(1);
11643 std::optional<ISD::NodeType> AssertOp;
11648 unsigned CurReg = 0;
11649 for (
EVT VT : RetVTs) {
11655 CLI.
DAG, CLI.
DL, &InVals[CurReg], NumRegs, RegisterVT, VT,
nullptr,
11663 if (ReturnValues.
empty())
11669 return std::make_pair(Res, CLI.
Chain);
11686 if (
N->getNumValues() == 1) {
11694 "Lowering returned the wrong number of results!");
11697 for (
unsigned I = 0, E =
N->getNumValues();
I != E; ++
I)
11711 "Copy from a reg to the same reg!");
11712 assert(!Reg.isPhysical() &&
"Is a physreg");
11718 RegsForValue RFV(V->getContext(), TLI,
DAG.getDataLayout(), Reg, V->getType(),
11723 auto PreferredExtendIt =
FuncInfo.PreferredExtendType.find(V);
11724 if (PreferredExtendIt !=
FuncInfo.PreferredExtendType.end())
11725 ExtendType = PreferredExtendIt->second;
11728 PendingExports.push_back(Chain);
11740 return A->use_empty();
11742 const BasicBlock &Entry =
A->getParent()->front();
11743 for (
const User *U :
A->users())
11752 std::pair<const AllocaInst *, const StoreInst *>>;
11764 enum StaticAllocaInfo {
Unknown, Clobbered, Elidable };
11766 unsigned NumArgs = FuncInfo->
Fn->
arg_size();
11767 StaticAllocas.
reserve(NumArgs * 2);
11769 auto GetInfoIfStaticAlloca = [&](
const Value *V) -> StaticAllocaInfo * {
11772 V = V->stripPointerCasts();
11774 if (!AI || !AI->isStaticAlloca() || !FuncInfo->
StaticAllocaMap.count(AI))
11777 return &Iter.first->second;
11794 if (
I.isDebugOrPseudoInst())
11798 for (
const Use &U :
I.operands()) {
11799 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
11800 *Info = StaticAllocaInfo::Clobbered;
11806 if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(
SI->getValueOperand()))
11807 *Info = StaticAllocaInfo::Clobbered;
11810 const Value *Dst =
SI->getPointerOperand()->stripPointerCasts();
11811 StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
11817 if (*Info != StaticAllocaInfo::Unknown)
11825 const Value *Val =
SI->getValueOperand()->stripPointerCasts();
11828 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
11830 DL.getTypeStoreSize(Arg->
getType()) != *AllocaSize ||
11831 !
DL.typeSizeEqualsStoreSize(Arg->
getType()) ||
11832 ArgCopyElisionCandidates.count(Arg)) {
11833 *Info = StaticAllocaInfo::Clobbered;
11837 LLVM_DEBUG(
dbgs() <<
"Found argument copy elision candidate: " << *AI
11841 *Info = StaticAllocaInfo::Elidable;
11842 ArgCopyElisionCandidates.insert({Arg, {AI,
SI}});
11847 if (ArgCopyElisionCandidates.size() == NumArgs)
11871 auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
11872 assert(ArgCopyIter != ArgCopyElisionCandidates.end());
11873 const AllocaInst *AI = ArgCopyIter->second.first;
11874 int FixedIndex = FINode->getIndex();
11876 int OldIndex = AllocaIndex;
11880 dbgs() <<
" argument copy elision failed due to bad fixed stack "
11886 LLVM_DEBUG(
dbgs() <<
" argument copy elision failed: alignment of alloca "
11887 "greater than stack argument alignment ("
11888 <<
DebugStr(RequiredAlignment) <<
" vs "
11896 dbgs() <<
"Eliding argument copy from " << Arg <<
" to " << *AI <<
'\n'
11897 <<
" Replacing frame index " << OldIndex <<
" with " << FixedIndex
11903 AllocaIndex = FixedIndex;
11904 ArgCopyElisionFrameIndexMap.
insert({OldIndex, FixedIndex});
11905 for (
SDValue ArgVal : ArgVals)
11909 const StoreInst *
SI = ArgCopyIter->second.second;
11922void SelectionDAGISel::LowerArguments(
const Function &
F) {
11923 SelectionDAG &DAG =
SDB->DAG;
11924 SDLoc dl =
SDB->getCurSDLoc();
11929 if (
F.hasFnAttribute(Attribute::Naked))
11934 MVT ValueVT =
TLI->getPointerTy(
DL,
DL.getAllocaAddrSpace());
11936 ISD::ArgFlagsTy
Flags;
11938 MVT RegisterVT =
TLI->getRegisterType(*DAG.
getContext(), ValueVT);
11939 ISD::InputArg RetArg(Flags, RegisterVT, ValueVT,
F.getReturnType(),
true,
11949 ArgCopyElisionCandidates);
11952 for (
const Argument &Arg :
F.args()) {
11953 unsigned ArgNo = Arg.getArgNo();
11956 bool isArgValueUsed = !Arg.
use_empty();
11958 if (Arg.hasAttribute(Attribute::ByVal))
11959 FinalType = Arg.getParamByValType();
11960 bool NeedsRegBlock =
TLI->functionArgumentNeedsConsecutiveRegisters(
11961 FinalType,
F.getCallingConv(),
F.isVarArg(),
DL);
11962 for (
unsigned Value = 0, NumValues =
Types.size();
Value != NumValues;
11965 EVT VT =
TLI->getValueType(
DL, ArgTy);
11966 ISD::ArgFlagsTy
Flags;
11969 Flags.setPointer();
11972 if (Arg.hasAttribute(Attribute::ZExt))
11974 if (Arg.hasAttribute(Attribute::SExt))
11976 if (Arg.hasAttribute(Attribute::InReg)) {
11983 Flags.setHvaStart();
11989 if (Arg.hasAttribute(Attribute::StructRet))
11991 if (Arg.hasAttribute(Attribute::SwiftSelf))
11992 Flags.setSwiftSelf();
11993 if (Arg.hasAttribute(Attribute::SwiftAsync))
11994 Flags.setSwiftAsync();
11995 if (Arg.hasAttribute(Attribute::SwiftError))
11996 Flags.setSwiftError();
11997 if (Arg.hasAttribute(Attribute::ByVal))
11999 if (Arg.hasAttribute(Attribute::ByRef))
12001 if (Arg.hasAttribute(Attribute::InAlloca)) {
12002 Flags.setInAlloca();
12010 if (Arg.hasAttribute(Attribute::Preallocated)) {
12011 Flags.setPreallocated();
12023 const Align OriginalAlignment(
12024 TLI->getABIAlignmentForCallingConv(ArgTy,
DL));
12025 Flags.setOrigAlign(OriginalAlignment);
12028 Type *ArgMemTy =
nullptr;
12029 if (
Flags.isByVal() ||
Flags.isInAlloca() ||
Flags.isPreallocated() ||
12032 ArgMemTy = Arg.getPointeeInMemoryValueType();
12034 uint64_t MemSize =
DL.getTypeAllocSize(ArgMemTy);
12039 if (
auto ParamAlign = Arg.getParamStackAlign())
12040 MemAlign = *ParamAlign;
12041 else if ((ParamAlign = Arg.getParamAlign()))
12042 MemAlign = *ParamAlign;
12044 MemAlign =
TLI->getByValTypeAlignment(ArgMemTy,
DL);
12045 if (
Flags.isByRef())
12046 Flags.setByRefSize(MemSize);
12048 Flags.setByValSize(MemSize);
12049 }
else if (
auto ParamAlign = Arg.getParamStackAlign()) {
12050 MemAlign = *ParamAlign;
12052 MemAlign = OriginalAlignment;
12054 Flags.setMemAlign(MemAlign);
12056 if (Arg.hasAttribute(Attribute::Nest))
12059 Flags.setInConsecutiveRegs();
12060 if (ArgCopyElisionCandidates.count(&Arg))
12061 Flags.setCopyElisionCandidate();
12062 if (Arg.hasAttribute(Attribute::Returned))
12063 Flags.setReturned();
12065 MVT RegisterVT =
TLI->getRegisterTypeForCallingConv(
12066 *
CurDAG->getContext(),
F.getCallingConv(), VT);
12067 unsigned NumRegs =
TLI->getNumRegistersForCallingConv(
12068 *
CurDAG->getContext(),
F.getCallingConv(), VT);
12069 for (
unsigned i = 0; i != NumRegs; ++i) {
12073 ISD::InputArg MyFlags(
12074 Flags, RegisterVT, VT, ArgTy, isArgValueUsed, ArgNo,
12076 if (NumRegs > 1 && i == 0)
12077 MyFlags.Flags.setSplit();
12080 MyFlags.Flags.setOrigAlign(
Align(1));
12081 if (i == NumRegs - 1)
12082 MyFlags.Flags.setSplitEnd();
12086 if (NeedsRegBlock &&
Value == NumValues - 1)
12087 Ins[Ins.
size() - 1].Flags.setInConsecutiveRegsLast();
12093 SDValue NewRoot =
TLI->LowerFormalArguments(
12094 DAG.
getRoot(),
F.getCallingConv(),
F.isVarArg(), Ins, dl, DAG, InVals);
12098 "LowerFormalArguments didn't return a valid chain!");
12100 "LowerFormalArguments didn't emit the correct number of values!");
12102 for (
unsigned i = 0, e = Ins.
size(); i != e; ++i) {
12104 "LowerFormalArguments emitted a null value!");
12106 "LowerFormalArguments emitted a value with the wrong type!");
12118 MVT VT =
TLI->getPointerTy(
DL,
DL.getAllocaAddrSpace());
12119 MVT RegVT =
TLI->getRegisterType(*
CurDAG->getContext(), VT);
12120 std::optional<ISD::NodeType> AssertOp;
12123 F.getCallingConv(), AssertOp);
12125 MachineFunction&
MF =
SDB->DAG.getMachineFunction();
12126 MachineRegisterInfo&
RegInfo =
MF.getRegInfo();
12128 RegInfo.createVirtualRegister(
TLI->getRegClassFor(RegVT));
12129 FuncInfo->DemoteRegister = SRetReg;
12131 SDB->DAG.getCopyToReg(NewRoot,
SDB->getCurSDLoc(), SRetReg, ArgValue);
12139 DenseMap<int, int> ArgCopyElisionFrameIndexMap;
12140 for (
const Argument &Arg :
F.args()) {
12144 unsigned NumValues = ValueVTs.
size();
12145 if (NumValues == 0)
12152 if (Ins[i].
Flags.isCopyElisionCandidate()) {
12153 unsigned NumParts = 0;
12154 for (EVT VT : ValueVTs)
12155 NumParts +=
TLI->getNumRegistersForCallingConv(*
CurDAG->getContext(),
12156 F.getCallingConv(), VT);
12160 ArrayRef(&InVals[i], NumParts), ArgHasUses);
12165 bool isSwiftErrorArg =
12166 TLI->supportSwiftError() &&
12167 Arg.hasAttribute(Attribute::SwiftError);
12168 if (!ArgHasUses && !isSwiftErrorArg) {
12169 SDB->setUnusedArgValue(&Arg, InVals[i]);
12172 if (FrameIndexSDNode *FI =
12174 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12177 for (
unsigned Val = 0; Val != NumValues; ++Val) {
12178 EVT VT = ValueVTs[Val];
12179 MVT PartVT =
TLI->getRegisterTypeForCallingConv(*
CurDAG->getContext(),
12180 F.getCallingConv(), VT);
12181 unsigned NumParts =
TLI->getNumRegistersForCallingConv(
12182 *
CurDAG->getContext(),
F.getCallingConv(), VT);
12187 if (ArgHasUses || isSwiftErrorArg) {
12188 std::optional<ISD::NodeType> AssertOp;
12189 if (Arg.hasAttribute(Attribute::SExt))
12191 else if (Arg.hasAttribute(Attribute::ZExt))
12196 NewRoot,
F.getCallingConv(), AssertOp);
12199 if (NoFPClass !=
fcNone) {
12201 static_cast<uint64_t
>(NoFPClass), dl, MVT::i32);
12203 OutVal, SDNoFPClass);
12212 if (ArgValues.
empty())
12216 if (FrameIndexSDNode *FI =
12218 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12221 SDB->getCurSDLoc());
12223 SDB->setValue(&Arg, Res);
12233 if (LoadSDNode *LNode =
12235 if (FrameIndexSDNode *FI =
12237 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
12265 FuncInfo->InitializeRegForValue(&Arg);
12266 SDB->CopyToExportRegsIfNeeded(&Arg);
12270 if (!Chains.
empty()) {
12277 assert(i == InVals.
size() &&
"Argument register count mismatch!");
12281 if (!ArgCopyElisionFrameIndexMap.
empty()) {
12282 for (MachineFunction::VariableDbgInfo &VI :
12283 MF->getInStackSlotVariableDbgInfo()) {
12284 auto I = ArgCopyElisionFrameIndexMap.
find(
VI.getStackSlot());
12285 if (
I != ArgCopyElisionFrameIndexMap.
end())
12286 VI.updateStackSlot(
I->second);
12301SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(
const BasicBlock *LLVMBB) {
12302 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12304 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
12310 MachineBasicBlock *SuccMBB =
FuncInfo.getMBB(SuccBB);
12314 if (!SuccsHandled.
insert(SuccMBB).second)
12322 for (
const PHINode &PN : SuccBB->phis()) {
12324 if (PN.use_empty())
12328 if (PN.getType()->isEmptyTy())
12332 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
12337 RegOut =
FuncInfo.CreateRegs(&PN);
12355 "Didn't codegen value into a register!??");
12365 for (EVT VT : ValueVTs) {
12367 for (
unsigned i = 0; i != NumRegisters; ++i)
12369 Reg += NumRegisters;
12389void SelectionDAGBuilder::updateDAGForMaybeTailCall(
SDValue MaybeTC) {
12391 if (MaybeTC.
getNode() !=
nullptr)
12392 DAG.setRoot(MaybeTC);
12397void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W,
Value *
Cond,
12400 MachineFunction *CurMF =
FuncInfo.MF;
12401 MachineBasicBlock *NextMBB =
nullptr;
12406 unsigned Size =
W.LastCluster -
W.FirstCluster + 1;
12408 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
12410 if (
Size == 2 &&
W.MBB == SwitchMBB) {
12418 CaseCluster &
Small = *
W.FirstCluster;
12419 CaseCluster &
Big = *
W.LastCluster;
12423 const APInt &SmallValue =
Small.Low->getValue();
12424 const APInt &BigValue =
Big.Low->getValue();
12427 APInt CommonBit = BigValue ^ SmallValue;
12434 DAG.getConstant(CommonBit,
DL, VT));
12436 DL, MVT::i1,
Or,
DAG.getConstant(BigValue | SmallValue,
DL, VT),
12442 addSuccessorWithProb(SwitchMBB,
Small.MBB,
Small.Prob +
Big.Prob);
12444 addSuccessorWithProb(
12445 SwitchMBB, DefaultMBB,
12449 addSuccessorWithProb(SwitchMBB, DefaultMBB);
12457 DAG.getBasicBlock(DefaultMBB));
12459 DAG.setRoot(BrCond);
12471 [](
const CaseCluster &a,
const CaseCluster &b) {
12472 return a.Prob != b.Prob ?
12474 a.Low->getValue().slt(b.Low->getValue());
12481 if (
I->Prob >
W.LastCluster->Prob)
12483 if (
I->Kind ==
CC_Range &&
I->MBB == NextMBB) {
12491 BranchProbability DefaultProb =
W.DefaultProb;
12492 BranchProbability UnhandledProbs = DefaultProb;
12494 UnhandledProbs +=
I->Prob;
12496 MachineBasicBlock *CurMBB =
W.MBB;
12498 bool FallthroughUnreachable =
false;
12499 MachineBasicBlock *Fallthrough;
12500 if (
I ==
W.LastCluster) {
12502 Fallthrough = DefaultMBB;
12507 CurMF->
insert(BBI, Fallthrough);
12511 UnhandledProbs -=
I->Prob;
12516 JumpTableHeader *JTH = &
SL->JTCases[
I->JTCasesIndex].first;
12517 SwitchCG::JumpTable *JT = &
SL->JTCases[
I->JTCasesIndex].second;
12520 MachineBasicBlock *JumpMBB = JT->
MBB;
12521 CurMF->
insert(BBI, JumpMBB);
12523 auto JumpProb =
I->Prob;
12524 auto FallthroughProb = UnhandledProbs;
12532 if (*SI == DefaultMBB) {
12533 JumpProb += DefaultProb / 2;
12534 FallthroughProb -= DefaultProb / 2;
12552 if (FallthroughUnreachable) {
12559 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
12560 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
12569 if (CurMBB == SwitchMBB) {
12577 BitTestBlock *BTB = &
SL->BitTestCases[
I->BTCasesIndex];
12580 for (BitTestCase &BTC : BTB->
Cases)
12592 BTB->
Prob += DefaultProb / 2;
12596 if (FallthroughUnreachable)
12600 if (CurMBB == SwitchMBB) {
12607 const Value *
RHS, *
LHS, *MHS;
12609 if (
I->Low ==
I->High) {
12624 if (FallthroughUnreachable)
12628 CaseBlock CB(CC,
LHS,
RHS, MHS,
I->MBB, Fallthrough, CurMBB,
12631 if (CurMBB == SwitchMBB)
12634 SL->SwitchCases.push_back(CB);
12639 CurMBB = Fallthrough;
12643void SelectionDAGBuilder::splitWorkItem(
SwitchWorkList &WorkList,
12644 const SwitchWorkListItem &W,
12647 assert(
W.FirstCluster->Low->getValue().slt(
W.LastCluster->Low->getValue()) &&
12648 "Clusters not sorted?");
12649 assert(
W.LastCluster -
W.FirstCluster + 1 >= 2 &&
"Too small to split!");
12651 auto [LastLeft, FirstRight, LeftProb, RightProb] =
12652 SL->computeSplitWorkItemInfo(W);
12657 assert(PivotCluster >
W.FirstCluster);
12658 assert(PivotCluster <=
W.LastCluster);
12663 const ConstantInt *Pivot = PivotCluster->Low;
12672 MachineBasicBlock *LeftMBB;
12673 if (FirstLeft == LastLeft && FirstLeft->Kind ==
CC_Range &&
12674 FirstLeft->Low ==
W.GE &&
12675 (FirstLeft->High->getValue() + 1LL) == Pivot->
getValue()) {
12676 LeftMBB = FirstLeft->MBB;
12678 LeftMBB =
FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
12679 FuncInfo.MF->insert(BBI, LeftMBB);
12681 {LeftMBB, FirstLeft, LastLeft,
W.GE, Pivot,
W.DefaultProb / 2});
12689 MachineBasicBlock *RightMBB;
12690 if (FirstRight == LastRight && FirstRight->Kind ==
CC_Range &&
12691 W.LT && (FirstRight->High->getValue() + 1ULL) ==
W.LT->getValue()) {
12692 RightMBB = FirstRight->MBB;
12694 RightMBB =
FuncInfo.MF->CreateMachineBasicBlock(
W.MBB->getBasicBlock());
12695 FuncInfo.MF->insert(BBI, RightMBB);
12697 {RightMBB, FirstRight, LastRight, Pivot,
W.LT,
W.DefaultProb / 2});
12703 CaseBlock CB(
ISD::SETLT,
Cond, Pivot,
nullptr, LeftMBB, RightMBB,
W.MBB,
12706 if (
W.MBB == SwitchMBB)
12709 SL->SwitchCases.push_back(CB);
12734 MachineBasicBlock *SwitchMBB =
FuncInfo.MBB;
12742 unsigned PeeledCaseIndex = 0;
12743 bool SwitchPeeled =
false;
12744 for (
unsigned Index = 0;
Index < Clusters.size(); ++
Index) {
12745 CaseCluster &CC = Clusters[
Index];
12746 if (CC.
Prob < TopCaseProb)
12748 TopCaseProb = CC.
Prob;
12749 PeeledCaseIndex =
Index;
12750 SwitchPeeled =
true;
12755 LLVM_DEBUG(
dbgs() <<
"Peeled one top case in switch stmt, prob: "
12756 << TopCaseProb <<
"\n");
12761 MachineBasicBlock *PeeledSwitchMBB =
12763 FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
12766 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
12767 SwitchWorkListItem
W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
12768 nullptr,
nullptr, TopCaseProb.
getCompl()};
12769 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
12771 Clusters.erase(PeeledCaseIt);
12772 for (CaseCluster &CC : Clusters) {
12774 dbgs() <<
"Scale the probablity for one cluster, before scaling: "
12775 << CC.
Prob <<
"\n");
12779 PeeledCaseProb = TopCaseProb;
12780 return PeeledSwitchMBB;
12783void SelectionDAGBuilder::visitSwitch(
const SwitchInst &
SI) {
12785 BranchProbabilityInfo *BPI =
FuncInfo.BPI;
12787 Clusters.reserve(
SI.getNumCases());
12788 for (
auto I :
SI.cases()) {
12789 MachineBasicBlock *Succ =
FuncInfo.getMBB(
I.getCaseSuccessor());
12790 const ConstantInt *CaseVal =
I.getCaseValue();
12791 BranchProbability Prob =
12793 : BranchProbability(1,
SI.getNumCases() + 1);
12797 MachineBasicBlock *DefaultMBB =
FuncInfo.getMBB(
SI.getDefaultDest());
12806 MachineBasicBlock *PeeledSwitchMBB =
12807 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
12810 MachineBasicBlock *SwitchMBB =
FuncInfo.MBB;
12811 if (Clusters.empty()) {
12812 assert(PeeledSwitchMBB == SwitchMBB);
12814 if (DefaultMBB != NextBlock(SwitchMBB)) {
12821 SL->findJumpTables(Clusters, &SI,
getCurSDLoc(), DefaultMBB,
DAG.getPSI(),
12823 SL->findBitTestClusters(Clusters, &SI);
12826 dbgs() <<
"Case clusters: ";
12827 for (
const CaseCluster &
C : Clusters) {
12833 C.Low->getValue().print(
dbgs(),
true);
12834 if (
C.Low !=
C.High) {
12836 C.High->getValue().print(
dbgs(),
true);
12843 assert(!Clusters.empty());
12847 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
12851 DefaultMBB ==
FuncInfo.getMBB(
SI.getDefaultDest()))
12854 {PeeledSwitchMBB,
First,
Last,
nullptr,
nullptr, DefaultProb});
12856 while (!WorkList.
empty()) {
12858 unsigned NumClusters =
W.LastCluster -
W.FirstCluster + 1;
12863 splitWorkItem(WorkList, W,
SI.getCondition(), SwitchMBB);
12867 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, DefaultMBB);
12871void SelectionDAGBuilder::visitStepVector(
const CallInst &
I) {
12872 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12878void SelectionDAGBuilder::visitVectorReverse(
const CallInst &
I) {
12879 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12884 assert(VT ==
V.getValueType() &&
"Malformed vector.reverse!");
12893 SmallVector<int, 8>
Mask;
12895 for (
unsigned i = 0; i != NumElts; ++i)
12896 Mask.push_back(NumElts - 1 - i);
12901void SelectionDAGBuilder::visitVectorDeinterleave(
const CallInst &
I,
12910 EVT OutVT = ValueVTs[0];
12914 for (
unsigned i = 0; i != Factor; ++i) {
12915 assert(ValueVTs[i] == OutVT &&
"Expected VTs to be the same");
12917 DAG.getVectorIdxConstant(OutNumElts * i,
DL));
12923 SDValue Even =
DAG.getVectorShuffle(OutVT,
DL, SubVecs[0], SubVecs[1],
12925 SDValue Odd =
DAG.getVectorShuffle(OutVT,
DL, SubVecs[0], SubVecs[1],
12933 DAG.getVTList(ValueVTs), SubVecs);
12937void SelectionDAGBuilder::visitVectorInterleave(
const CallInst &
I,
12940 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12945 for (
unsigned i = 0; i < Factor; ++i) {
12948 "Expected VTs to be the same");
12966 for (
unsigned i = 0; i < Factor; ++i)
12973void SelectionDAGBuilder::visitFreeze(
const FreezeInst &
I) {
12977 unsigned NumValues = ValueVTs.
size();
12978 if (NumValues == 0)
return;
12983 for (
unsigned i = 0; i != NumValues; ++i)
12988 DAG.getVTList(ValueVTs), Values));
12991void SelectionDAGBuilder::visitVectorSplice(
const CallInst &
I) {
12992 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
12998 const bool IsLeft =
I.getIntrinsicID() == Intrinsic::vector_splice_left;
13013 uint64_t Idx = IsLeft ?
Imm : NumElts -
Imm;
13016 SmallVector<int, 8>
Mask;
13017 for (
unsigned i = 0; i < NumElts; ++i)
13018 Mask.push_back(Idx + i);
13046 assert(
MI->getOpcode() == TargetOpcode::COPY &&
13047 "start of copy chain MUST be COPY");
13048 Reg =
MI->getOperand(1).getReg();
13051 assert(
Reg.isVirtual() &&
"expected COPY of virtual register");
13055 if (
MI->getOpcode() == TargetOpcode::COPY) {
13056 assert(
Reg.isVirtual() &&
"expected COPY of virtual register");
13057 Reg =
MI->getOperand(1).getReg();
13058 assert(
Reg.isPhysical() &&
"expected COPY of physical register");
13061 assert(
MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
13062 "end of copy chain MUST be INLINEASM_BR");
13072void SelectionDAGBuilder::visitCallBrLandingPad(
const CallInst &
I) {
13078 const TargetLowering &TLI =
DAG.getTargetLoweringInfo();
13079 const TargetRegisterInfo *
TRI =
DAG.getSubtarget().getRegisterInfo();
13080 MachineRegisterInfo &MRI =
DAG.getMachineFunction().getRegInfo();
13088 for (
auto &
T : TargetConstraints) {
13089 SDISelAsmOperandInfo OpInfo(
T);
13097 switch (OpInfo.ConstraintType) {
13108 FuncInfo.MBB->addLiveIn(OriginalDef);
13116 ResultVTs.
push_back(OpInfo.ConstraintVT);
13125 ResultVTs.
push_back(OpInfo.ConstraintVT);
13133 DAG.getVTList(ResultVTs), ResultValues);
static unsigned getIntrinsicID(const SDNode *N)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file implements the BitVector class.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
static Value * getCondition(Instruction *I)
const HexagonInstrInfo * TII
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
Module.h This file contains the declarations for the Module class.
static void getRegistersForValue(MachineFunction &MF, MachineIRBuilder &MIRBuilder, GISelAsmOperandInfo &OpInfo, GISelAsmOperandInfo &RefOpInfo)
Assign virtual/physical registers for the specified register operand.
static void computeConstraintToUse(const TargetLowering *TLI, TargetLowering::AsmOperandInfo &OpInfo)
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Machine Check Debug Module
static bool isUndef(const MachineInstr &MI)
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static const Function * getCalledFunction(const Value *V)
This file provides utility analysis objects describing memory locations.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static unsigned getAddressSpace(const Value *V, unsigned MaxLookup)
MachineInstr unsigned OpIdx
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
OptimizedStructLayoutField Field
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static Type * getValueType(Value *V, bool LookThroughCmp=false)
Returns the "element type" of the given value/instruction V.
static bool hasOnlySelectUsers(const Value *Cond)
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL, SDValue &Chain)
Create a LOAD_STACK_GUARD node, and let it carry the target specific global variable if there exists ...
static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index, SDValue &Scale, SelectionDAGBuilder *SDB, const BasicBlock *CurBB, uint64_t ElemSize)
static void failForInvalidBundles(const CallBase &I, StringRef Name, ArrayRef< uint32_t > AllowedBundles)
static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx, const SDLoc &DL, SmallVectorImpl< SDValue > &Ops, SelectionDAGBuilder &Builder)
Add a stack map intrinsic call's live variable operands to a stackmap or patchpoint target node's ope...
static const unsigned MaxParallelChains
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
visitPow - Lower a pow intrinsic.
static const CallBase * FindPreallocatedCall(const Value *PreallocatedSetup)
Given a @llvm.call.preallocated.setup, return the corresponding preallocated call.
static cl::opt< unsigned > SwitchPeelThreshold("switch-peel-threshold", cl::Hidden, cl::init(66), cl::desc("Set the case probability threshold for peeling the case from a " "switch statement. A value greater than 100 will void this " "optimization"))
static cl::opt< bool > InsertAssertAlign("insert-assert-align", cl::init(true), cl::desc("Insert the experimental `assertalign` node."), cl::ReallyHidden)
static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin)
static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG, DILocalVariable *Variable, DebugLoc DL, unsigned Order, SmallVectorImpl< Value * > &Values, DIExpression *Expression)
static bool prepareDAGLevelOperands(ConstraintDecisionInfo &Info, const CallBase &Call, SelectionDAGBuilder &Builder, const TargetLowering &TLI, SelectionDAG &DAG)
Prepare DAG-level operands.
static unsigned findMatchingInlineAsmOperand(unsigned OperandNo, const std::vector< SDValue > &AsmNodeOperands)
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo, SDISelAsmOperandInfo &MatchingOpInfo, SelectionDAG &DAG)
Make sure that the output operand OpInfo and its corresponding input operand MatchingOpInfo have comp...
static void findUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
When an invoke or a cleanupret unwinds to the next EH pad, there are many places it could ultimately ...
static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic)
static BranchProbability scaleCaseProbality(BranchProbability CaseProb, BranchProbability PeeledCaseProb)
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp2 - Lower an exp2 intrinsic.
static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue Scale, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt, const SDLoc &dl)
getF32Constant - Get 32-bit floating point constant.
static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val, const SDLoc &DL, EVT PartVT)
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog10 - Lower a log10 intrinsic.
DenseMap< const Argument *, std::pair< const AllocaInst *, const StoreInst * > > ArgCopyElisionMapTy
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv)
getCopyToPartsVector - Create a series of nodes that contain the specified value split into legal par...
static void getUnderlyingArgRegs(SmallVectorImpl< std::pair< Register, TypeSize > > &Regs, const SDValue &N)
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv=std::nullopt, ISD::NodeType ExtendKind=ISD::ANY_EXTEND)
getCopyToParts - Create a series of nodes that contain the specified value split into legal parts.
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, SelectionDAGBuilder &Builder)
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog2 - Lower a log2 intrinsic.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location, SDISelAsmOperandInfo &OpInfo, SelectionDAG &DAG)
Get a direct memory input to behave well as an indirect operand.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel)
isOnlyUsedInEntryBlock - If the specified argument is only used in the entry block,...
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V, const Twine &ErrMsg)
static bool collectInstructionDeps(SmallMapVector< const Instruction *, bool, 8 > *Deps, const Value *V, SmallMapVector< const Instruction *, bool, 8 > *Necessary=nullptr, unsigned Depth=0)
static void findArgumentCopyElisionCandidates(const DataLayout &DL, FunctionLoweringInfo *FuncInfo, ArgCopyElisionMapTy &ArgCopyElisionCandidates)
Scan the entry block of the function in FuncInfo for arguments that look like copies into a local all...
static bool isFunction(SDValue Op)
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI, const SDLoc &dl)
GetExponent - Get the exponent:
static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg)
static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, SelectionDAG &DAG)
ExpandPowI - Expand a llvm.powi intrinsic.
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog - Lower a log intrinsic.
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC=std::nullopt, std::optional< ISD::NodeType > AssertOp=std::nullopt)
getCopyFromParts - Create a value that contains the specified legal parts combined into the value the...
static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl, SelectionDAG &DAG)
static bool determineConstraints(ConstraintDecisionInfo &Info, TargetLowering::AsmOperandInfoVector &TargetConstraints, const CallBase &Call, SelectionDAGBuilder &Builder, const TargetLowering &TLI, const TargetMachine &TM, SelectionDAG &DAG, const BasicBlock *EHPadBB)
DetermineConstraints - Find the constraints to use for inline asm operands.
static bool constructOperandInfo(ConstraintDecisionInfo &Info, TargetLowering::AsmOperandInfoVector &TargetConstraints, SelectionDAGBuilder &Builder, const TargetLowering &TLI, ExtraFlags &ExtraInfo)
Construct operand info objects.
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl)
GetSignificand - Get the significand and build it into a floating-point number with exponent of 1:
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp - Lower an exp intrinsic.
static const MDNode * getRangeMetadata(const Instruction &I)
static cl::opt< unsigned, true > LimitFPPrecision("limit-float-precision", cl::desc("Generate low-precision inline sequences " "for some float libcalls"), cl::location(LimitFloatPrecision), cl::Hidden, cl::init(0))
static void tryToElideArgumentCopy(FunctionLoweringInfo &FuncInfo, SmallVectorImpl< SDValue > &Chains, DenseMap< int, int > &ArgCopyElisionFrameIndexMap, SmallPtrSetImpl< const Instruction * > &ElidedArgCopyInstrs, ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg, ArrayRef< SDValue > ArgVals, bool &ArgHasUses)
Try to elide argument copies from memory into a local alloca.
static unsigned LimitFloatPrecision
LimitFloatPrecision - Generate low-precision inline sequences for some float libcalls (6,...
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC)
getCopyFromPartsVector - Create a value that contains the specified legal parts combined into the val...
static bool InBlock(const Value *V, const BasicBlock *BB)
static FPClassTest getNoFPClass(const Instruction &I)
static LLVM_ATTRIBUTE_ALWAYS_INLINE MVT::SimpleValueType getSimpleVT(const uint8_t *MatcherTable, size_t &MatcherIndex)
getSimpleVT - Decode a value in MatcherTable, if it's a VBR encoded value, use GetVBR to decode it.
This file defines the SmallPtrSet class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static SymbolRef::Type getType(const Symbol *Sym)
uint16_t RegSizeInBits(const MCRegisterInfo &MRI, MCRegister RegNo)
static const fltSemantics & IEEEsingle()
static LLVM_ABI Semantics SemanticsToEnum(const llvm::fltSemantics &Sem)
static LLVM_ABI const fltSemantics * getArbitraryFPSemantics(StringRef Format)
Returns the fltSemantics for a given arbitrary FP format string, or nullptr if invalid.
Class for arbitrary precision integers.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
an instruction to allocate memory on the stack
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
LLVM_ABI std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
This class represents an incoming formal argument to a Function.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Check if an argument has a given attribute.
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
@ USubCond
Subtract only if no unsigned overflow.
@ FMinimum
*p = minimum(old, v) minimum matches the behavior of llvm.minimum.
@ Min
*p = old <signed v ? old : v
@ USubSat
*p = usub.sat(old, v) usub.sat matches the behavior of llvm.usub.sat.
@ FMaximum
*p = maximum(old, v) maximum matches the behavior of llvm.maximum.
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMaximumNum
*p = maximumnum(old, v) maximumnum matches the behavior of llvm.maximumnum.
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
@ FMinimumNum
*p = minimumnum(old, v) minimumnum matches the behavior of llvm.minimumnum.
This class holds the attributes for a particular argument, parameter, function, or return value.
LLVM Basic Block Representation.
const Function * getParent() const
Return the enclosing method, or null if none.
LLVM_ABI InstListType::const_iterator getFirstNonPHIIt() const
Returns an iterator to the first instruction in this block that is not a PHINode instruction.
InstListType::const_iterator const_iterator
LLVM_ABI bool isEntryBlock() const
Return true if this is the entry block of the containing function.
LLVM_ABI InstListType::const_iterator getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
This class represents a no-op cast from one type to another.
The address of a basic block.
Analysis providing branch probability information.
LLVM_ABI BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
LLVM_ABI bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const
Test if an edge is hot relative to other out-edges of the Src.
static uint32_t getDenominator()
static BranchProbability getOne()
static BranchProbability getUnknown()
uint32_t getNumerator() const
LLVM_ABI uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
CallingConv::ID getCallingConv() const
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
unsigned arg_size() const
AttributeList getAttributes() const
Return the attributes for this call.
LLVM_ABI bool isTailCall() const
Tests if this call site is marked as a tail call.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Conditional Branch instruction.
Class for constant bytes.
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
A constant value that is initialized with an expression using other constant values.
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
A signed pointer, in the ptrauth sense.
uint64_t getZExtValue() const
Constant Vector Declarations.
This is an important base class in LLVM.
This is the common base class for constrained floating point intrinsics.
LLVM_ABI std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
LLVM_ABI unsigned getNonMetadataArgCount() const
LLVM_ABI bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static bool fragmentsOverlap(const FragmentInfo &A, const FragmentInfo &B)
Check if fragments overlap between a pair of FragmentInfos.
static LLVM_ABI DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static LLVM_ABI std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
LLVM_ABI uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static LLVM_ABI std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
static LLVM_ABI const DIExpression * convertToUndefExpression(const DIExpression *Expr)
Removes all elements from Expr that do not apply to an undef debug value, which includes every operat...
static LLVM_ABI DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static LLVM_ABI DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
Base class for variables.
LLVM_ABI std::optional< uint64_t > getSizeInBits() const
Determines the size of the variable's type.
A parsed version of the target data layout string in and methods for querying it.
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
Record of a variable value-assignment, aka a non instruction representation of the dbg....
LocationType getType() const
LLVM_ABI Value * getVariableLocationOp(unsigned OpIdx) const
DIExpression * getExpression() const
DILocalVariable * getVariable() const
LLVM_ABI iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
LLVM_ABI DILocation * getInlinedAt() const
iterator find(const_arg_type_t< KeyT > Val)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
Diagnostic information for inline asm reporting.
static constexpr ElementCount getFixed(ScalarTy MinVal)
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
constexpr bool isScalar() const
Exactly one element.
Lightweight error class with error context and mandatory checking.
Class representing an expression and its matching format.
This instruction compares its operands according to the predicate given to the constructor.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
bool allowReassoc() const
Flag queries.
An instruction for ordering other memory operations.
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
BranchProbabilityInfo * BPI
MachineBasicBlock * getMBB(const BasicBlock *BB) const
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block.
const LiveOutInfo * GetLiveOutRegInfo(Register Reg)
GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the register is a PHI destinat...
MachineBasicBlock * MBB
MBB - The current block.
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
Type * getReturnType() const
Data structure describing the variable locations in a function.
const BasicBlock & getEntryBlock() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const
check if an attributes is in the list of attributes.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this functio...
Constant * getPersonalityFn() const
Get the personality function associated with this function.
AttributeList getAttributes() const
Return the attribute list for this Function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Garbage collection metadata for a single function.
bool hasNoUnsignedSignedWrap() const
bool hasNoUnsignedWrap() const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
void setMemConstraint(ConstraintCode C)
setMemConstraint - Augment an existing flag with the constraint code for a memory constraint.
This instruction inserts a struct field of array element value into an aggregate value.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
LLVM_ABI AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
@ MIN_INT_BITS
Minimum number of bits that can be specified.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
@ OB_clang_arc_attachedcall
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
The landingpad instruction holds all of the information necessary to generate correct exception handl...
A helper class to return the specified delimiter string after the first invocation of operator String...
An instruction for reading from memory.
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
static LocationSize upperBound(uint64_t Value)
LLVM_ABI MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
LLVM_ABI MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
@ INVALID_SIMPLE_VALUE_TYPE
uint64_t getScalarSizeInBits() const
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool bitsGE(MVT VT) const
Return true if this has no less bits than VT.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
succ_iterator succ_begin()
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
SmallVectorImpl< MachineBasicBlock * >::iterator succ_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void setIsEHContTarget(bool V=true)
Indicates if this is a target of Windows EH Continuation Guard.
void setIsEHFuncletEntry(bool V=true)
Indicates if this is the entry block of an EH funclet.
MachineInstrBundleIterator< MachineInstr > iterator
void setIsEHScopeEntry(bool V=true)
Indicates if this is the entry block of an EH scope, i.e., the block that used to have a catchpa...
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable)
Marks the immutability of an object.
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
void setIsAliasedObjectIndex(int ObjectIdx, bool IsAliased)
Set "maybe pointed to by an LLVM IR value" for an object.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
void RemoveStackObject(int ObjectIdx)
Remove or mark dead a statically sized stack object.
void setFunctionContextIndex(int I)
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site)
Map the begin label for a call site.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD)
Record annotations associated with a particular label.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
bool hasEHFunclets() const
void setHasEHContTarget(bool V)
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
static MachineOperand CreateFI(int Idx)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
def_iterator def_begin(Register RegNo) const
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI MCRegister getLiveInPhysReg(Register VReg) const
getLiveInPhysReg - If VReg is a live-in virtual register, return the corresponding live-in physical r...
An SDNode that represents everything that will be needed to construct a MachineInstr.
bool contains(const KeyT &Key) const
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
static MemoryLocation getAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location after Ptr, while remaining within the underlying objec...
A Module instance is used to store all the information related to an LLVM module.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
Holds the information from a dbg_label node through SDISel.
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(Register VReg)
static SDDbgOperand fromConst(const Value *Const)
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
SelectionDAGBuilder - This is the common target-independent lowering implementation that is parameter...
SDValue getValue(const Value *V)
getValue - Return an SDValue for the given Value.
bool shouldKeepJumpConditionsTogether(const FunctionLoweringInfo &FuncInfo, const CondBrInst &I, Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs, TargetLoweringBase::CondMergingParams Params) const
DenseMap< const Constant *, Register > ConstantsOut
void addDanglingDebugInfo(SmallVectorImpl< Value * > &Values, DILocalVariable *Var, DIExpression *Expr, bool IsVariadic, DebugLoc DL, unsigned Order)
Register a dbg_value which relies on a Value which we have not yet seen.
void visitDbgInfo(const Instruction &I)
void clearDanglingDebugInfo()
Clear the dangling debug information map.
SDValue lowerStartEH(SDValue Chain, const BasicBlock *EHPadBB, MCSymbol *&BeginLabel)
void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall, bool IsMustTailCall, const BasicBlock *EHPadBB=nullptr, const TargetLowering::PtrAuthInfo *PAI=nullptr)
void clear()
Clear out the current SelectionDAG and the associated state and prepare this SelectionDAGBuilder obje...
void visitBitTestHeader(SwitchCG::BitTestBlock &B, MachineBasicBlock *SwitchBB)
visitBitTestHeader - This function emits necessary code to produce value suitable for "bit tests"
void LowerStatepoint(const GCStatepointInst &I, const BasicBlock *EHPadBB=nullptr)
std::unique_ptr< SDAGSwitchLowering > SL
SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I, SDValue Op)
bool HasTailCall
This is set to true if a call in the current block has been translated as a tail call.
bool ShouldEmitAsBranches(const std::vector< SwitchCG::CaseBlock > &Cases)
If the set of cases should be emitted as a series of branches, return true.
void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
EmitBranchForMergedCondition - Helper method for FindMergedConditions.
void LowerDeoptimizeCall(const CallInst *CI)
void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee, const BasicBlock *EHPadBB)
SwiftErrorValueTracking & SwiftError
Information about the swifterror values used throughout the function.
SDValue getNonRegisterValue(const Value *V)
getNonRegisterValue - Return an SDValue for the given Value, but don't look in FuncInfo....
const TargetTransformInfo * TTI
DenseMap< MachineBasicBlock *, SmallVector< unsigned, 4 > > LPadToCallSiteMap
Map a landing pad to the call site indexes.
SDValue lowerNoFPClassToAssertNoFPClass(SelectionDAG &DAG, const Instruction &I, SDValue Op)
void handleDebugDeclare(Value *Address, DILocalVariable *Variable, DIExpression *Expression, DebugLoc DL)
StatepointLoweringState StatepointLowering
State used while lowering a statepoint sequence (gc_statepoint, gc_relocate, and gc_result).
void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB, BranchProbability BranchProbToNext, Register Reg, SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB)
visitBitTestCase - this function produces one "bit test"
bool canTailCall(const CallBase &CB) const
void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI, const CallBase *Call, unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy, AttributeSet RetAttrs, bool IsPatchPoint)
Populate a CallLoweringInfo (into CLI) based on the properties of the call being lowered.
void CopyValueToVirtualRegister(const Value *V, Register Reg, ISD::NodeType ExtendType=ISD::ANY_EXTEND)
void salvageUnresolvedDbgValue(const Value *V, DanglingDebugInfo &DDI)
For the given dangling debuginfo record, perform last-ditch efforts to resolve the debuginfo to somet...
SmallVector< SDValue, 8 > PendingLoads
Loads are not emitted to the program immediately.
GCFunctionInfo * GFI
Garbage collection metadata for the function.
void init(GCFunctionInfo *gfi, BatchAAResults *BatchAA, AssumptionCache *AC, const TargetLibraryInfo *li, const TargetTransformInfo &TTI)
SDValue getRoot()
Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict) items.
void ExportFromCurrentBlock(const Value *V)
ExportFromCurrentBlock - If this condition isn't known to be exported from the current basic block,...
DebugLoc getCurDebugLoc() const
void resolveOrClearDbgInfo()
Evict any dangling debug information, attempting to salvage it first.
std::pair< SDValue, SDValue > lowerInvokable(TargetLowering::CallLoweringInfo &CLI, const BasicBlock *EHPadBB=nullptr)
SDValue getMemoryRoot()
Return the current virtual root of the Selection DAG, flushing any PendingLoad items.
void resolveDanglingDebugInfo(const Value *V, SDValue Val)
If we saw an earlier dbg_value referring to V, generate the debug data structures now that we've seen...
SDLoc getCurSDLoc() const
void visit(const Instruction &I)
void dropDanglingDebugInfo(const DILocalVariable *Variable, const DIExpression *Expr)
If we have dangling debug info that describes Variable, or an overlapping part of variable considerin...
SDValue getCopyFromRegs(const Value *V, Type *Ty)
If there was virtual register allocated for the value V emit CopyFromReg of the specified type Ty.
void CopyToExportRegsIfNeeded(const Value *V)
CopyToExportRegsIfNeeded - If the given value has virtual registers created for it,...
void handleKillDebugValue(DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order)
Create a record for a kill location debug intrinsic.
void visitJumpTable(SwitchCG::JumpTable &JT)
visitJumpTable - Emit JumpTable node in the current MBB
SDValue getFPOperationRoot(fp::ExceptionBehavior EB)
Return the current virtual root of the Selection DAG, flushing PendingConstrainedFP or PendingConstra...
void visitJumpTableHeader(SwitchCG::JumpTable &JT, SwitchCG::JumpTableHeader &JTH, MachineBasicBlock *SwitchBB)
visitJumpTableHeader - This function emits necessary code to produce index in the JumpTable from swit...
void LowerCallSiteWithPtrAuthBundle(const CallBase &CB, const BasicBlock *EHPadBB)
static const unsigned LowestSDNodeOrder
Lowest valid SDNodeOrder.
void LowerDeoptimizingReturn()
FunctionLoweringInfo & FuncInfo
Information about the function as a whole.
void setValue(const Value *V, SDValue NewN)
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, Instruction::BinaryOps Opc, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
const TargetLibraryInfo * LibInfo
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB)
void visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineBasicBlock *ParentBB)
Codegen a new tail for a stack protector check ParentMBB which has had its tail spliced into a stack ...
bool handleDebugValue(ArrayRef< const Value * > Values, DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order, bool IsVariadic)
For a given list of Values, attempt to create and record a SDDbgValue in the SelectionDAG.
SDValue getControlRoot()
Similar to getRoot, but instead of flushing all the PendingLoad items, flush all the PendingExports (...
void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last)
When an MBB was split during scheduling, update the references that need to refer to the last resulti...
SDValue getValueImpl(const Value *V)
getValueImpl - Helper function for getValue and getNonRegisterValue.
void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB)
visitSwitchCase - Emits the necessary code to represent a single node in the binary search tree resul...
void visitSPDescriptorFailure(StackProtectorDescriptor &SPD)
Codegen the failure basic block for a stack protector check.
std::unique_ptr< FunctionLoweringInfo > FuncInfo
SmallPtrSet< const Instruction *, 4 > ElidedArgCopyInstrs
const TargetLowering * TLI
MachineRegisterInfo * RegInfo
std::unique_ptr< SwiftErrorValueTracking > SwiftError
virtual void emitFunctionEntryCode()
std::unique_ptr< SelectionDAGBuilder > SDB
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemccpy(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Dst, SDValue Src, SDValue C, SDValue Size, const CallInst *CI) const
Emit target-specific code that performs a memccpy, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, SDValue MaxLength, MachinePointerInfo SrcPtrInfo) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, const CallInst *CI) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrstr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, const CallInst *CI) const
Emit target-specific code that performs a strstr, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Src, SDValue Char, SDValue Length, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memchr, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo, const CallInst *CI) const
Emit target-specific code that performs a strcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, const CallInst *CI) const
Emit target-specific code that performs a memcmp/bcmp, in cases where that is faster than a libcall.
virtual SDValue EmitTargetCodeForSetTag(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Addr, SDValue Size, MachinePointerInfo DstPtrInfo, bool ZeroData) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dest, SDValue Src, MachinePointerInfo DestPtrInfo, MachinePointerInfo SrcPtrInfo, bool isStpcpy, const CallInst *CI) const
Emit target-specific code that performs a strcpy or stpcpy, in cases where that is faster than a libc...
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
const TargetSubtargetInfo & getSubtarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI bool shouldOptForSize() const
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
LLVM_ABI void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
LLVM_ABI SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
LLVM_ABI void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
LLVM_ABI SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label)
LLVM_ABI SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
const LibcallLoweringInfo & getLibcalls() const
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void swap(SmallVectorImpl &RHS)
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
MachineBasicBlock * getSuccessMBB()
MachineBasicBlock * getFailureMBB()
MachineBasicBlock * getParentMBB()
bool shouldEmitFunctionBasedCheckStackProtector() const
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Information about stack frame layout on the target.
virtual TargetStackID::Value getStackIDForScalableVectors() const
Returns the StackID that scalable vectors should be associated with.
Provides information about what library functions are available for the current target.
virtual Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Returns the desired alignment for ByVal or InAlloca aggregate function arguments in the caller parame...
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Function * getSSPStackGuardCheck(const Module &M, const LibcallLoweringInfo &Libcalls) const
If the target has a standard stack protection check function that performs validation and error handl...
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual bool isLegalScaleForGatherScatter(uint64_t Scale, uint64_t ElemSize) const
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr, CodeGenOptLevel OptLevel=CodeGenOptLevel::Default) const
virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const
Returns true if the index type for a masked gather/scatter requires extending.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
Returns the type for the shift amount of a shift opcode.
virtual Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const
Certain targets have context sensitive alignment requirements, where one type has the alignment requi...
MachineMemOperand::Flags getVPIntrinsicMemOperandFlags(const VPIntrinsic &VPIntrin) const
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual bool shouldExpandVectorMatch(EVT VT, unsigned SearchSize) const
Return true if the @llvm.experimental.vector.match intrinsic should be expanded for vector type ‘VT’ ...
virtual bool isProfitableToCombineMinNumMaxNum(EVT VT) const
virtual MVT getFenceOperandTy(const DataLayout &DL) const
Return the type for operands of fence.
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF, bool IsScalable) const
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual MVT hasFastEqualityCompare(unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the give...
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
virtual void getTgtMemIntrinsic(SmallVectorImpl< IntrinsicInfo > &Infos, const CallBase &I, MachineFunction &MF, unsigned Intrinsic) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
virtual bool signExtendConstant(const ConstantInt *C) const
Return true if this constant should be sign extended when promoting to a larger type.
virtual Value * getSDagStackGuard(const Module &M, const LibcallLoweringInfo &Libcalls) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
bool supportsUnalignedAtomics() const
Whether the target supports unaligned atomic operations.
std::vector< ArgListEntry > ArgListTy
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool supportKCFIBundles() const
Return true if the target supports kcfi operand bundles.
virtual bool supportPtrAuthBundles() const
Return true if the target supports ptrauth operand bundles.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
virtual SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue &NewLoad, SDValue Ptr, SDValue PassThru, SDValue Mask) const
virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const
Target-specific splitting of values into parts that fit a register storing a legal type.
virtual SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const
Target-specific combining of register parts into its original value.
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to u...
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
virtual SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue Ptr, SDValue Val, SDValue Mask) const
virtual bool useLoadStackGuardNode(const Module &M) const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::LibcallImpl LibcallImpl, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
virtual bool isInlineAsmTargetBranch(const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use eithe...
virtual MVT getJumpTableRegTy(const DataLayout &DL) const
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &, const Type *RetTy) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
Primary interface to the complete machine description for the target machine.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
CodeModel::Model getCodeModel() const
Returns the code model.
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
unsigned getID() const
Return the register class ID number.
const MCPhysReg * iterator
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
LLVM_ABI bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static LLVM_ABI IntegerType * getInt1Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
static LLVM_ABI IntegerType * getIntNTy(LLVMContext &C, unsigned N)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
bool isVoidTy() const
Return true if this is 'void'.
Unconditional Branch instruction.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
LLVM_ABI CmpInst::Predicate getPredicate() const
This is the common base class for vector predication intrinsics.
static LLVM_ABI std::optional< unsigned > getVectorLengthParamPos(Intrinsic::ID IntrinsicID)
LLVM_ABI MaybeAlign getPointerAlignment() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< user_iterator > users()
LLVM_ABI const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Base class of all SIMD vector types.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
static constexpr bool isKnownLE(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
const ParentTy * getParent() const
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr char SymbolName[]
Key for Kernel::Metadata::mSymbolName.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ X86_VectorCall
MSVC calling convention that passes vectors and vector aggregates in SSE registers.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ CONVERGENCECTRL_ANCHOR
The llvm.experimental.convergence.* intrinsics.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ SET_FPENV
Sets the current floating-point environment.
@ ATOMIC_LOAD_FMINIMUMNUM
@ LOOP_DEPENDENCE_RAW_MASK
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ COND_LOOP
COND_LOOP is a conditional branch to self, used for implementing efficient conditional traps.
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ STACKADDRESS
STACKADDRESS - Represents the llvm.stackaddress intrinsic.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ RESET_FPENV
Set floating-point environment to default state.
@ ADD
Simple integer binary arithmetic operators.
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ SET_FPMODE
Sets the current dynamic floating-point control modes.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ CTTZ_ELTS
Returns the number of trailing (least significant) zero elements in a vector.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ VECTOR_FIND_LAST_ACTIVE
Finds the index of the last active mask element Operands: Mask.
@ FMODF
FMODF - Decomposes the operand into integral and fractional parts, each having the same type and sign...
@ FATAN2
FATAN2 - atan2, inspired by libm.
@ FSINCOSPI
FSINCOSPI - Compute both the sine and cosine times pi more accurately than FSINCOS(pi*x),...
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ EH_SJLJ_SETUP_DISPATCH
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here.
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ RESET_FPMODE
Sets default dynamic floating-point control modes.
@ FMULADD
FMULADD - Performs a * b + c, with, or without, intermediate rounding.
@ FPTRUNC_ROUND
FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
@ FAKE_USE
FAKE_USE represents a use of the operand but does not do anything.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ CLMUL
Carry-less multiplication operations.
@ INIT_TRAMPOLINE
INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ CONVERT_FROM_ARBITRARY_FP
CONVERT_FROM_ARBITRARY_FP - This operator converts from an arbitrary floating-point represented as an...
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
@ ANNOTATION_LABEL
ANNOTATION_LABEL - Represents a mid basic block label used by annotations.
@ SET_ROUNDING
Set rounding mode.
@ CONVERGENCECTRL_GLUE
This does not correspond to any convergence control intrinsic.
@ SIGN_EXTEND
Conversion operators.
@ PREALLOCATED_SETUP
PREALLOCATED_SETUP - This has 2 operands: an input chain and a SRCVALUE with the preallocated call Va...
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readsteadycounter intrinsic.
@ ADDROFRETURNADDR
ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
@ BR
Control flow instructions. These all have token chains.
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ SSUBO
Same for subtraction.
@ PREALLOCATED_ARG
PREALLOCATED_ARG - This has 3 operands: an input chain, a SRCVALUE with the preallocated call Value,...
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor to...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ GET_ACTIVE_LANE_MASK
GET_ACTIVE_LANE_MASK - this corresponds to the llvm.get.active.lane.mask intrinsic.
@ BasicBlock
Various leaf nodes.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ ARITH_FENCE
ARITH_FENCE - This corresponds to an arithmetic fence intrinsic.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ CLEANUPRET
CLEANUPRET - Represents a return from a cleanup block funclet.
@ GET_FPMODE
Reads the current dynamic floating-point control modes.
@ GET_FPENV
Gets the current floating-point environment.
@ SHL
Shift and rotation operations.
@ AssertNoFPClass
AssertNoFPClass - These nodes record if a register contains a float value that is known to be not som...
@ PtrAuthGlobalAddress
A ptrauth constant.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a sc...
@ LOCAL_RECOVER
LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum maximum on two values, following IEEE-754 definition...
@ UBSANTRAP
UBSANTRAP - Trap with an immediate describing the kind of sanitizer failure.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ PATCHPOINT
The llvm.experimental.patchpoint.
@ SMULO
Same for multiplication.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ VECTOR_SPLICE_LEFT
VECTOR_SPLICE_LEFT(VEC1, VEC2, OFFSET) - Shifts CONCAT_VECTORS(VEC1, VEC2) left by OFFSET elements an...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ MASKED_UDIV
Masked vector arithmetic that returns poison on disabled lanes.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ PCMARKER
PCMARKER - This corresponds to the pcmarker intrinsic.
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
@ ATOMIC_LOAD_FMAXIMUMNUM
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ RELOC_NONE
Issue a no-op relocation against a given symbol at the current location.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ VECTOR_SPLICE_RIGHT
VECTOR_SPLICE_RIGHT(VEC1, VEC2, OFFSET) - Shifts CONCAT_VECTORS(VEC1,VEC2) right by OFFSET elements a...
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ STACKMAP
The llvm.experimental.stackmap intrinsic.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ VECTOR_COMPRESS
VECTOR_COMPRESS(Vec, Mask, Passthru) consecutively place vector elements based on mask e....
@ SPONENTRY
SPONENTRY - Represents the llvm.sponentry intrinsic.
@ CLEAR_CACHE
llvm.clear_cache intrinsic Operands: Input Chain, Start Address, End Address Outputs: Output Chain
@ INLINEASM
INLINEASM - Represents an inline asm block.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ BRCOND
BRCOND - Conditional branch.
@ CATCHRET
CATCHRET - Represents a return from a catch block funclet.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2, ...) - Returns N vectors from N input vectors, where N is the factor ...
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ ADJUST_TRAMPOLINE
ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
@ LOOP_DEPENDENCE_WAR_MASK
The llvm.loop.dependence.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
IntrinsicID_match m_VScale()
Matches a call to llvm.vscale().
auto m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
std::pair< JumpTableHeader, JumpTable > JumpTableBlock
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
std::vector< CaseCluster > CaseClusterVector
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
SmallVector< SwitchWorkListItem, 4 > SwitchWorkList
CaseClusterVector::iterator CaseClusterIt
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebMayTrap
This corresponds to "fpexcept.maytrap".
@ ebIgnore
This corresponds to "fpexcept.ignore".
NodeAddr< FuncNode * > Func
friend class Instruction
Iterator for Instructions in a `BasicBlock`.
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
FunctionAddr VTableAddr Value
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
LLVM_ABI void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs=nullptr, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
LLVM_ABI void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
static ConstantRange getRange(Value *Op, SCCPSolver &Solver, const SmallPtrSetImpl< Value * > &InsertedValues)
Helper for getting ranges from Solver.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
auto cast_or_null(const Y &Val)
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
gep_type_iterator gep_type_end(const User *GEP)
constexpr auto equal_to(T &&Arg)
Functor variant of std::equal_to that can be used as a UnaryPredicate in functional algorithms like a...
constexpr int popcount(T Value) noexcept
Count the number of set bits in a value.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
detail::concat_range< ValueT, RangeTs... > concat(RangeTs &&...Ranges)
Returns a concatenated range across two or more ranges.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
void ComputeValueTypes(const DataLayout &DL, Type *Ty, SmallVectorImpl< Type * > &Types, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
Given an LLVM IR type, compute non-aggregate subtypes.
auto dyn_cast_or_null(const Y &Val)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
LLVM_ABI llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
detail::zippy< detail::zip_first, T, U, Args... > zip_first(T &&t, U &&u, Args &&...args)
zip iterator that, for the sake of efficiency, assumes the first iteratee to be the shortest.
void sort(IteratorTy Start, IteratorTy End)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
generic_gep_type_iterator<> gep_type_iterator
FunctionAddr VTableAddr Count
auto succ_size(const MachineBasicBlock *BB)
bool hasSingleElement(ContainerTy &&C)
Returns true if the given container only contains a single element.
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
LLVM_ATTRIBUTE_VISIBILITY_DEFAULT AnalysisKey InnerAnalysisManagerProxy< AnalysisManagerT, IRUnitT, ExtraArgTs... >::Key
LLVM_ABI Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Global
Append to llvm.global_dtors.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
FunctionAddr VTableAddr uintptr_t uintptr_t Data
LLVM_ABI bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
LLVM_ABI llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ Sub
Subtraction of integers.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, can return either (or both operands are known non-NaN).
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM, bool ReturnsFirstArg=false)
Test if the given instruction is in a position to be optimized with a tail-call.
DWARFExpression::Operation Op
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if w...
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has...
LLVM_ABI std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
gep_type_iterator gep_type_begin(const User *GEP)
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
LLVM_ABI Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
@ Default
The result value is uniform if and only if all operands are uniform.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
uint64_t getScalarStoreSize() const
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
static LLVM_ABI EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
EVT changeVectorElementType(LLVMContext &Context, EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
bool isRISCVVectorTuple() const
Return true if this is a RISCV vector tuple type.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
EVT changeElementType(LLVMContext &Context, EVT EltVT) const
Return a VT for a type whose attributes match ourselves with the exception of the element type that i...
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
void setPointerAddrSpace(unsigned AS)
void setOrigAlign(Align A)
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
This class contains a discriminated union of information about pointers in memory operands,...
static LLVM_ABI MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
A lightweight accessor for an operand bundle meant to be passed around by value.
This struct represents the registers (physical or virtual) that a particular set of values is assigne...
SmallVector< std::pair< Register, TypeSize >, 4 > getRegsAndSizes() const
Return a list of registers and their sizes.
SmallVector< unsigned, 4 > RegCount
This list holds the number of registers for each value.
bool isABIMangled() const
SmallVector< EVT, 4 > ValueVTs
The value types of the values, which may not be legal, and may need be promoted or synthesized from o...
SmallVector< Register, 4 > Regs
This list holds the registers assigned to the values.
void AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching, unsigned MatchingIdx, const SDLoc &dl, SelectionDAG &DAG, std::vector< SDValue > &Ops) const
Add this value to the specified inlineasm node operand list.
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr) const
Emit a series of CopyFromReg nodes that copies from this value and returns the result as a ValueVTs v...
SmallVector< MVT, 4 > RegVTs
The value types of the registers.
void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr, ISD::NodeType PreferredExtendType=ISD::ANY_EXTEND) const
Emit a series of CopyToReg nodes that copies the specified value into the registers specified by this...
std::optional< CallingConv::ID > CallConv
Records if this value needs to be treated in an ABI dependant manner, different to normal type legali...
bool occupiesMultipleRegs() const
Check if the total RegCount is greater than one.
These are IR-level optimization flags that may be propagated to SDNodes.
void copyFMF(const FPMathOperator &FPMO)
Propagate the fast-math-flags from an IR FPMathOperator.
void setUnpredictable(bool b)
bool hasAllowReassociation() const
void setNoUnsignedWrap(bool b)
void setNoSignedWrap(bool b)
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
A MapVector that performs no allocations if smaller than a certain size.
MachineBasicBlock * Default
BranchProbability DefaultProb
MachineBasicBlock * Parent
bool FallthroughUnreachable
MachineBasicBlock * ThisBB
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...
BranchProbability TrueProb
BranchProbability FalseProb
MachineBasicBlock * TrueBB
MachineBasicBlock * FalseBB
SDLoc DL
The debug location of the instruction this CaseBlock was produced from.
static CaseCluster range(const ConstantInt *Low, const ConstantInt *High, MachineBasicBlock *MBB, BranchProbability Prob)
Register Reg
The virtual register containing the index of the jump table entry to jump to.
MachineBasicBlock * Default
The MBB of the default bb, which is a successor of the range check MBB.
unsigned JTI
The JumpTableIndex for this jump table in the function.
MachineBasicBlock * MBB
The MBB into which to emit the code for the indirect jump.
std::optional< SDLoc > SL
The debug location of the instruction this JumpTable was produced from.
This contains information for each constraint that we are lowering.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setConvergent(bool Value=true)
CallLoweringInfo & setDeactivationSymbol(GlobalValue *Sym)
CallLoweringInfo & setCFIType(const ConstantInt *Type)
SmallVector< ISD::InputArg, 32 > Ins
bool IsPostTypeLegalization
SmallVector< SDValue, 4 > InVals
Type * OrigRetTy
Original unlegalized return type.
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setIsPreallocated(bool Value=true)
CallLoweringInfo & setConvergenceControlToken(SDValue Token)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setPtrAuth(PtrAuthInfo Value)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setDiscardResult(bool Value=true)
This structure contains the information necessary for lowering pointer-authenticating indirect calls.
void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin, MCSymbol *InvokeEnd)