79#include "llvm/IR/IntrinsicsAArch64.h"
80#include "llvm/IR/IntrinsicsAMDGPU.h"
81#include "llvm/IR/IntrinsicsWebAssembly.h"
113using namespace PatternMatch;
114using namespace SwitchCG;
116#define DEBUG_TYPE "isel"
124 cl::desc(
"Insert the experimental `assertalign` node."),
129 cl::desc(
"Generate low-precision inline sequences "
130 "for some float libcalls"),
136 cl::desc(
"Set the case probability threshold for peeling the case from a "
137 "switch statement. A value greater than 100 will void this "
157 const SDValue *Parts,
unsigned NumParts,
160 std::optional<CallingConv::ID>
CC);
169 unsigned NumParts,
MVT PartVT,
EVT ValueVT,
const Value *V,
171 std::optional<CallingConv::ID>
CC = std::nullopt,
172 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
176 PartVT, ValueVT,
CC))
183 assert(NumParts > 0 &&
"No parts to assemble!");
194 unsigned RoundBits = PartBits * RoundParts;
195 EVT RoundVT = RoundBits == ValueBits ?
201 if (RoundParts > 2) {
205 PartVT, HalfVT, V, InChain);
216 if (RoundParts < NumParts) {
218 unsigned OddParts = NumParts - RoundParts;
221 OddVT, V, InChain,
CC);
238 assert(ValueVT ==
EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
249 !PartVT.
isVector() &&
"Unexpected split");
261 if (PartEVT == ValueVT)
265 ValueVT.
bitsLT(PartEVT)) {
278 if (ValueVT.
bitsLT(PartEVT)) {
283 Val = DAG.
getNode(*AssertOp,
DL, PartEVT, Val,
298 llvm::Attribute::StrictFP)) {
300 DAG.
getVTList(ValueVT, MVT::Other), InChain, Val,
312 if (PartEVT == MVT::x86mmx && ValueVT.
isInteger() &&
313 ValueVT.
bitsLT(PartEVT)) {
322 const Twine &ErrMsg) {
323 const Instruction *
I = dyn_cast_or_null<Instruction>(V);
327 const char *AsmError =
", possible invalid constraint for vector type";
328 if (
const CallInst *CI = dyn_cast<CallInst>(
I))
329 if (CI->isInlineAsm())
341 const SDValue *Parts,
unsigned NumParts,
344 std::optional<CallingConv::ID> CallConv) {
346 assert(NumParts > 0 &&
"No parts to assemble!");
347 const bool IsABIRegCopy = CallConv.has_value();
356 unsigned NumIntermediates;
361 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT,
362 NumIntermediates, RegisterVT);
366 NumIntermediates, RegisterVT);
369 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
371 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
374 "Part type sizes don't match!");
378 if (NumIntermediates == NumParts) {
381 for (
unsigned i = 0; i != NumParts; ++i)
383 V, InChain, CallConv);
384 }
else if (NumParts > 0) {
387 assert(NumParts % NumIntermediates == 0 &&
388 "Must expand into a divisible number of parts!");
389 unsigned Factor = NumParts / NumIntermediates;
390 for (
unsigned i = 0; i != NumIntermediates; ++i)
392 IntermediateVT, V, InChain, CallConv);
407 DL, BuiltVectorTy, Ops);
413 if (PartEVT == ValueVT)
429 "Cannot narrow, it would be a lossy transformation");
435 if (PartEVT == ValueVT)
460 }
else if (ValueVT.
bitsLT(PartEVT)) {
469 *DAG.
getContext(), V,
"non-trivial scalar-to-vector conversion");
500 std::optional<CallingConv::ID> CallConv);
507 unsigned NumParts,
MVT PartVT,
const Value *V,
508 std::optional<CallingConv::ID> CallConv = std::nullopt,
522 unsigned OrigNumParts = NumParts;
524 "Copying to an illegal type!");
530 EVT PartEVT = PartVT;
531 if (PartEVT == ValueVT) {
532 assert(NumParts == 1 &&
"No-op copy with multiple parts!");
541 assert(NumParts == 1 &&
"Do not know what to promote to!");
552 "Unknown mismatch!");
554 Val = DAG.
getNode(ExtendKind,
DL, ValueVT, Val);
555 if (PartVT == MVT::x86mmx)
560 assert(NumParts == 1 && PartEVT != ValueVT);
566 "Unknown mismatch!");
569 if (PartVT == MVT::x86mmx)
576 "Failed to tile the value with PartVT!");
579 if (PartEVT != ValueVT) {
581 "scalar-to-vector conversion failed");
590 if (NumParts & (NumParts - 1)) {
593 "Do not know what to expand to!");
595 unsigned RoundBits = RoundParts * PartBits;
596 unsigned OddParts = NumParts - RoundParts;
605 std::reverse(Parts + RoundParts, Parts + NumParts);
607 NumParts = RoundParts;
619 for (
unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
620 for (
unsigned i = 0; i < NumParts; i += StepSize) {
621 unsigned ThisBits = StepSize * PartBits / 2;
624 SDValue &Part1 = Parts[i+StepSize/2];
631 if (ThisBits == PartBits && ThisVT != PartVT) {
639 std::reverse(Parts, Parts + OrigNumParts);
656 if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
661 if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
663 "Cannot widen to illegal type");
666 }
else if (PartEVT != ValueEVT) {
681 Ops.
append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);
692 std::optional<CallingConv::ID> CallConv) {
696 const bool IsABIRegCopy = CallConv.has_value();
699 EVT PartEVT = PartVT;
700 if (PartEVT == ValueVT) {
719 TargetLowering::TypeWidenVector) {
746 "lossy conversion of vector to scalar type");
761 unsigned NumIntermediates;
765 *DAG.
getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
770 NumIntermediates, RegisterVT);
773 assert(NumRegs == NumParts &&
"Part count doesn't match vector breakdown!");
775 assert(RegisterVT == PartVT &&
"Part type doesn't match vector breakdown!");
778 "Mixing scalable and fixed vectors when copying in parts");
780 std::optional<ElementCount> DestEltCnt;
790 if (ValueVT == BuiltVectorTy) {
814 for (
unsigned i = 0; i != NumIntermediates; ++i) {
829 if (NumParts == NumIntermediates) {
832 for (
unsigned i = 0; i != NumParts; ++i)
834 }
else if (NumParts > 0) {
837 assert(NumIntermediates != 0 &&
"division by zero");
838 assert(NumParts % NumIntermediates == 0 &&
839 "Must expand into a divisible number of parts!");
840 unsigned Factor = NumParts / NumIntermediates;
841 for (
unsigned i = 0; i != NumIntermediates; ++i)
848 EVT valuevt, std::optional<CallingConv::ID>
CC)
849 : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
850 RegCount(1, regs.
size()), CallConv(
CC) {}
854 std::optional<CallingConv::ID>
CC) {
868 for (
unsigned i = 0; i != NumRegs; ++i)
870 RegVTs.push_back(RegisterVT);
899 for (
unsigned i = 0; i != NumRegs; ++i) {
905 *Glue =
P.getValue(2);
908 Chain =
P.getValue(1);
937 EVT FromVT(MVT::Other);
941 }
else if (NumSignBits > 1) {
949 assert(FromVT != MVT::Other);
955 RegisterVT, ValueVT, V, Chain,
CallConv);
985 NumParts, RegisterVT, V,
CallConv, ExtendKind);
991 for (
unsigned i = 0; i != NumRegs; ++i) {
1003 if (NumRegs == 1 || Glue)
1014 Chain = Chains[NumRegs-1];
1020 unsigned MatchingIdx,
const SDLoc &dl,
1022 std::vector<SDValue> &Ops)
const {
1027 Flag.setMatchingOp(MatchingIdx);
1036 Flag.setRegClass(RC->
getID());
1047 "No 1:1 mapping from clobbers to regs?");
1050 for (
unsigned I = 0, E =
ValueVTs.size();
I != E; ++
I) {
1055 "If we clobbered the stack pointer, MFI should know about it.");
1064 for (
unsigned i = 0; i != NumRegs; ++i) {
1066 unsigned TheReg =
Regs[Reg++];
1077 unsigned RegCount = std::get<0>(CountAndVT);
1078 MVT RegisterVT = std::get<1>(CountAndVT);
1102 UnusedArgNodeMap.clear();
1104 PendingExports.clear();
1105 PendingConstrainedFP.clear();
1106 PendingConstrainedFPStrict.clear();
1114 DanglingDebugInfoMap.clear();
1121 if (Pending.
empty())
1127 unsigned i = 0, e = Pending.
size();
1128 for (; i != e; ++i) {
1130 if (Pending[i].
getNode()->getOperand(0) == Root)
1138 if (Pending.
size() == 1)
1157 PendingConstrainedFP.size() +
1158 PendingConstrainedFPStrict.size());
1160 PendingConstrainedFP.end());
1161 PendingLoads.append(PendingConstrainedFPStrict.begin(),
1162 PendingConstrainedFPStrict.end());
1163 PendingConstrainedFP.clear();
1164 PendingConstrainedFPStrict.clear();
1171 PendingExports.append(PendingConstrainedFPStrict.begin(),
1172 PendingConstrainedFPStrict.end());
1173 PendingConstrainedFPStrict.clear();
1174 return updateRoot(PendingExports);
1181 assert(Variable &&
"Missing variable");
1188 <<
"dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
1195 if (!
N.getNode() && isa<Argument>(
Address))
1203 auto *FINode = dyn_cast<FrameIndexSDNode>(
N.getNode());
1204 if (IsParameter && FINode) {
1207 true,
DL, SDNodeOrder);
1208 }
else if (isa<Argument>(
Address)) {
1212 FuncArgumentDbgValueKind::Declare,
N);
1216 true,
DL, SDNodeOrder);
1223 FuncArgumentDbgValueKind::Declare,
N)) {
1225 <<
" (could not emit func-arg dbg_value)\n");
1237 for (
auto It = FnVarLocs->locs_begin(&
I),
End = FnVarLocs->locs_end(&
I);
1239 auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
1241 if (It->Values.isKillLocation(It->Expr)) {
1247 It->Values.hasArgList())) {
1249 for (
Value *V : It->Values.location_ops())
1252 FnVarLocs->getDILocalVariable(It->VariableID),
1253 It->Expr, Vals.
size() > 1, It->DL, SDNodeOrder);
1269 for (
DbgRecord &DR :
I.getDbgRecordRange()) {
1271 assert(DLR->getLabel() &&
"Missing label");
1273 DAG.
getDbgLabel(DLR->getLabel(), DLR->getDebugLoc(), SDNodeOrder);
1278 if (SkipDbgVariableRecords)
1288 LLVM_DEBUG(
dbgs() <<
"SelectionDAG visiting dbg_declare: " << DVR
1297 if (Values.
empty()) {
1306 [](
Value *V) {
return !V || isa<UndefValue>(V); })) {
1314 SDNodeOrder, IsVariadic)) {
1325 if (
I.isTerminator()) {
1326 HandlePHINodesInSuccessorBlocks(
I.getParent());
1330 if (!isa<DbgInfoIntrinsic>(
I))
1336 bool NodeInserted =
false;
1337 std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1338 MDNode *PCSectionsMD =
I.getMetadata(LLVMContext::MD_pcsections);
1339 MDNode *MMRA =
I.getMetadata(LLVMContext::MD_mmra);
1340 if (PCSectionsMD || MMRA) {
1341 InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1342 DAG, [&](
SDNode *) { NodeInserted =
true; });
1348 !isa<GCStatepointInst>(
I))
1352 if (PCSectionsMD || MMRA) {
1353 auto It = NodeMap.find(&
I);
1354 if (It != NodeMap.end()) {
1359 }
else if (NodeInserted) {
1362 errs() <<
"warning: loosing !pcsections and/or !mmra metadata ["
1363 <<
I.getModule()->getName() <<
"]\n";
1372void SelectionDAGBuilder::visitPHI(
const PHINode &) {
1382#define HANDLE_INST(NUM, OPCODE, CLASS) \
1383 case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1384#include "llvm/IR/Instruction.def"
1396 for (
const Value *V : Values) {
1421 DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr,
DL, Order);
1426 auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1427 DIVariable *DanglingVariable = DDI.getVariable();
1429 if (DanglingVariable == Variable && Expr->
fragmentsOverlap(DanglingExpr)) {
1431 << printDDI(
nullptr, DDI) <<
"\n");
1437 for (
auto &DDIMI : DanglingDebugInfoMap) {
1438 DanglingDebugInfoVector &DDIV = DDIMI.second;
1442 for (
auto &DDI : DDIV)
1443 if (isMatchingDbgValue(DDI))
1446 erase_if(DDIV, isMatchingDbgValue);
1454 auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1455 if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1458 DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1459 for (
auto &DDI : DDIV) {
1462 unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1466 "Expected inlined-at fields to agree");
1475 if (!EmitFuncArgumentDbgValue(V, Variable, Expr,
DL,
1476 FuncArgumentDbgValueKind::Value, Val)) {
1478 << printDDI(V, DDI) <<
"\n");
1485 <<
"changing SDNodeOrder from " << DbgSDNodeOrder <<
" to "
1486 << ValSDNodeOrder <<
"\n");
1487 SDV = getDbgValue(Val, Variable, Expr,
DL,
1488 std::max(DbgSDNodeOrder, ValSDNodeOrder));
1493 <<
" in EmitFuncArgumentDbgValue\n");
1495 LLVM_DEBUG(
dbgs() <<
"Dropping debug info for " << printDDI(V, DDI)
1507 DanglingDebugInfo &DDI) {
1512 const Value *OrigV = V;
1516 unsigned SDOrder = DDI.getSDNodeOrder();
1520 bool StackValue =
true;
1529 while (isa<Instruction>(V)) {
1530 const Instruction &VAsInst = *cast<const Instruction>(V);
1545 if (!AdditionalValues.
empty())
1555 dbgs() <<
"Salvaged debug location info for:\n " << *Var <<
"\n"
1556 << *OrigV <<
"\nBy stripping back to:\n " << *V <<
"\n");
1564 assert(OrigV &&
"V shouldn't be null");
1569 << printDDI(OrigV, DDI) <<
"\n");
1586 unsigned Order,
bool IsVariadic) {
1591 if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
1596 for (
const Value *V : Values) {
1598 if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
1599 isa<ConstantPointerNull>(V)) {
1605 if (
auto *CE = dyn_cast<ConstantExpr>(V))
1606 if (CE->getOpcode() == Instruction::IntToPtr) {
1613 if (
const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1624 if (!
N.getNode() && isa<Argument>(V))
1625 N = UnusedArgNodeMap[V];
1629 EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1630 FuncArgumentDbgValueKind::Value,
N))
1632 if (
auto *FISDN = dyn_cast<FrameIndexSDNode>(
N.getNode())) {
1657 bool IsParamOfFunc =
1667 unsigned Reg = VMI->second;
1671 V->getType(), std::nullopt);
1677 unsigned BitsToDescribe = 0;
1679 BitsToDescribe = *VarSize;
1681 BitsToDescribe = Fragment->SizeInBits;
1684 if (
Offset >= BitsToDescribe)
1687 unsigned RegisterSize = RegAndSize.second;
1688 unsigned FragmentSize = (
Offset + RegisterSize > BitsToDescribe)
1689 ? BitsToDescribe -
Offset
1692 Expr,
Offset, FragmentSize);
1696 Var, *FragmentExpr, RegAndSize.first,
false, DbgLoc, Order);
1714 false, DbgLoc, Order, IsVariadic);
1721 for (
auto &Pair : DanglingDebugInfoMap)
1722 for (
auto &DDI : Pair.second)
1754 if (
N.getNode())
return N;
1796 if (
const Constant *
C = dyn_cast<Constant>(V)) {
1808 getValue(CPA->getAddrDiscriminator()),
1809 getValue(CPA->getDiscriminator()));
1812 if (isa<ConstantPointerNull>(
C)) {
1813 unsigned AS = V->getType()->getPointerAddressSpace();
1821 if (
const ConstantFP *CFP = dyn_cast<ConstantFP>(
C))
1824 if (isa<UndefValue>(
C) && !V->getType()->isAggregateType())
1828 visit(CE->getOpcode(), *CE);
1830 assert(N1.
getNode() &&
"visit didn't populate the NodeMap!");
1834 if (isa<ConstantStruct>(
C) || isa<ConstantArray>(
C)) {
1836 for (
const Use &U :
C->operands()) {
1842 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1850 dyn_cast<ConstantDataSequential>(
C)) {
1852 for (
unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1856 for (
unsigned i = 0, e = Val->
getNumValues(); i != e; ++i)
1860 if (isa<ArrayType>(CDS->getType()))
1865 if (
C->getType()->isStructTy() ||
C->getType()->isArrayTy()) {
1866 assert((isa<ConstantAggregateZero>(
C) || isa<UndefValue>(
C)) &&
1867 "Unknown struct or array constant!");
1871 unsigned NumElts = ValueVTs.
size();
1875 for (
unsigned i = 0; i != NumElts; ++i) {
1876 EVT EltVT = ValueVTs[i];
1877 if (isa<UndefValue>(
C))
1891 if (
const auto *Equiv = dyn_cast<DSOLocalEquivalent>(
C))
1892 return getValue(Equiv->getGlobalValue());
1894 if (
const auto *
NC = dyn_cast<NoCFIValue>(
C))
1897 if (VT == MVT::aarch64svcount) {
1898 assert(
C->isNullValue() &&
"Can only zero this target type!");
1903 VectorType *VecTy = cast<VectorType>(V->getType());
1909 unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1910 for (
unsigned i = 0; i != NumElements; ++i)
1916 if (isa<ConstantAggregateZero>(
C)) {
1934 if (
const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1943 if (
const Instruction *Inst = dyn_cast<Instruction>(V)) {
1947 Inst->getType(), std::nullopt);
1955 if (
const auto *BB = dyn_cast<BasicBlock>(V))
1961void SelectionDAGBuilder::visitCatchPad(
const CatchPadInst &
I) {
1970 if (IsMSVCCXX || IsCoreCLR)
1997 Value *ParentPad =
I.getCatchSwitchParentPad();
1999 if (isa<ConstantTokenNone>(ParentPad))
2002 SuccessorColor = cast<Instruction>(ParentPad)->
getParent();
2003 assert(SuccessorColor &&
"No parent funclet for catchret!");
2005 assert(SuccessorColorMBB &&
"No MBB for SuccessorColor!");
2014void SelectionDAGBuilder::visitCleanupPad(
const CleanupPadInst &CPI) {
2058 if (isa<CleanupPadInst>(Pad)) {
2060 UnwindDests.emplace_back(FuncInfo.
MBBMap[EHPadBB], Prob);
2061 UnwindDests.back().first->setIsEHScopeEntry();
2063 }
else if (
const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2066 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2067 UnwindDests.emplace_back(FuncInfo.
MBBMap[CatchPadBB], Prob);
2068 UnwindDests.back().first->setIsEHScopeEntry();
2099 assert(UnwindDests.size() <= 1 &&
2100 "There should be at most one unwind destination for wasm");
2107 if (isa<LandingPadInst>(Pad)) {
2109 UnwindDests.emplace_back(FuncInfo.
MBBMap[EHPadBB], Prob);
2111 }
else if (isa<CleanupPadInst>(Pad)) {
2114 UnwindDests.emplace_back(FuncInfo.
MBBMap[EHPadBB], Prob);
2115 UnwindDests.
back().first->setIsEHScopeEntry();
2116 UnwindDests.back().first->setIsEHFuncletEntry();
2118 }
else if (
const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2120 for (
const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2121 UnwindDests.emplace_back(FuncInfo.
MBBMap[CatchPadBB], Prob);
2123 if (IsMSVCCXX || IsCoreCLR)
2124 UnwindDests.back().first->setIsEHFuncletEntry();
2126 UnwindDests.back().first->setIsEHScopeEntry();
2128 NewEHPadBB = CatchSwitch->getUnwindDest();
2134 if (BPI && NewEHPadBB)
2136 EHPadBB = NewEHPadBB;
2143 auto UnwindDest =
I.getUnwindDest();
2150 for (
auto &UnwindDest : UnwindDests) {
2151 UnwindDest.first->setIsEHPad();
2152 addSuccessorWithProb(
FuncInfo.
MBB, UnwindDest.first, UnwindDest.second);
2162void SelectionDAGBuilder::visitCatchSwitch(
const CatchSwitchInst &CSI) {
2166void SelectionDAGBuilder::visitRet(
const ReturnInst &
I) {
2180 if (
I.getParent()->getTerminatingDeoptimizeCall()) {
2187 const Function *
F =
I.getParent()->getParent();
2206 unsigned NumValues = ValueVTs.
size();
2209 Align BaseAlign =
DL.getPrefTypeAlign(
I.getOperand(0)->getType());
2210 for (
unsigned i = 0; i != NumValues; ++i) {
2217 if (MemVTs[i] != ValueVTs[i])
2227 MVT::Other, Chains);
2228 }
else if (
I.getNumOperands() != 0) {
2231 unsigned NumValues = ValueVTs.
size();
2235 const Function *
F =
I.getParent()->getParent();
2238 I.getOperand(0)->getType(),
F->getCallingConv(),
2242 if (
F->getAttributes().hasRetAttr(Attribute::SExt))
2244 else if (
F->getAttributes().hasRetAttr(Attribute::ZExt))
2248 bool RetInReg =
F->getAttributes().hasRetAttr(Attribute::InReg);
2250 for (
unsigned j = 0;
j != NumValues; ++
j) {
2251 EVT VT = ValueVTs[
j];
2263 &Parts[0], NumParts, PartVT, &
I,
CC, ExtendKind);
2270 if (
I.getOperand(0)->getType()->isPointerTy()) {
2272 Flags.setPointerAddrSpace(
2273 cast<PointerType>(
I.getOperand(0)->getType())->getAddressSpace());
2276 if (NeedsRegBlock) {
2277 Flags.setInConsecutiveRegs();
2278 if (j == NumValues - 1)
2279 Flags.setInConsecutiveRegsLast();
2288 for (
unsigned i = 0; i < NumParts; ++i) {
2290 Parts[i].getValueType().getSimpleVT(),
2301 const Function *
F =
I.getParent()->getParent();
2303 F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2306 Flags.setSwiftError();
2325 "LowerReturn didn't return a valid chain!");
2336 if (V->getType()->isEmptyTy())
2341 assert((!V->use_empty() || isa<CallBrInst>(V)) &&
2342 "Unused value assigned virtual registers!");
2352 if (!isa<Instruction>(V) && !isa<Argument>(V))
return;
2365 if (
const Instruction *VI = dyn_cast<Instruction>(V)) {
2367 if (VI->getParent() == FromBB)
2376 if (isa<Argument>(V)) {
2393 const BasicBlock *SrcBB = Src->getBasicBlock();
2394 const BasicBlock *DstBB = Dst->getBasicBlock();
2398 auto SuccSize = std::max<uint32_t>(
succ_size(SrcBB), 1);
2408 Src->addSuccessorWithoutProb(Dst);
2411 Prob = getEdgeProbability(Src, Dst);
2412 Src->addSuccessor(Dst, Prob);
2418 return I->getParent() == BB;
2438 if (
const CmpInst *BOp = dyn_cast<CmpInst>(
Cond)) {
2442 if (CurBB == SwitchBB ||
2448 InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2453 InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2459 CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1),
nullptr,
2461 SL->SwitchCases.push_back(CB);
2470 SL->SwitchCases.push_back(CB);
2478 unsigned Depth = 0) {
2483 auto *
I = dyn_cast<Instruction>(V);
2487 if (Necessary !=
nullptr) {
2490 if (Necessary->contains(
I))
2498 for (
unsigned OpIdx = 0, E =
I->getNumOperands(); OpIdx < E; ++OpIdx)
2509 if (
I.getNumSuccessors() != 2)
2512 if (!
I.isConditional())
2524 if (BPI !=
nullptr) {
2530 std::optional<bool> Likely;
2533 else if (BPI->
isEdgeHot(
I.getParent(), IfFalse))
2537 if (Opc == (*Likely ? Instruction::And : Instruction::Or))
2549 if (CostThresh <= 0)
2563 if (
const auto *RhsI = dyn_cast<Instruction>(Rhs))
2574 Value *BrCond =
I.getCondition();
2575 auto ShouldCountInsn = [&RhsDeps, &BrCond](
const Instruction *Ins) {
2576 for (
const auto *U : Ins->users()) {
2578 if (
auto *UIns = dyn_cast<Instruction>(U))
2579 if (UIns != BrCond && !RhsDeps.
contains(UIns))
2592 for (
unsigned PruneIters = 0; PruneIters < MaxPruneIters; ++PruneIters) {
2594 for (
const auto &InsPair : RhsDeps) {
2595 if (!ShouldCountInsn(InsPair.first)) {
2596 ToDrop = InsPair.first;
2600 if (ToDrop ==
nullptr)
2602 RhsDeps.erase(ToDrop);
2605 for (
const auto &InsPair : RhsDeps) {
2613 if (CostOfIncluding > CostThresh)
2639 const Value *BOpOp0, *BOpOp1;
2653 if (BOpc == Instruction::And)
2654 BOpc = Instruction::Or;
2655 else if (BOpc == Instruction::Or)
2656 BOpc = Instruction::And;
2662 bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->
hasOneUse();
2667 TProb, FProb, InvertCond);
2677 if (Opc == Instruction::Or) {
2698 auto NewTrueProb = TProb / 2;
2699 auto NewFalseProb = TProb / 2 + FProb;
2702 NewFalseProb, InvertCond);
2709 Probs[1], InvertCond);
2711 assert(Opc == Instruction::And &&
"Unknown merge op!");
2731 auto NewTrueProb = TProb + FProb / 2;
2732 auto NewFalseProb = FProb / 2;
2735 NewFalseProb, InvertCond);
2742 Probs[1], InvertCond);
2751 if (Cases.size() != 2)
return true;
2755 if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2756 Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2757 (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2758 Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2764 if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2765 Cases[0].
CC == Cases[1].
CC &&
2766 isa<Constant>(Cases[0].CmpRHS) &&
2767 cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2768 if (Cases[0].
CC ==
ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2770 if (Cases[0].
CC ==
ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2777void SelectionDAGBuilder::visitBr(
const BranchInst &
I) {
2783 if (
I.isUnconditional()) {
2789 if (Succ0MBB != NextBlock(BrMBB) ||
2802 const Value *CondVal =
I.getCondition();
2822 const Instruction *BOp = dyn_cast<Instruction>(CondVal);
2824 BOp->
hasOneUse() && !
I.hasMetadata(LLVMContext::MD_unpredictable)) {
2826 const Value *BOp0, *BOp1;
2829 Opcode = Instruction::And;
2831 Opcode = Instruction::Or;
2839 Opcode, BOp0, BOp1))) {
2841 getEdgeProbability(BrMBB, Succ0MBB),
2842 getEdgeProbability(BrMBB, Succ1MBB),
2847 assert(
SL->SwitchCases[0].ThisBB == BrMBB &&
"Unexpected lowering!");
2851 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i) {
2858 SL->SwitchCases.erase(
SL->SwitchCases.begin());
2864 for (
unsigned i = 1, e =
SL->SwitchCases.size(); i != e; ++i)
2867 SL->SwitchCases.clear();
2873 nullptr, Succ0MBB, Succ1MBB, BrMBB,
getCurSDLoc());
2892 if (CB.
TrueBB != NextBlock(SwitchBB)) {
2934 if (cast<ConstantInt>(CB.
CmpLHS)->isMinValue(
true)) {
2955 if (CB.
TrueBB == NextBlock(SwitchBB)) {
2979 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
2980 assert(JT.Reg != -1U &&
"Should lower JT Header first!");
2994 assert(JT.SL &&
"Should set SDLoc for SelectionDAG!");
2995 const SDLoc &dl = *JT.SL;
3011 unsigned JumpTableReg =
3014 JumpTableReg, SwitchOp);
3015 JT.Reg = JumpTableReg;
3027 MVT::Other, CopyTo, CMP,
3031 if (JT.MBB != NextBlock(SwitchBB))
3038 if (JT.MBB != NextBlock(SwitchBB))
3066 if (PtrTy != PtrMemTy)
3114 Entry.Node = GuardVal;
3116 if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
3117 Entry.IsInReg =
true;
3118 Args.push_back(Entry);
3124 getValue(GuardCheckFn), std::move(Args));
3126 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
3140 Guard =
DAG.
getLoad(PtrMemTy, dl, Chain, GuardPtr,
3177 TLI.
makeLibCall(
DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
3208 bool UsePtrType =
false;
3212 for (
unsigned i = 0, e =
B.Cases.size(); i != e; ++i)
3232 if (!
B.FallthroughUnreachable)
3233 addSuccessorWithProb(SwitchBB,
B.Default,
B.DefaultProb);
3234 addSuccessorWithProb(SwitchBB,
MBB,
B.Prob);
3238 if (!
B.FallthroughUnreachable) {
3251 if (
MBB != NextBlock(SwitchBB))
3270 if (PopCount == 1) {
3277 }
else if (PopCount == BB.
Range) {
3296 addSuccessorWithProb(SwitchBB,
B.TargetBB,
B.ExtraProb);
3298 addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3309 if (NextMBB != NextBlock(SwitchBB))
3316void SelectionDAGBuilder::visitInvoke(
const InvokeInst &
I) {
3327 assert(!
I.hasOperandBundlesOtherThan(
3328 {LLVMContext::OB_deopt, LLVMContext::OB_gc_transition,
3329 LLVMContext::OB_gc_live, LLVMContext::OB_funclet,
3330 LLVMContext::OB_cfguardtarget, LLVMContext::OB_ptrauth,
3331 LLVMContext::OB_clang_arc_attachedcall}) &&
3332 "Cannot lower invokes with arbitrary operand bundles yet!");
3334 const Value *Callee(
I.getCalledOperand());
3335 const Function *Fn = dyn_cast<Function>(Callee);
3336 if (isa<InlineAsm>(Callee))
3337 visitInlineAsm(
I, EHPadBB);
3342 case Intrinsic::donothing:
3344 case Intrinsic::seh_try_begin:
3345 case Intrinsic::seh_scope_begin:
3346 case Intrinsic::seh_try_end:
3347 case Intrinsic::seh_scope_end:
3353 case Intrinsic::experimental_patchpoint_void:
3354 case Intrinsic::experimental_patchpoint:
3355 visitPatchpoint(
I, EHPadBB);
3357 case Intrinsic::experimental_gc_statepoint:
3360 case Intrinsic::wasm_rethrow: {
3375 }
else if (
I.hasDeoptState()) {
3391 if (!isa<GCStatepointInst>(
I)) {
3403 addSuccessorWithProb(InvokeMBB, Return);
3404 for (
auto &UnwindDest : UnwindDests) {
3405 UnwindDest.first->setIsEHPad();
3406 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3415void SelectionDAGBuilder::visitCallBr(
const CallBrInst &
I) {
3420 assert(!
I.hasOperandBundlesOtherThan(
3421 {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
3422 "Cannot lower callbrs with arbitrary operand bundles yet!");
3424 assert(
I.isInlineAsm() &&
"Only know how to handle inlineasm callbr");
3430 Dests.
insert(
I.getDefaultDest());
3435 for (
unsigned i = 0, e =
I.getNumIndirectDests(); i < e; ++i) {
3438 Target->setIsInlineAsmBrIndirectTarget();
3439 Target->setMachineBlockAddressTaken();
3440 Target->setLabelMustBeEmitted();
3442 if (Dests.
insert(Dest).second)
3453void SelectionDAGBuilder::visitResume(
const ResumeInst &RI) {
3454 llvm_unreachable(
"SelectionDAGBuilder shouldn't visit resume instructions!");
3457void SelectionDAGBuilder::visitLandingPad(
const LandingPadInst &LP) {
3459 "Call to landingpad not in landing pad!");
3479 assert(ValueVTs.
size() == 2 &&
"Only two-valued landingpads are supported");
3509 if (JTB.first.HeaderBB ==
First)
3510 JTB.first.HeaderBB =
Last;
3523 for (
unsigned i = 0, e =
I.getNumSuccessors(); i != e; ++i) {
3525 bool Inserted =
Done.insert(BB).second;
3530 addSuccessorWithProb(IndirectBrMBB, Succ);
3545 if (
const CallInst *Call = dyn_cast_or_null<CallInst>(
I.getPrevNode())) {
3546 if (
Call->doesNotReturn())
3554void SelectionDAGBuilder::visitUnary(
const User &
I,
unsigned Opcode) {
3556 if (
auto *FPOp = dyn_cast<FPMathOperator>(&
I))
3557 Flags.copyFMF(*FPOp);
3565void SelectionDAGBuilder::visitBinary(
const User &
I,
unsigned Opcode) {
3567 if (
auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&
I)) {
3568 Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3569 Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3571 if (
auto *ExactOp = dyn_cast<PossiblyExactOperator>(&
I))
3572 Flags.setExact(ExactOp->isExact());
3573 if (
auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&
I))
3574 Flags.setDisjoint(DisjointOp->isDisjoint());
3575 if (
auto *FPOp = dyn_cast<FPMathOperator>(&
I))
3576 Flags.copyFMF(*FPOp);
3585void SelectionDAGBuilder::visitShift(
const User &
I,
unsigned Opcode) {
3594 if (!
I.getType()->isVectorTy() && Op2.
getValueType() != ShiftTy) {
3596 "Unexpected shift type");
3607 dyn_cast<const OverflowingBinaryOperator>(&
I)) {
3608 nuw = OFBinOp->hasNoUnsignedWrap();
3609 nsw = OFBinOp->hasNoSignedWrap();
3612 dyn_cast<const PossiblyExactOperator>(&
I))
3613 exact = ExactOp->isExact();
3616 Flags.setExact(exact);
3617 Flags.setNoSignedWrap(nsw);
3618 Flags.setNoUnsignedWrap(nuw);
3624void SelectionDAGBuilder::visitSDiv(
const User &
I) {
3629 Flags.setExact(isa<PossiblyExactOperator>(&
I) &&
3630 cast<PossiblyExactOperator>(&
I)->isExact());
3635void SelectionDAGBuilder::visitICmp(
const ICmpInst &
I) {
3658void SelectionDAGBuilder::visitFCmp(
const FCmpInst &
I) {
3664 auto *FPMO = cast<FPMathOperator>(&
I);
3669 Flags.copyFMF(*FPMO);
3681 return isa<SelectInst>(V);
3685void SelectionDAGBuilder::visitSelect(
const User &
I) {
3689 unsigned NumValues = ValueVTs.
size();
3690 if (NumValues == 0)
return;
3700 bool IsUnaryAbs =
false;
3701 bool Negate =
false;
3704 if (
auto *FPOp = dyn_cast<FPMathOperator>(&
I))
3705 Flags.copyFMF(*FPOp);
3707 Flags.setUnpredictable(
3708 cast<SelectInst>(
I).getMetadata(LLVMContext::MD_unpredictable));
3712 EVT VT = ValueVTs[0];
3724 bool UseScalarMinMax = VT.
isVector() &&
3733 switch (SPR.Flavor) {
3739 switch (SPR.NaNBehavior) {
3752 switch (SPR.NaNBehavior) {
3796 for (
unsigned i = 0; i != NumValues; ++i) {
3805 for (
unsigned i = 0; i != NumValues; ++i) {
3819void SelectionDAGBuilder::visitTrunc(
const User &
I) {
3827void SelectionDAGBuilder::visitZExt(
const User &
I) {
3835 if (
auto *PNI = dyn_cast<PossiblyNonNegInst>(&
I))
3836 Flags.setNonNeg(PNI->hasNonNeg());
3841 if (
Flags.hasNonNeg() &&
3850void SelectionDAGBuilder::visitSExt(
const User &
I) {
3859void SelectionDAGBuilder::visitFPTrunc(
const User &
I) {
3870void SelectionDAGBuilder::visitFPExt(
const User &
I) {
3878void SelectionDAGBuilder::visitFPToUI(
const User &
I) {
3886void SelectionDAGBuilder::visitFPToSI(
const User &
I) {
3894void SelectionDAGBuilder::visitUIToFP(
const User &
I) {
3900 if (
auto *PNI = dyn_cast<PossiblyNonNegInst>(&
I))
3901 Flags.setNonNeg(PNI->hasNonNeg());
3906void SelectionDAGBuilder::visitSIToFP(
const User &
I) {
3914void SelectionDAGBuilder::visitPtrToInt(
const User &
I) {
3928void SelectionDAGBuilder::visitIntToPtr(
const User &
I) {
3940void SelectionDAGBuilder::visitBitCast(
const User &
I) {
3948 if (DestVT !=
N.getValueType())
3955 else if(
ConstantInt *
C = dyn_cast<ConstantInt>(
I.getOperand(0)))
3962void SelectionDAGBuilder::visitAddrSpaceCast(
const User &
I) {
3964 const Value *SV =
I.getOperand(0);
3969 unsigned DestAS =
I.getType()->getPointerAddressSpace();
3977void SelectionDAGBuilder::visitInsertElement(
const User &
I) {
3985 InVec, InVal, InIdx));
3988void SelectionDAGBuilder::visitExtractElement(
const User &
I) {
3998void SelectionDAGBuilder::visitShuffleVector(
const User &
I) {
4002 if (
auto *SVI = dyn_cast<ShuffleVectorInst>(&
I))
4003 Mask = SVI->getShuffleMask();
4005 Mask = cast<ConstantExpr>(
I).getShuffleMask();
4011 if (
all_of(Mask, [](
int Elem) {
return Elem == 0; }) &&
4027 unsigned MaskNumElts =
Mask.size();
4029 if (SrcNumElts == MaskNumElts) {
4035 if (SrcNumElts < MaskNumElts) {
4039 if (MaskNumElts % SrcNumElts == 0) {
4043 unsigned NumConcat = MaskNumElts / SrcNumElts;
4044 bool IsConcat =
true;
4046 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4052 if ((
Idx % SrcNumElts != (i % SrcNumElts)) ||
4053 (ConcatSrcs[i / SrcNumElts] >= 0 &&
4054 ConcatSrcs[i / SrcNumElts] != (
int)(
Idx / SrcNumElts))) {
4059 ConcatSrcs[i / SrcNumElts] =
Idx / SrcNumElts;
4066 for (
auto Src : ConcatSrcs) {
4079 unsigned PaddedMaskNumElts =
alignTo(MaskNumElts, SrcNumElts);
4080 unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
4097 for (
unsigned i = 0; i != MaskNumElts; ++i) {
4099 if (
Idx >= (
int)SrcNumElts)
4100 Idx -= SrcNumElts - PaddedMaskNumElts;
4108 if (MaskNumElts != PaddedMaskNumElts)
4116 if (SrcNumElts > MaskNumElts) {
4119 int StartIdx[2] = { -1, -1 };
4120 bool CanExtract =
true;
4121 for (
int Idx : Mask) {
4126 if (
Idx >= (
int)SrcNumElts) {
4135 if (NewStartIdx + MaskNumElts > SrcNumElts ||
4136 (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
4140 StartIdx[Input] = NewStartIdx;
4143 if (StartIdx[0] < 0 && StartIdx[1] < 0) {
4149 for (
unsigned Input = 0; Input < 2; ++Input) {
4150 SDValue &Src = Input == 0 ? Src1 : Src2;
4151 if (StartIdx[Input] < 0)
4161 for (
int &
Idx : MappedOps) {
4162 if (
Idx >= (
int)SrcNumElts)
4163 Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
4178 for (
int Idx : Mask) {
4184 SDValue &Src =
Idx < (int)SrcNumElts ? Src1 : Src2;
4185 if (
Idx >= (
int)SrcNumElts)
Idx -= SrcNumElts;
4199 const Value *Op0 =
I.getOperand(0);
4200 const Value *Op1 =
I.getOperand(1);
4201 Type *AggTy =
I.getType();
4203 bool IntoUndef = isa<UndefValue>(Op0);
4204 bool FromUndef = isa<UndefValue>(Op1);
4214 unsigned NumAggValues = AggValueVTs.
size();
4215 unsigned NumValValues = ValValueVTs.
size();
4219 if (!NumAggValues) {
4227 for (; i != LinearIndex; ++i)
4228 Values[i] = IntoUndef ?
DAG.
getUNDEF(AggValueVTs[i]) :
4233 for (; i != LinearIndex + NumValValues; ++i)
4234 Values[i] = FromUndef ?
DAG.
getUNDEF(AggValueVTs[i]) :
4238 for (; i != NumAggValues; ++i)
4239 Values[i] = IntoUndef ?
DAG.
getUNDEF(AggValueVTs[i]) :
4248 const Value *Op0 =
I.getOperand(0);
4250 Type *ValTy =
I.getType();
4251 bool OutOfUndef = isa<UndefValue>(Op0);
4259 unsigned NumValValues = ValValueVTs.
size();
4262 if (!NumValValues) {
4271 for (
unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4272 Values[i - LinearIndex] =
4281void SelectionDAGBuilder::visitGetElementPtr(
const User &
I) {
4282 Value *Op0 =
I.getOperand(0);
4292 bool IsVectorGEP =
I.getType()->isVectorTy();
4294 IsVectorGEP ? cast<VectorType>(
I.getType())->getElementCount()
4297 if (IsVectorGEP && !
N.getValueType().isVector()) {
4305 const Value *
Idx = GTI.getOperand();
4306 if (
StructType *StTy = GTI.getStructTypeOrNull()) {
4307 unsigned Field = cast<Constant>(
Idx)->getUniqueInteger().getZExtValue();
4316 if (int64_t(
Offset) >= 0 && cast<GEPOperator>(
I).isInBounds())
4317 Flags.setNoUnsignedWrap(
true);
4333 bool ElementScalable = ElementSize.
isScalable();
4337 const auto *
C = dyn_cast<Constant>(
Idx);
4338 if (
C && isa<VectorType>(
C->getType()))
4339 C =
C->getSplatValue();
4341 const auto *CI = dyn_cast_or_null<ConstantInt>(
C);
4342 if (CI && CI->isZero())
4344 if (CI && !ElementScalable) {
4358 Flags.setNoUnsignedWrap(
true);
4371 VectorElementCount);
4379 if (ElementScalable) {
4380 EVT VScaleTy =
N.getValueType().getScalarType();
4390 if (ElementMul != 1) {
4391 if (ElementMul.isPowerOf2()) {
4392 unsigned Amt = ElementMul.logBase2();
4394 N.getValueType(), IdxN,
4400 N.getValueType(), IdxN, Scale);
4406 N.getValueType(),
N, IdxN);
4417 if (PtrMemTy != PtrTy && !cast<GEPOperator>(
I).isInBounds())
4423void SelectionDAGBuilder::visitAlloca(
const AllocaInst &
I) {
4430 Type *Ty =
I.getAllocatedType();
4434 MaybeAlign Alignment = std::max(
DL.getPrefTypeAlign(Ty),
I.getAlign());
4458 if (*Alignment <= StackAlign)
4459 Alignment = std::nullopt;
4466 Flags.setNoUnsignedWrap(
true);
4476 DAG.
getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4492 if (!
I.hasMetadata(LLVMContext::MD_noundef))
4494 return I.getMetadata(LLVMContext::MD_range);
4498 if (
const auto *CB = dyn_cast<CallBase>(&
I)) {
4500 if (CB->hasRetAttr(Attribute::NoUndef))
4501 return CB->getRange();
4505 return std::nullopt;
4508void SelectionDAGBuilder::visitLoad(
const LoadInst &
I) {
4510 return visitAtomicLoad(
I);
4513 const Value *SV =
I.getOperand(0);
4517 if (
const Argument *Arg = dyn_cast<Argument>(SV)) {
4518 if (Arg->hasSwiftErrorAttr())
4519 return visitLoadFromSwiftError(
I);
4522 if (
const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
4523 if (Alloca->isSwiftError())
4524 return visitLoadFromSwiftError(
I);
4530 Type *Ty =
I.getType();
4534 unsigned NumValues = ValueVTs.
size();
4538 Align Alignment =
I.getAlign();
4541 bool isVolatile =
I.isVolatile();
4546 bool ConstantMemory =
false;
4559 ConstantMemory =
true;
4574 unsigned ChainI = 0;
4575 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4598 MMOFlags, AAInfo, Ranges);
4599 Chains[ChainI] =
L.getValue(1);
4601 if (MemVTs[i] != ValueVTs[i])
4607 if (!ConstantMemory) {
4620void SelectionDAGBuilder::visitStoreToSwiftError(
const StoreInst &
I) {
4622 "call visitStoreToSwiftError when backend supports swifterror");
4626 const Value *SrcV =
I.getOperand(0);
4628 SrcV->
getType(), ValueVTs, &Offsets, 0);
4629 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4630 "expect a single EVT for swifterror");
4639 SDValue(Src.getNode(), Src.getResNo()));
4643void SelectionDAGBuilder::visitLoadFromSwiftError(
const LoadInst &
I) {
4645 "call visitLoadFromSwiftError when backend supports swifterror");
4648 !
I.hasMetadata(LLVMContext::MD_nontemporal) &&
4649 !
I.hasMetadata(LLVMContext::MD_invariant_load) &&
4650 "Support volatile, non temporal, invariant for load_from_swift_error");
4652 const Value *SV =
I.getOperand(0);
4653 Type *Ty =
I.getType();
4658 I.getAAMetadata()))) &&
4659 "load_from_swift_error should not be constant memory");
4664 ValueVTs, &Offsets, 0);
4665 assert(ValueVTs.
size() == 1 && Offsets[0] == 0 &&
4666 "expect a single EVT for swifterror");
4676void SelectionDAGBuilder::visitStore(
const StoreInst &
I) {
4678 return visitAtomicStore(
I);
4680 const Value *SrcV =
I.getOperand(0);
4681 const Value *PtrV =
I.getOperand(1);
4687 if (
const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4688 if (Arg->hasSwiftErrorAttr())
4689 return visitStoreToSwiftError(
I);
4692 if (
const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4693 if (Alloca->isSwiftError())
4694 return visitStoreToSwiftError(
I);
4701 SrcV->
getType(), ValueVTs, &MemVTs, &Offsets);
4702 unsigned NumValues = ValueVTs.
size();
4715 Align Alignment =
I.getAlign();
4720 unsigned ChainI = 0;
4721 for (
unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4738 if (MemVTs[i] != ValueVTs[i])
4741 DAG.
getStore(Root, dl, Val,
Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4742 Chains[ChainI] = St;
4751void SelectionDAGBuilder::visitMaskedStore(
const CallInst &
I,
4752 bool IsCompressing) {
4758 Src0 =
I.getArgOperand(0);
4759 Ptr =
I.getArgOperand(1);
4760 Alignment = cast<ConstantInt>(
I.getArgOperand(2))->getAlignValue();
4761 Mask =
I.getArgOperand(3);
4766 Src0 =
I.getArgOperand(0);
4767 Ptr =
I.getArgOperand(1);
4768 Mask =
I.getArgOperand(2);
4769 Alignment =
I.getParamAlign(1).valueOrOne();
4772 Value *PtrOperand, *MaskOperand, *Src0Operand;
4775 getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4777 getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4787 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
4799 I.getArgOperand(0)->getType()->getScalarType())
4832 assert(
Ptr->getType()->isVectorTy() &&
"Unexpected pointer type");
4835 if (
auto *
C = dyn_cast<Constant>(
Ptr)) {
4836 C =
C->getSplatValue();
4842 ElementCount NumElts = cast<VectorType>(
Ptr->getType())->getElementCount();
4851 if (!
GEP ||
GEP->getParent() != CurBB)
4854 if (
GEP->getNumOperands() != 2)
4857 const Value *BasePtr =
GEP->getPointerOperand();
4858 const Value *IndexVal =
GEP->getOperand(
GEP->getNumOperands() - 1);
4864 TypeSize ScaleVal =
DL.getTypeAllocSize(
GEP->getResultElementType());
4869 if (ScaleVal != 1 &&
4882void SelectionDAGBuilder::visitMaskedScatter(
const CallInst &
I) {
4890 Align Alignment = cast<ConstantInt>(
I.getArgOperand(2))
4891 ->getMaybeAlignValue()
4902 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
4922 Ops, MMO, IndexType,
false);
4927void SelectionDAGBuilder::visitMaskedLoad(
const CallInst &
I,
bool IsExpanding) {
4933 Ptr =
I.getArgOperand(0);
4934 Alignment = cast<ConstantInt>(
I.getArgOperand(1))->getAlignValue();
4935 Mask =
I.getArgOperand(2);
4936 Src0 =
I.getArgOperand(3);
4941 Ptr =
I.getArgOperand(0);
4942 Alignment =
I.getParamAlign(0).valueOrOne();
4943 Mask =
I.getArgOperand(1);
4944 Src0 =
I.getArgOperand(2);
4947 Value *PtrOperand, *MaskOperand, *Src0Operand;
4950 getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4952 getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4970 if (
I.hasMetadata(LLVMContext::MD_nontemporal))
4996void SelectionDAGBuilder::visitMaskedGather(
const CallInst &
I) {
5006 Align Alignment = cast<ConstantInt>(
I.getArgOperand(1))
5007 ->getMaybeAlignValue()
5019 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
5065 AAMDNodes(),
nullptr, SSID, SuccessOrdering, FailureOrdering);
5068 dl, MemVT, VTs, InChain,
5079void SelectionDAGBuilder::visitAtomicRMW(
const AtomicRMWInst &
I) {
5082 switch (
I.getOperation()) {
5132void SelectionDAGBuilder::visitFence(
const FenceInst &
I) {
5146void SelectionDAGBuilder::visitAtomicLoad(
const LoadInst &
I) {
5166 nullptr, SSID, Order);
5182void SelectionDAGBuilder::visitAtomicStore(
const StoreInst &
I) {
5204 nullptr, SSID, Ordering);
5220void SelectionDAGBuilder::visitTargetIntrinsic(
const CallInst &
I,
5221 unsigned Intrinsic) {
5226 bool HasChain = !
F->doesNotAccessMemory();
5227 bool OnlyLoad = HasChain &&
F->onlyReadsMemory();
5254 for (
unsigned i = 0, e =
I.arg_size(); i != e; ++i) {
5255 const Value *Arg =
I.getArgOperand(i);
5256 if (!
I.paramHasAttr(i, Attribute::ImmArg)) {
5263 if (
const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
5264 assert(CI->getBitWidth() <= 64 &&
5265 "large intrinsic immediates not handled");
5283 if (
auto *FPMO = dyn_cast<FPMathOperator>(&
I))
5284 Flags.copyFMF(*FPMO);
5291 auto *Token = Bundle->Inputs[0].get();
5293 assert(Ops.
back().getValueType() != MVT::Glue &&
5294 "Did not expected another glue node here.");
5302 if (IsTgtIntrinsic) {
5310 else if (
Info.fallbackAddressSpace)
5314 Info.size,
I.getAAMetadata());
5315 }
else if (!HasChain) {
5317 }
else if (!
I.getType()->isVoidTy()) {
5331 if (!
I.getType()->isVoidTy()) {
5332 if (!isa<VectorType>(
I.getType()))
5404 SDValue TwoToFractionalPartOfX;
5481 if (
Op.getValueType() == MVT::f32 &&
5505 if (
Op.getValueType() == MVT::f32 &&
5604 if (
Op.getValueType() == MVT::f32 &&
5688 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5701 if (
Op.getValueType() == MVT::f32 &&
5778 return DAG.
getNode(
ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5789 if (
Op.getValueType() == MVT::f32 &&
5802 bool IsExp10 =
false;
5803 if (
LHS.getValueType() == MVT::f32 &&
RHS.getValueType() == MVT::f32 &&
5807 IsExp10 = LHSC->isExactlyValue(Ten);
5834 unsigned Val = RHSC->getSExtValue();
5863 CurSquare, CurSquare);
5868 if (RHSC->getSExtValue() < 0)
5882 EVT VT =
LHS.getValueType();
5905 if ((ScaleInt > 0 || (Saturating &&
Signed)) &&
5909 Opcode, VT, ScaleInt);
5944 switch (
N.getOpcode()) {
5947 Regs.emplace_back(cast<RegisterSDNode>(
Op)->
getReg(),
5948 Op.getValueType().getSizeInBits());
5973bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
5976 const Argument *Arg = dyn_cast<Argument>(V);
5990 auto &Inst =
TII->get(TargetOpcode::DBG_INSTR_REF);
5997 auto *NewDIExpr = FragExpr;
6004 return BuildMI(MF,
DL, Inst,
false, MOs, Variable, NewDIExpr);
6007 auto &Inst =
TII->get(TargetOpcode::DBG_VALUE);
6008 return BuildMI(MF,
DL, Inst, Indirect, Reg, Variable, FragExpr);
6012 if (Kind == FuncArgumentDbgValueKind::Value) {
6017 if (!IsInEntryBlock)
6033 bool VariableIsFunctionInputArg = Variable->
isParameter() &&
6034 !
DL->getInlinedAt();
6036 if (!IsInPrologue && !VariableIsFunctionInputArg)
6070 if (VariableIsFunctionInputArg) {
6075 return !NodeMap[
V].getNode();
6080 bool IsIndirect =
false;
6081 std::optional<MachineOperand>
Op;
6084 if (FI != std::numeric_limits<int>::max())
6088 if (!
Op &&
N.getNode()) {
6091 if (ArgRegsAndSizes.
size() == 1)
6092 Reg = ArgRegsAndSizes.
front().first;
6094 if (Reg &&
Reg.isVirtual()) {
6102 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6106 if (!
Op &&
N.getNode()) {
6111 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
6120 for (
const auto &RegAndSize : SplitRegs) {
6124 int RegFragmentSizeInBits = RegAndSize.second;
6126 uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
6129 if (
Offset >= ExprFragmentSizeInBits)
6133 if (
Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
6134 RegFragmentSizeInBits = ExprFragmentSizeInBits -
Offset;
6139 Expr,
Offset, RegFragmentSizeInBits);
6140 Offset += RegAndSize.second;
6143 if (!FragmentExpr) {
6150 MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
6151 Kind != FuncArgumentDbgValueKind::Value);
6162 V->getType(), std::nullopt);
6163 if (RFV.occupiesMultipleRegs()) {
6164 splitMultiRegDbgValue(RFV.getRegsAndSizes());
6169 IsIndirect =
Kind != FuncArgumentDbgValueKind::Value;
6170 }
else if (ArgRegsAndSizes.
size() > 1) {
6173 splitMultiRegDbgValue(ArgRegsAndSizes);
6182 "Expected inlined-at fields to agree");
6186 NewMI = MakeVRegDbgValue(
Op->getReg(), Expr, IsIndirect);
6188 NewMI =
BuildMI(MF,
DL,
TII->get(TargetOpcode::DBG_VALUE),
true, *
Op,
6201 unsigned DbgSDNodeOrder) {
6202 if (
auto *FISDN = dyn_cast<FrameIndexSDNode>(
N.getNode())) {
6214 false, dl, DbgSDNodeOrder);
6217 false, dl, DbgSDNodeOrder);
6221 switch (Intrinsic) {
6222 case Intrinsic::smul_fix:
6224 case Intrinsic::umul_fix:
6226 case Intrinsic::smul_fix_sat:
6228 case Intrinsic::umul_fix_sat:
6230 case Intrinsic::sdiv_fix:
6232 case Intrinsic::udiv_fix:
6234 case Intrinsic::sdiv_fix_sat:
6236 case Intrinsic::udiv_fix_sat:
6243void SelectionDAGBuilder::lowerCallToExternalSymbol(
const CallInst &
I,
6244 const char *FunctionName) {
6245 assert(FunctionName &&
"FunctionName must not be nullptr");
6255 assert(cast<CallBase>(PreallocatedSetup)
6258 "expected call_preallocated_setup Value");
6259 for (
const auto *U : PreallocatedSetup->
users()) {
6260 auto *UseCall = cast<CallBase>(U);
6261 const Function *Fn = UseCall->getCalledFunction();
6262 if (!Fn || Fn->
getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6272bool SelectionDAGBuilder::visitEntryValueDbgValue(
6279 const Argument *Arg = cast<Argument>(Values[0]);
6285 dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6286 "couldn't find an associated register for the Argument\n");
6289 Register ArgVReg = ArgIt->getSecond();
6292 if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6294 Variable, Expr, PhysReg,
false , DbgLoc, SDNodeOrder);
6298 LLVM_DEBUG(
dbgs() <<
"Dropping dbg.value: expression is entry_value but "
6299 "couldn't find a physical register\n");
6304void SelectionDAGBuilder::visitConvergenceControl(
const CallInst &
I,
6305 unsigned Intrinsic) {
6307 switch (Intrinsic) {
6308 case Intrinsic::experimental_convergence_anchor:
6311 case Intrinsic::experimental_convergence_entry:
6314 case Intrinsic::experimental_convergence_loop: {
6316 auto *Token = Bundle->Inputs[0].get();
6324void SelectionDAGBuilder::visitVectorHistogram(
const CallInst &
I,
6325 unsigned IntrinsicID) {
6328 assert(IntrinsicID == Intrinsic::experimental_vector_histogram_add &&
6329 "Tried to lower unsupported histogram type");
6350 unsigned AS =
Ptr->getType()->getScalarType()->getPointerAddressSpace();
6376 Ops, MMO, IndexType);
6383void SelectionDAGBuilder::visitIntrinsicCall(
const CallInst &
I,
6384 unsigned Intrinsic) {
6391 if (
auto *FPOp = dyn_cast<FPMathOperator>(&
I))
6392 Flags.copyFMF(*FPOp);
6394 switch (Intrinsic) {
6397 visitTargetIntrinsic(
I, Intrinsic);
6399 case Intrinsic::vscale: {
6404 case Intrinsic::vastart: visitVAStart(
I);
return;
6405 case Intrinsic::vaend: visitVAEnd(
I);
return;
6406 case Intrinsic::vacopy: visitVACopy(
I);
return;
6407 case Intrinsic::returnaddress:
6412 case Intrinsic::addressofreturnaddress:
6417 case Intrinsic::sponentry:
6422 case Intrinsic::frameaddress:
6427 case Intrinsic::read_volatile_register:
6428 case Intrinsic::read_register: {
6432 DAG.
getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6440 case Intrinsic::write_register: {
6442 Value *RegValue =
I.getArgOperand(1);
6445 DAG.
getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6450 case Intrinsic::memcpy: {
6451 const auto &MCI = cast<MemCpyInst>(
I);
6456 Align DstAlign = MCI.getDestAlign().valueOrOne();
6457 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6458 Align Alignment = std::min(DstAlign, SrcAlign);
6459 bool isVol = MCI.isVolatile();
6465 Root, sdl, Op1, Op2, Op3, Alignment, isVol,
6468 updateDAGForMaybeTailCall(MC);
6471 case Intrinsic::memcpy_inline: {
6472 const auto &MCI = cast<MemCpyInlineInst>(
I);
6476 assert(isa<ConstantSDNode>(
Size) &&
"memcpy_inline needs constant size");
6478 Align DstAlign = MCI.getDestAlign().valueOrOne();
6479 Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6480 Align Alignment = std::min(DstAlign, SrcAlign);
6481 bool isVol = MCI.isVolatile();
6489 updateDAGForMaybeTailCall(MC);
6492 case Intrinsic::memset: {
6493 const auto &MSI = cast<MemSetInst>(
I);
6498 Align Alignment = MSI.getDestAlign().valueOrOne();
6499 bool isVol = MSI.isVolatile();
6503 Root, sdl, Op1, Op2, Op3, Alignment, isVol,
false,
6505 updateDAGForMaybeTailCall(MS);
6508 case Intrinsic::memset_inline: {
6509 const auto &MSII = cast<MemSetInlineInst>(
I);
6513 assert(isa<ConstantSDNode>(
Size) &&
"memset_inline needs constant size");
6515 Align DstAlign = MSII.getDestAlign().valueOrOne();
6516 bool isVol = MSII.isVolatile();
6523 updateDAGForMaybeTailCall(MC);
6526 case Intrinsic::memmove: {
6527 const auto &MMI = cast<MemMoveInst>(
I);
6532 Align DstAlign = MMI.getDestAlign().valueOrOne();
6533 Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6534 Align Alignment = std::min(DstAlign, SrcAlign);
6535 bool isVol = MMI.isVolatile();
6543 I.getAAMetadata(),
AA);
6544 updateDAGForMaybeTailCall(MM);
6547 case Intrinsic::memcpy_element_unordered_atomic: {
6553 Type *LengthTy =
MI.getLength()->getType();
6554 unsigned ElemSz =
MI.getElementSizeInBytes();
6560 updateDAGForMaybeTailCall(MC);
6563 case Intrinsic::memmove_element_unordered_atomic: {
6564 auto &
MI = cast<AtomicMemMoveInst>(
I);
6569 Type *LengthTy =
MI.getLength()->getType();
6570 unsigned ElemSz =
MI.getElementSizeInBytes();
6576 updateDAGForMaybeTailCall(MC);
6579 case Intrinsic::memset_element_unordered_atomic: {
6580 auto &
MI = cast<AtomicMemSetInst>(
I);
6585 Type *LengthTy =
MI.getLength()->getType();
6586 unsigned ElemSz =
MI.getElementSizeInBytes();
6591 updateDAGForMaybeTailCall(MC);
6594 case Intrinsic::call_preallocated_setup: {
6603 case Intrinsic::call_preallocated_arg: {
6618 case Intrinsic::dbg_declare: {
6619 const auto &DI = cast<DbgDeclareInst>(
I);
6622 if (AssignmentTrackingEnabled ||
6625 LLVM_DEBUG(
dbgs() <<
"SelectionDAG visiting dbg_declare: " << DI <<
"\n");
6631 assert(!DI.hasArgList() &&
"Only dbg.value should currently use DIArgList");
6636 case Intrinsic::dbg_label: {
6639 assert(Label &&
"Missing label");
6646 case Intrinsic::dbg_assign: {
6648 if (AssignmentTrackingEnabled)
6654 case Intrinsic::dbg_value: {
6656 if (AssignmentTrackingEnabled)
6676 SDNodeOrder, IsVariadic))
6682 case Intrinsic::eh_typeid_for: {
6691 case Intrinsic::eh_return_i32:
6692 case Intrinsic::eh_return_i64:
6700 case Intrinsic::eh_unwind_init:
6703 case Intrinsic::eh_dwarf_cfa:
6708 case Intrinsic::eh_sjlj_callsite: {
6710 ConstantInt *CI = cast<ConstantInt>(
I.getArgOperand(0));
6716 case Intrinsic::eh_sjlj_functioncontext: {
6720 cast<AllocaInst>(
I.getArgOperand(0)->stripPointerCasts());
6725 case Intrinsic::eh_sjlj_setjmp: {
6735 case Intrinsic::eh_sjlj_longjmp:
6739 case Intrinsic::eh_sjlj_setup_dispatch:
6743 case Intrinsic::masked_gather:
6744 visitMaskedGather(
I);
6746 case Intrinsic::masked_load:
6749 case Intrinsic::masked_scatter:
6750 visitMaskedScatter(
I);
6752 case Intrinsic::masked_store:
6753 visitMaskedStore(
I);
6755 case Intrinsic::masked_expandload:
6756 visitMaskedLoad(
I,
true );
6758 case Intrinsic::masked_compressstore:
6759 visitMaskedStore(
I,
true );
6761 case Intrinsic::powi:
6765 case Intrinsic::log:
6768 case Intrinsic::log2:
6772 case Intrinsic::log10:
6776 case Intrinsic::exp:
6779 case Intrinsic::exp2:
6783 case Intrinsic::pow:
6787 case Intrinsic::sqrt:
6788 case Intrinsic::fabs:
6789 case Intrinsic::sin:
6790 case Intrinsic::cos:
6791 case Intrinsic::tan:
6792 case Intrinsic::exp10:
6793 case Intrinsic::floor:
6794 case Intrinsic::ceil:
6795 case Intrinsic::trunc:
6796 case Intrinsic::rint:
6797 case Intrinsic::nearbyint:
6798 case Intrinsic::round:
6799 case Intrinsic::roundeven:
6800 case Intrinsic::canonicalize: {
6803 switch (Intrinsic) {
6805 case Intrinsic::sqrt: Opcode =
ISD::FSQRT;
break;
6806 case Intrinsic::fabs: Opcode =
ISD::FABS;
break;
6807 case Intrinsic::sin: Opcode =
ISD::FSIN;
break;
6808 case Intrinsic::cos: Opcode =
ISD::FCOS;
break;
6809 case Intrinsic::tan: Opcode =
ISD::FTAN;
break;
6810 case Intrinsic::exp10: Opcode =
ISD::FEXP10;
break;
6811 case Intrinsic::floor: Opcode =
ISD::FFLOOR;
break;
6812 case Intrinsic::ceil: Opcode =
ISD::FCEIL;
break;
6813 case Intrinsic::trunc: Opcode =
ISD::FTRUNC;
break;
6814 case Intrinsic::rint: Opcode =
ISD::FRINT;
break;
6816 case Intrinsic::round: Opcode =
ISD::FROUND;
break;
6827 case Intrinsic::lround:
6828 case Intrinsic::llround:
6829 case Intrinsic::lrint:
6830 case Intrinsic::llrint: {
6833 switch (Intrinsic) {
6835 case Intrinsic::lround: Opcode =
ISD::LROUND;
break;
6837 case Intrinsic::lrint: Opcode =
ISD::LRINT;
break;
6838 case Intrinsic::llrint: Opcode =
ISD::LLRINT;
break;
6847 case Intrinsic::minnum:
6853 case Intrinsic::maxnum:
6859 case Intrinsic::minimum:
6865 case Intrinsic::maximum:
6871 case Intrinsic::copysign:
6877 case Intrinsic::ldexp:
6883 case Intrinsic::frexp: {
6891 case Intrinsic::arithmetic_fence: {
6897 case Intrinsic::fma:
6903#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
6904 case Intrinsic::INTRINSIC:
6905#include "llvm/IR/ConstrainedOps.def"
6906 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(
I));
6908#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6909#include "llvm/IR/VPIntrinsics.def"
6910 visitVectorPredicationIntrinsic(cast<VPIntrinsic>(
I));
6912 case Intrinsic::fptrunc_round: {
6915 Metadata *MD = cast<MetadataAsValue>(
I.getArgOperand(1))->getMetadata();
6916 std::optional<RoundingMode> RoundMode =
6923 Flags.copyFMF(*cast<FPMathOperator>(&
I));
6935 case Intrinsic::fmuladd: {
6956 case Intrinsic::convert_to_fp16:
6963 case Intrinsic::convert_from_fp16:
6969 case Intrinsic::fptosi_sat: {
6976 case Intrinsic::fptoui_sat: {
6983 case Intrinsic::set_rounding:
6989 case Intrinsic::is_fpclass: {
6994 cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue());
6999 Flags.setNoFPExcept(
7000 !
F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
7015 case Intrinsic::get_fpenv: {
7030 int SPFI = cast<FrameIndexSDNode>(Temp.
getNode())->getIndex();
7037 Res =
DAG.
getLoad(EnvVT, sdl, Chain, Temp, MPI);
7043 case Intrinsic::set_fpenv: {
7057 int SPFI = cast<FrameIndexSDNode>(Temp.
getNode())->getIndex();
7060 Chain =
DAG.
getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
7070 case Intrinsic::reset_fpenv:
7073 case Intrinsic::get_fpmode:
7082 case Intrinsic::set_fpmode:
7087 case Intrinsic::reset_fpmode: {
7092 case Intrinsic::pcmarker: {
7097 case Intrinsic::readcyclecounter: {
7105 case Intrinsic::readsteadycounter: {
7113 case Intrinsic::bitreverse:
7118 case Intrinsic::bswap:
7123 case Intrinsic::cttz: {
7125 ConstantInt *CI = cast<ConstantInt>(
I.getArgOperand(1));
7131 case Intrinsic::ctlz: {
7133 ConstantInt *CI = cast<ConstantInt>(
I.getArgOperand(1));
7139 case Intrinsic::ctpop: {
7145 case Intrinsic::fshl:
7146 case Intrinsic::fshr: {
7147 bool IsFSHL =
Intrinsic == Intrinsic::fshl;
7151 EVT VT =
X.getValueType();
7162 case Intrinsic::sadd_sat: {
7168 case Intrinsic::uadd_sat: {
7174 case Intrinsic::ssub_sat: {
7180 case Intrinsic::usub_sat: {
7186 case Intrinsic::sshl_sat: {
7192 case Intrinsic::ushl_sat: {
7198 case Intrinsic::smul_fix:
7199 case Intrinsic::umul_fix:
7200 case Intrinsic::smul_fix_sat:
7201 case Intrinsic::umul_fix_sat: {
7209 case Intrinsic::sdiv_fix:
7210 case Intrinsic::udiv_fix:
7211 case Intrinsic::sdiv_fix_sat:
7212 case Intrinsic::udiv_fix_sat: {
7217 Op1, Op2, Op3,
DAG, TLI));
7220 case Intrinsic::smax: {
7226 case Intrinsic::smin: {
7232 case Intrinsic::umax: {
7238 case Intrinsic::umin: {
7244 case Intrinsic::abs: {
7250 case Intrinsic::scmp: {
7257 case Intrinsic::ucmp: {
7264 case Intrinsic::stacksave: {
7272 case Intrinsic::stackrestore:
7276 case Intrinsic::get_dynamic_area_offset: {
7291 case Intrinsic::stackguard: {
7312 case Intrinsic::stackprotector: {
7333 Chain, sdl, Src, FIN,
7340 case Intrinsic::objectsize:
7343 case Intrinsic::is_constant:
7346 case Intrinsic::annotation:
7347 case Intrinsic::ptr_annotation:
7348 case Intrinsic::launder_invariant_group:
7349 case Intrinsic::strip_invariant_group:
7354 case Intrinsic::assume:
7355 case Intrinsic::experimental_noalias_scope_decl:
7356 case Intrinsic::var_annotation:
7357 case Intrinsic::sideeffect:
7362 case Intrinsic::codeview_annotation: {
7367 Metadata *MD = cast<MetadataAsValue>(
I.getArgOperand(0))->getMetadata();
7374 case Intrinsic::init_trampoline: {
7375 const Function *
F = cast<Function>(
I.getArgOperand(1)->stripPointerCasts());
7390 case Intrinsic::adjust_trampoline:
7395 case Intrinsic::gcroot: {
7397 "only valid in functions with gc specified, enforced by Verifier");
7399 const Value *Alloca =
I.getArgOperand(0)->stripPointerCasts();
7400 const Constant *TypeMap = cast<Constant>(
I.getArgOperand(1));
7406 case Intrinsic::gcread:
7407 case Intrinsic::gcwrite:
7409 case Intrinsic::get_rounding:
7415 case Intrinsic::expect:
7420 case Intrinsic::ubsantrap:
7421 case Intrinsic::debugtrap:
7422 case Intrinsic::trap: {
7424 I.getAttributes().getFnAttr(
"trap-func-name").getValueAsString();
7425 if (TrapFuncName.
empty()) {
7426 switch (Intrinsic) {
7427 case Intrinsic::trap:
7430 case Intrinsic::debugtrap:
7433 case Intrinsic::ubsantrap:
7437 cast<ConstantInt>(
I.getArgOperand(0))->getZExtValue(), sdl,
7445 if (Intrinsic == Intrinsic::ubsantrap) {
7447 Args[0].Val =
I.getArgOperand(0);
7449 Args[0].Ty =
Args[0].Val->getType();
7453 CLI.setDebugLoc(sdl).setChain(
getRoot()).setLibCallee(
7464 case Intrinsic::allow_runtime_check:
7465 case Intrinsic::allow_ubsan_check:
7469 case Intrinsic::uadd_with_overflow:
7470 case Intrinsic::sadd_with_overflow:
7471 case Intrinsic::usub_with_overflow:
7472 case Intrinsic::ssub_with_overflow:
7473 case Intrinsic::umul_with_overflow:
7474 case Intrinsic::smul_with_overflow: {
7476 switch (Intrinsic) {
7478 case Intrinsic::uadd_with_overflow:
Op =
ISD::UADDO;
break;
7479 case Intrinsic::sadd_with_overflow:
Op =
ISD::SADDO;
break;
7480 case Intrinsic::usub_with_overflow:
Op =
ISD::USUBO;
break;
7481 case Intrinsic::ssub_with_overflow:
Op =
ISD::SSUBO;
break;
7482 case Intrinsic::umul_with_overflow:
Op =
ISD::UMULO;
break;
7483 case Intrinsic::smul_with_overflow:
Op =
ISD::SMULO;
break;
7489 EVT OverflowVT = MVT::i1;
7498 case Intrinsic::prefetch: {
7500 unsigned rw = cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue();
7513 std::nullopt, Flags);
7522 case Intrinsic::lifetime_start:
7523 case Intrinsic::lifetime_end: {
7524 bool IsStart = (
Intrinsic == Intrinsic::lifetime_start);
7529 const int64_t ObjectSize =
7530 cast<ConstantInt>(
I.getArgOperand(0))->getSExtValue();
7535 for (
const Value *Alloca : Allocas) {
7536 const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(Alloca);
7539 if (!LifetimeObject)
7559 case Intrinsic::pseudoprobe: {
7560 auto Guid = cast<ConstantInt>(
I.getArgOperand(0))->getZExtValue();
7561 auto Index = cast<ConstantInt>(
I.getArgOperand(1))->getZExtValue();
7562 auto Attr = cast<ConstantInt>(
I.getArgOperand(2))->getZExtValue();
7567 case Intrinsic::invariant_start:
7572 case Intrinsic::invariant_end:
7575 case Intrinsic::clear_cache: {
7580 {InputChain, StartVal, EndVal});
7585 case Intrinsic::donothing:
7586 case Intrinsic::seh_try_begin:
7587 case Intrinsic::seh_scope_begin:
7588 case Intrinsic::seh_try_end:
7589 case Intrinsic::seh_scope_end:
7592 case Intrinsic::experimental_stackmap:
7595 case Intrinsic::experimental_patchpoint_void:
7596 case Intrinsic::experimental_patchpoint:
7599 case Intrinsic::experimental_gc_statepoint:
7602 case Intrinsic::experimental_gc_result:
7603 visitGCResult(cast<GCResultInst>(
I));
7605 case Intrinsic::experimental_gc_relocate:
7606 visitGCRelocate(cast<GCRelocateInst>(
I));
7608 case Intrinsic::instrprof_cover:
7610 case Intrinsic::instrprof_increment:
7612 case Intrinsic::instrprof_timestamp:
7614 case Intrinsic::instrprof_value_profile:
7616 case Intrinsic::instrprof_mcdc_parameters:
7618 case Intrinsic::instrprof_mcdc_tvbitmap_update:
7620 case Intrinsic::localescape: {
7626 for (
unsigned Idx = 0, E =
I.arg_size();
Idx < E; ++
Idx) {
7627 Value *Arg =
I.getArgOperand(
Idx)->stripPointerCasts();
7628 if (isa<ConstantPointerNull>(Arg))
7632 "can only escape static allocas");
7638 TII->get(TargetOpcode::LOCAL_ESCAPE))
7646 case Intrinsic::localrecover: {
7651 auto *Fn = cast<Function>(
I.getArgOperand(0)->stripPointerCasts());
7652 auto *
Idx = cast<ConstantInt>(
I.getArgOperand(2));
7654 unsigned(
Idx->getLimitedValue(std::numeric_limits<int>::max()));
7676 case Intrinsic::eh_exceptionpointer:
7677 case Intrinsic::eh_exceptioncode: {
7679 const auto *CPI = cast<CatchPadInst>(
I.getArgOperand(0));
7684 if (Intrinsic == Intrinsic::eh_exceptioncode)
7689 case Intrinsic::xray_customevent: {
7718 case Intrinsic::xray_typedevent: {
7745 TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys, Ops);
7751 case Intrinsic::experimental_deoptimize:
7754 case Intrinsic::experimental_stepvector:
7757 case Intrinsic::vector_reduce_fadd:
7758 case Intrinsic::vector_reduce_fmul:
7759 case Intrinsic::vector_reduce_add:
7760 case Intrinsic::vector_reduce_mul:
7761 case Intrinsic::vector_reduce_and:
7762 case Intrinsic::vector_reduce_or:
7763 case Intrinsic::vector_reduce_xor:
7764 case Intrinsic::vector_reduce_smax:
7765 case Intrinsic::vector_reduce_smin:
7766 case Intrinsic::vector_reduce_umax:
7767 case Intrinsic::vector_reduce_umin:
7768 case Intrinsic::vector_reduce_fmax:
7769 case Intrinsic::vector_reduce_fmin:
7770 case Intrinsic::vector_reduce_fmaximum:
7771 case Intrinsic::vector_reduce_fminimum:
7772 visitVectorReduce(
I, Intrinsic);
7775 case Intrinsic::icall_branch_funnel: {
7784 "llvm.icall.branch.funnel operand must be a GlobalValue");
7787 struct BranchFunnelTarget {
7793 for (
unsigned Op = 1,
N =
I.arg_size();
Op !=
N;
Op += 2) {
7796 if (ElemBase !=
Base)
7798 "to the same GlobalValue");
7801 auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
7804 "llvm.icall.branch.funnel operand must be a GlobalValue");
7810 [](
const BranchFunnelTarget &T1,
const BranchFunnelTarget &T2) {
7811 return T1.Offset < T2.Offset;
7814 for (
auto &
T : Targets) {
7829 case Intrinsic::wasm_landingpad_index:
7835 case Intrinsic::aarch64_settag:
7836 case Intrinsic::aarch64_settag_zero: {
7838 bool ZeroMemory =
Intrinsic == Intrinsic::aarch64_settag_zero;
7847 case Intrinsic::amdgcn_cs_chain: {
7848 assert(
I.arg_size() == 5 &&
"Additional args not supported yet");
7849 assert(cast<ConstantInt>(
I.getOperand(4))->isZero() &&
7850 "Non-zero flags not supported yet");
7866 for (
unsigned Idx : {2, 3, 1}) {
7869 Arg.
Ty =
I.getOperand(
Idx)->getType();
7871 Args.push_back(Arg);
7874 assert(Args[0].IsInReg &&
"SGPR args should be marked inreg");
7875 assert(!Args[1].IsInReg &&
"VGPR args should not be marked inreg");
7876 Args[2].IsInReg =
true;
7881 .setCallee(
CC,
RetTy, Callee, std::move(Args))
7884 .setConvergent(
I.isConvergent());
7886 std::pair<SDValue, SDValue>
Result =
7890 "Should've lowered as tail call");
7895 case Intrinsic::ptrmask: {
7915 case Intrinsic::threadlocal_address: {
7919 case Intrinsic::get_active_lane_mask: {
7922 EVT ElementVT =
Index.getValueType();
7925 visitTargetIntrinsic(
I, Intrinsic);
7943 case Intrinsic::experimental_get_vector_length: {
7944 assert(cast<ConstantInt>(
I.getOperand(1))->getSExtValue() > 0 &&
7945 "Expected positive VF");
7946 unsigned VF = cast<ConstantInt>(
I.getOperand(1))->getZExtValue();
7947 bool IsScalable = cast<ConstantInt>(
I.getOperand(2))->isOne();
7953 visitTargetIntrinsic(
I, Intrinsic);
7962 if (CountVT.
bitsLT(VT)) {
7977 case Intrinsic::experimental_cttz_elts: {
7980 EVT OpVT =
Op.getValueType();
7983 visitTargetIntrinsic(
I, Intrinsic);
7998 !cast<ConstantSDNode>(
getValue(
I.getOperand(1)))->isZero();
8000 if (isa<ScalableVectorType>(
I.getOperand(0)->getType()))
8028 case Intrinsic::vector_insert: {
8036 if (
Index.getValueType() != VectorIdxTy)
8044 case Intrinsic::vector_extract: {
8052 if (
Index.getValueType() != VectorIdxTy)
8059 case Intrinsic::vector_reverse:
8060 visitVectorReverse(
I);
8062 case Intrinsic::vector_splice:
8063 visitVectorSplice(
I);
8065 case Intrinsic::callbr_landingpad:
8066 visitCallBrLandingPad(
I);
8068 case Intrinsic::vector_interleave2:
8069 visitVectorInterleave(
I);
8071 case Intrinsic::vector_deinterleave2:
8072 visitVectorDeinterleave(
I);
8074 case Intrinsic::experimental_convergence_anchor:
8075 case Intrinsic::experimental_convergence_entry:
8076 case Intrinsic::experimental_convergence_loop:
8077 visitConvergenceControl(
I, Intrinsic);
8079 case Intrinsic::experimental_vector_histogram_add: {
8080 visitVectorHistogram(
I, Intrinsic);
8086void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
8114 PendingConstrainedFP.push_back(OutChain);
8120 PendingConstrainedFPStrict.push_back(OutChain);
8132 Flags.setNoFPExcept(
true);
8134 if (
auto *FPOp = dyn_cast<FPMathOperator>(&FPI))
8135 Flags.copyFMF(*FPOp);
8140#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
8141 case Intrinsic::INTRINSIC: \
8142 Opcode = ISD::STRICT_##DAGN; \
8144#include "llvm/IR/ConstrainedOps.def"
8145 case Intrinsic::experimental_constrained_fmuladd: {
8152 pushOutChain(
Mul, EB);
8173 auto *
FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
8183 pushOutChain(Result, EB);
8190 std::optional<unsigned> ResOPC;
8192 case Intrinsic::vp_ctlz: {
8193 bool IsZeroUndef = cast<ConstantInt>(VPIntrin.
getArgOperand(1))->isOne();
8194 ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
8197 case Intrinsic::vp_cttz: {
8198 bool IsZeroUndef = cast<ConstantInt>(VPIntrin.
getArgOperand(1))->isOne();
8199 ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
8202 case Intrinsic::vp_cttz_elts: {
8203 bool IsZeroPoison = cast<ConstantInt>(VPIntrin.
getArgOperand(1))->isOne();
8204 ResOPC = IsZeroPoison ? ISD::VP_CTTZ_ELTS_ZERO_UNDEF : ISD::VP_CTTZ_ELTS;
8207#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD) \
8208 case Intrinsic::VPID: \
8209 ResOPC = ISD::VPSD; \
8211#include "llvm/IR/VPIntrinsics.def"
8216 "Inconsistency: no SDNode available for this VPIntrinsic!");
8218 if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
8219 *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
8221 return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
8222 : ISD::VP_REDUCE_FMUL;
8228void SelectionDAGBuilder::visitVPLoad(
8254void SelectionDAGBuilder::visitVPGather(
8290 {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
8296void SelectionDAGBuilder::visitVPStore(
8300 EVT VT = OpValues[0].getValueType();
8318void SelectionDAGBuilder::visitVPScatter(
8323 EVT VT = OpValues[0].getValueType();
8353 {getMemoryRoot(), OpValues[0], Base, Index, Scale,
8354 OpValues[2], OpValues[3]},
8360void SelectionDAGBuilder::visitVPStridedLoad(
8379 OpValues[2], OpValues[3], MMO,
8387void SelectionDAGBuilder::visitVPStridedStore(
8391 EVT VT = OpValues[0].getValueType();
8403 DAG.
getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8411void SelectionDAGBuilder::visitVPCmp(
const VPCmpIntrinsic &VPIntrin) {
8436 "Unexpected target EVL type");
8445void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8452 if (
const auto *CmpI = dyn_cast<VPCmpIntrinsic>(&VPIntrin))
8453 return visitVPCmp(*CmpI);
8464 "Unexpected target EVL type");
8468 for (
unsigned I = 0;
I < VPIntrin.
arg_size(); ++
I) {
8470 if (
I == EVLParamPos)
8478 if (
auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8485 visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8487 case ISD::VP_GATHER:
8488 visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8490 case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8491 visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8494 visitVPStore(VPIntrin, OpValues);
8496 case ISD::VP_SCATTER:
8497 visitVPScatter(VPIntrin, OpValues);
8499 case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8500 visitVPStridedStore(VPIntrin, OpValues);
8502 case ISD::VP_FMULADD: {
8503 assert(OpValues.
size() == 5 &&
"Unexpected number of operands");
8505 if (
auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8512 ISD::VP_FMUL,
DL, VTs,
8513 {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8516 {
Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8521 case ISD::VP_IS_FPCLASS: {
8524 auto Constant = OpValues[1]->getAsZExtVal();
8527 {OpValues[0],
Check, OpValues[2], OpValues[3]});
8531 case ISD::VP_INTTOPTR: {
8542 case ISD::VP_PTRTOINT: {
8557 case ISD::VP_CTLZ_ZERO_UNDEF:
8559 case ISD::VP_CTTZ_ZERO_UNDEF:
8560 case ISD::VP_CTTZ_ELTS_ZERO_UNDEF:
8561 case ISD::VP_CTTZ_ELTS: {
8563 DAG.
getNode(Opcode,
DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8583 if (CallSiteIndex) {
8597 assert(BeginLabel &&
"BeginLabel should've been set");
8612 assert(
II &&
"II should've been set");
8623std::pair<SDValue, SDValue>
8637 std::pair<SDValue, SDValue> Result = TLI.
LowerCallTo(CLI);
8640 "Non-null chain expected with non-tail call!");
8641 assert((Result.second.getNode() || !Result.first.getNode()) &&
8642 "Null value expected with tail call!");
8644 if (!Result.second.getNode()) {
8651 PendingExports.clear();
8666 bool isTailCall,
bool isMustTailCall,
8676 const Value *SwiftErrorVal =
nullptr;
8682 auto *Caller = CB.
getParent()->getParent();
8683 if (Caller->getFnAttribute(
"disable-tail-calls").getValueAsString() ==
8684 "true" && !isMustTailCall)
8691 Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
8700 if (V->getType()->isEmptyTy())
8704 Entry.Node = ArgNode; Entry.Ty = V->getType();
8706 Entry.setAttributes(&CB,
I - CB.
arg_begin());
8718 Args.push_back(Entry);
8722 if (Entry.IsSRet && isa<Instruction>(V))
8730 Value *V = Bundle->Inputs[0];
8732 Entry.Node = ArgNode;
8733 Entry.Ty = V->getType();
8734 Entry.IsCFGuardTarget =
true;
8735 Args.push_back(Entry);
8753 "Target doesn't support calls with kcfi operand bundles.");
8754 CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
8761 auto *Token = Bundle->Inputs[0].get();
8762 ConvControlToken =
getValue(Token);
8780 "This target doesn't support calls with ptrauth operand bundles.");
8784 std::pair<SDValue, SDValue> Result =
lowerInvokable(CLI, EHPadBB);
8786 if (Result.first.getNode()) {
8808 if (
const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
8827 bool ConstantMemory =
false;
8832 ConstantMemory =
true;
8843 if (!ConstantMemory)
8850void SelectionDAGBuilder::processIntegerCallValue(
const Instruction &
I,
8864bool SelectionDAGBuilder::visitMemCmpBCmpCall(
const CallInst &
I) {
8865 const Value *
LHS =
I.getArgOperand(0), *
RHS =
I.getArgOperand(1);
8879 if (Res.first.getNode()) {
8880 processIntegerCallValue(
I, Res.first,
true);
8894 auto hasFastLoadsAndCompare = [&](
unsigned NumBits) {
8917 switch (NumBitsToCompare) {
8929 LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
8947 processIntegerCallValue(
I, Cmp,
false);
8956bool SelectionDAGBuilder::visitMemChrCall(
const CallInst &
I) {
8957 const Value *Src =
I.getArgOperand(0);
8962 std::pair<SDValue, SDValue> Res =
8966 if (Res.first.getNode()) {
8980bool SelectionDAGBuilder::visitMemPCpyCall(
const CallInst &
I) {
8988 Align Alignment = std::min(DstAlign, SrcAlign);
9002 "** memcpy should not be lowered as TailCall in mempcpy context **");
9020bool SelectionDAGBuilder::visitStrCpyCall(
const CallInst &
I,
bool isStpcpy) {
9021 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9024 std::pair<SDValue, SDValue> Res =
9029 if (Res.first.getNode()) {
9043bool SelectionDAGBuilder::visitStrCmpCall(
const CallInst &
I) {
9044 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9047 std::pair<SDValue, SDValue> Res =
9052 if (Res.first.getNode()) {
9053 processIntegerCallValue(
I, Res.first,
true);
9066bool SelectionDAGBuilder::visitStrLenCall(
const CallInst &
I) {
9067 const Value *Arg0 =
I.getArgOperand(0);
9070 std::pair<SDValue, SDValue> Res =
9073 if (Res.first.getNode()) {
9074 processIntegerCallValue(
I, Res.first,
false);
9087bool SelectionDAGBuilder::visitStrNLenCall(
const CallInst &
I) {
9088 const Value *Arg0 =
I.getArgOperand(0), *Arg1 =
I.getArgOperand(1);
9091 std::pair<SDValue, SDValue> Res =
9095 if (Res.first.getNode()) {
9096 processIntegerCallValue(
I, Res.first,
false);
9109bool SelectionDAGBuilder::visitUnaryFloatCall(
const CallInst &
I,
9112 if (!
I.onlyReadsMemory())
9116 Flags.copyFMF(cast<FPMathOperator>(
I));
9129bool SelectionDAGBuilder::visitBinaryFloatCall(
const CallInst &
I,
9132 if (!
I.onlyReadsMemory())
9136 Flags.copyFMF(cast<FPMathOperator>(
I));
9145void SelectionDAGBuilder::visitCall(
const CallInst &
I) {
9147 if (
I.isInlineAsm()) {
9155 if (
F->isDeclaration()) {
9157 unsigned IID =
F->getIntrinsicID();
9160 IID =
II->getIntrinsicID(
F);
9163 visitIntrinsicCall(
I, IID);
9172 if (!
I.isNoBuiltin() && !
I.isStrictFP() && !
F->hasLocalLinkage() &&
9178 if (visitMemCmpBCmpCall(
I))
9181 case LibFunc_copysign:
9182 case LibFunc_copysignf:
9183 case LibFunc_copysignl:
9186 if (
I.onlyReadsMemory()) {
9190 LHS.getValueType(), LHS, RHS));
9233 case LibFunc_sqrt_finite:
9234 case LibFunc_sqrtf_finite:
9235 case LibFunc_sqrtl_finite:
9240 case LibFunc_floorf:
9241 case LibFunc_floorl:
9245 case LibFunc_nearbyint:
9246 case LibFunc_nearbyintf:
9247 case LibFunc_nearbyintl:
9264 case LibFunc_roundf:
9265 case LibFunc_roundl:
9270 case LibFunc_truncf:
9271 case LibFunc_truncl:
9288 case LibFunc_exp10f:
9289 case LibFunc_exp10l:
9294 case LibFunc_ldexpf:
9295 case LibFunc_ldexpl:
9299 case LibFunc_memcmp:
9300 if (visitMemCmpBCmpCall(
I))
9303 case LibFunc_mempcpy:
9304 if (visitMemPCpyCall(
I))
9307 case LibFunc_memchr:
9308 if (visitMemChrCall(
I))
9311 case LibFunc_strcpy:
9312 if (visitStrCpyCall(
I,
false))
9315 case LibFunc_stpcpy:
9316 if (visitStrCpyCall(
I,
true))
9319 case LibFunc_strcmp:
9320 if (visitStrCmpCall(
I))
9323 case LibFunc_strlen:
9324 if (visitStrLenCall(
I))
9327 case LibFunc_strnlen:
9328 if (visitStrNLenCall(
I))
9343 assert(!
I.hasOperandBundlesOtherThan(
9344 {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
9345 LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated,
9346 LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_kcfi,
9347 LLVMContext::OB_convergencectrl}) &&
9348 "Cannot lower calls with arbitrary operand bundles!");
9352 if (
I.hasDeoptState())
9368 const auto *Key = cast<ConstantInt>(PAB->Inputs[0]);
9369 const Value *Discriminator = PAB->Inputs[1];
9371 assert(Key->getType()->isIntegerTy(32) &&
"Invalid ptrauth key");
9372 assert(Discriminator->getType()->isIntegerTy(64) &&
9373 "Invalid ptrauth discriminator");
9376 assert(!isa<Function>(CalleeV) &&
"invalid direct ptrauth call");
9411 for (
const auto &Code : Codes)
9426 SDISelAsmOperandInfo &MatchingOpInfo,
9428 if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
9434 std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9436 OpInfo.ConstraintVT);
9437 std::pair<unsigned, const TargetRegisterClass *> InputRC =
9439 MatchingOpInfo.ConstraintVT);
9440 if ((OpInfo.ConstraintVT.isInteger() !=
9441 MatchingOpInfo.ConstraintVT.isInteger()) ||
9442 (MatchRC.second != InputRC.second)) {
9445 " with a matching output constraint of"
9446 " incompatible type!");
9448 MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9455 SDISelAsmOperandInfo &OpInfo,
9468 const Value *OpVal = OpInfo.CallOperandVal;
9469 if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
9470 isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
9482 TySize,
DL.getPrefTypeAlign(Ty),
false);
9484 Chain = DAG.
getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9487 OpInfo.CallOperand = StackSlot;
9500static std::optional<unsigned>
9502 SDISelAsmOperandInfo &OpInfo,
9503 SDISelAsmOperandInfo &RefOpInfo) {
9514 return std::nullopt;
9518 unsigned AssignedReg;
9521 &
TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
9524 return std::nullopt;
9529 const MVT RegVT = *
TRI.legalclasstypes_begin(*RC);
9531 if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
9540 !
TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
9545 if (RegVT.
getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
9550 OpInfo.CallOperand =
9552 OpInfo.ConstraintVT = RegVT;
9556 }
else if (RegVT.
isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
9559 OpInfo.CallOperand =
9561 OpInfo.ConstraintVT = VT;
9568 if (OpInfo.isMatchingInputConstraint())
9569 return std::nullopt;
9571 EVT ValueVT = OpInfo.ConstraintVT;
9572 if (OpInfo.ConstraintVT == MVT::Other)
9576 unsigned NumRegs = 1;
9577 if (OpInfo.ConstraintVT != MVT::Other)
9592 I = std::find(
I, RC->
end(), AssignedReg);
9593 if (
I == RC->
end()) {
9596 return {AssignedReg};
9600 for (; NumRegs; --NumRegs, ++
I) {
9601 assert(
I != RC->
end() &&
"Ran out of registers to allocate!");
9606 OpInfo.AssignedRegs =
RegsForValue(Regs, RegVT, ValueVT);
9607 return std::nullopt;
9612 const std::vector<SDValue> &AsmNodeOperands) {
9615 for (; OperandNo; --OperandNo) {
9617 unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
9620 (
F.isRegDefKind() ||
F.isRegDefEarlyClobberKind() ||
F.isMemKind()) &&
9621 "Skipped past definitions?");
9622 CurOp +=
F.getNumOperandRegisters() + 1;
9633 explicit ExtraFlags(
const CallBase &Call) {
9635 if (
IA->hasSideEffects())
9637 if (
IA->isAlignStack())
9639 if (
Call.isConvergent())
9660 unsigned get()
const {
return Flags; }
9667 if (
auto *GA = dyn_cast<GlobalAddressSDNode>(
Op)) {
9668 auto Fn = dyn_cast_or_null<Function>(GA->getGlobal());
9683void SelectionDAGBuilder::visitInlineAsm(
const CallBase &Call,
9696 bool HasSideEffect =
IA->hasSideEffects();
9697 ExtraFlags ExtraInfo(Call);
9699 for (
auto &
T : TargetConstraints) {
9700 ConstraintOperands.
push_back(SDISelAsmOperandInfo(
T));
9701 SDISelAsmOperandInfo &OpInfo = ConstraintOperands.
back();
9703 if (OpInfo.CallOperandVal)
9704 OpInfo.CallOperand =
getValue(OpInfo.CallOperandVal);
9707 HasSideEffect = OpInfo.hasMemory(TLI);
9716 OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
9719 return emitInlineAsmError(Call,
"constraint '" +
Twine(
T.ConstraintCode) +
9720 "' expects an integer constant "
9723 ExtraInfo.update(
T);
9730 bool EmitEHLabels = isa<InvokeInst>(Call);
9732 assert(EHPadBB &&
"InvokeInst must have an EHPadBB");
9734 bool IsCallBr = isa<CallBrInst>(Call);
9736 if (IsCallBr || EmitEHLabels) {
9745 Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
9750 IA->collectAsmStrs(AsmStrs);
9753 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9761 if (OpInfo.hasMatchingInput()) {
9762 SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
9793 if (OpInfo.isIndirect &&
isFunction(OpInfo.CallOperand) &&
9796 OpInfo.isIndirect =
false;
9803 !OpInfo.isIndirect) {
9804 assert((OpInfo.isMultipleAlternative ||
9806 "Can only indirectify direct input operands!");
9812 OpInfo.CallOperandVal =
nullptr;
9815 OpInfo.isIndirect =
true;
9821 std::vector<SDValue> AsmNodeOperands;
9822 AsmNodeOperands.push_back(
SDValue());
9829 const MDNode *SrcLoc =
Call.getMetadata(
"srcloc");
9839 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9841 SDISelAsmOperandInfo &RefOpInfo =
9842 OpInfo.isMatchingInputConstraint()
9843 ? ConstraintOperands[OpInfo.getMatchedOperand()]
9845 const auto RegError =
9850 const char *
RegName =
TRI.getName(*RegError);
9851 emitInlineAsmError(Call,
"register '" +
Twine(
RegName) +
9852 "' allocated for constraint '" +
9853 Twine(OpInfo.ConstraintCode) +
9854 "' does not match required type");
9858 auto DetectWriteToReservedRegister = [&]() {
9861 for (
unsigned Reg : OpInfo.AssignedRegs.Regs) {
9863 TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
9865 emitInlineAsmError(Call,
"write to reserved register '" +
9874 !OpInfo.isMatchingInputConstraint())) &&
9875 "Only address as input operand is allowed.");
9877 switch (OpInfo.Type) {
9883 "Failed to convert memory constraint code to constraint id.");
9887 OpFlags.setMemConstraint(ConstraintID);
9890 AsmNodeOperands.push_back(OpInfo.CallOperand);
9895 if (OpInfo.AssignedRegs.Regs.empty()) {
9897 Call,
"couldn't allocate output register for constraint '" +
9898 Twine(OpInfo.ConstraintCode) +
"'");
9902 if (DetectWriteToReservedRegister())
9907 OpInfo.AssignedRegs.AddInlineAsmOperands(
9916 SDValue InOperandVal = OpInfo.CallOperand;
9918 if (OpInfo.isMatchingInputConstraint()) {
9924 if (
Flag.isRegDefKind() ||
Flag.isRegDefEarlyClobberKind()) {
9925 if (OpInfo.isIndirect) {
9927 emitInlineAsmError(Call,
"inline asm not supported yet: "
9928 "don't know how to handle tied "
9929 "indirect register inputs");
9937 auto *
R = cast<RegisterSDNode>(AsmNodeOperands[CurOp+1]);
9939 MVT RegVT =
R->getSimpleValueType(0);
9943 :
TRI.getMinimalPhysRegClass(TiedReg);
9944 for (
unsigned i = 0, e =
Flag.getNumOperandRegisters(); i != e; ++i)
9951 MatchedRegs.getCopyToRegs(InOperandVal,
DAG, dl, Chain, &Glue, &Call);
9953 OpInfo.getMatchedOperand(), dl,
DAG,
9958 assert(
Flag.isMemKind() &&
"Unknown matching constraint!");
9960 "Unexpected number of operands");
9963 Flag.clearMemConstraint();
9964 Flag.setMatchingOp(OpInfo.getMatchedOperand());
9967 AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
9978 std::vector<SDValue> Ops;
9983 if (isa<ConstantSDNode>(InOperandVal)) {
9984 emitInlineAsmError(Call,
"value out of range for constraint '" +
9985 Twine(OpInfo.ConstraintCode) +
"'");
9989 emitInlineAsmError(Call,
9990 "invalid operand for inline asm constraint '" +
9991 Twine(OpInfo.ConstraintCode) +
"'");
10004 assert((OpInfo.isIndirect ||
10006 "Operand must be indirect to be a mem!");
10009 "Memory operands expect pointer values");
10014 "Failed to convert memory constraint code to constraint id.");
10018 ResOpType.setMemConstraint(ConstraintID);
10022 AsmNodeOperands.push_back(InOperandVal);
10030 "Failed to convert memory constraint code to constraint id.");
10034 SDValue AsmOp = InOperandVal;
10036 auto *GA = cast<GlobalAddressSDNode>(InOperandVal);
10044 ResOpType.setMemConstraint(ConstraintID);
10046 AsmNodeOperands.push_back(
10049 AsmNodeOperands.push_back(AsmOp);
10055 emitInlineAsmError(Call,
"unknown asm constraint '" +
10056 Twine(OpInfo.ConstraintCode) +
"'");
10061 if (OpInfo.isIndirect) {
10062 emitInlineAsmError(
10063 Call,
"Don't know how to handle indirect register inputs yet "
10064 "for constraint '" +
10065 Twine(OpInfo.ConstraintCode) +
"'");
10070 if (OpInfo.AssignedRegs.Regs.empty()) {
10071 emitInlineAsmError(Call,
10072 "couldn't allocate input reg for constraint '" +
10073 Twine(OpInfo.ConstraintCode) +
"'");
10077 if (DetectWriteToReservedRegister())
10082 OpInfo.AssignedRegs.getCopyToRegs(InOperandVal,
DAG, dl, Chain, &Glue,
10086 0, dl,
DAG, AsmNodeOperands);
10092 if (!OpInfo.AssignedRegs.Regs.empty())
10102 if (Glue.
getNode()) AsmNodeOperands.push_back(Glue);
10106 DAG.
getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
10117 if (
StructType *StructResult = dyn_cast<StructType>(CallResultType))
10118 ResultTypes = StructResult->elements();
10119 else if (!CallResultType->
isVoidTy())
10120 ResultTypes =
ArrayRef(CallResultType);
10122 auto CurResultType = ResultTypes.
begin();
10123 auto handleRegAssign = [&](
SDValue V) {
10124 assert(CurResultType != ResultTypes.
end() &&
"Unexpected value");
10125 assert((*CurResultType)->isSized() &&
"Unexpected unsized type");
10138 if (ResultVT !=
V.getValueType() &&
10141 else if (ResultVT !=
V.getValueType() && ResultVT.
isInteger() &&
10142 V.getValueType().isInteger()) {
10148 assert(ResultVT ==
V.getValueType() &&
"Asm result value mismatch!");
10154 for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
10158 if (OpInfo.AssignedRegs.Regs.empty())
10161 switch (OpInfo.ConstraintType) {
10165 Chain, &Glue, &Call);
10177 assert(
false &&
"Unexpected unknown constraint");
10181 if (OpInfo.isIndirect) {
10182 const Value *
Ptr = OpInfo.CallOperandVal;
10183 assert(
Ptr &&
"Expected value CallOperandVal for indirect asm operand");
10189 assert(!
Call.getType()->isVoidTy() &&
"Bad inline asm!");
10192 handleRegAssign(V);
10194 handleRegAssign(Val);
10200 if (!ResultValues.
empty()) {
10201 assert(CurResultType == ResultTypes.
end() &&
10202 "Mismatch in number of ResultTypes");
10204 "Mismatch in number of output operands in asm result");
10212 if (!OutChains.
empty())
10215 if (EmitEHLabels) {
10216 Chain = lowerEndEH(Chain, cast<InvokeInst>(&Call), EHPadBB, BeginLabel);
10220 if (ResultValues.
empty() || HasSideEffect || !OutChains.
empty() || IsCallBr ||
10225void SelectionDAGBuilder::emitInlineAsmError(
const CallBase &Call,
10226 const Twine &Message) {
10235 if (ValueVTs.
empty())
10239 for (
unsigned i = 0, e = ValueVTs.
size(); i != e; ++i)
10245void SelectionDAGBuilder::visitVAStart(
const CallInst &
I) {
10252void SelectionDAGBuilder::visitVAArg(
const VAArgInst &
I) {
10258 DL.getABITypeAlign(
I.getType()).value());
10261 if (
I.getType()->isPointerTy())
10267void SelectionDAGBuilder::visitVAEnd(
const CallInst &
I) {
10274void SelectionDAGBuilder::visitVACopy(
const CallInst &
I) {
10286 std::optional<ConstantRange> CR =
getRange(
I);
10288 if (!CR || CR->isFullSet() || CR->isEmptySet() || CR->isUpperWrapped())
10291 APInt Lo = CR->getUnsignedMin();
10292 if (!
Lo.isMinValue())
10295 APInt Hi = CR->getUnsignedMax();
10296 unsigned Bits = std::max(
Hi.getActiveBits(),
10305 unsigned NumVals =
Op.getNode()->getNumValues();
10312 for (
unsigned I = 1;
I != NumVals; ++
I)
10326 unsigned ArgIdx,
unsigned NumArgs,
SDValue Callee,
Type *ReturnTy,
10329 Args.reserve(NumArgs);
10333 for (
unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
10334 ArgI != ArgE; ++ArgI) {
10335 const Value *V = Call->getOperand(ArgI);
10337 assert(!V->getType()->isEmptyTy() &&
"Empty type passed to intrinsic.");
10341 Entry.Ty = V->getType();
10342 Entry.setAttributes(Call, ArgI);
10343 Args.push_back(Entry);
10348 .
setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
10377 for (
unsigned I = StartIdx;
I < Call.arg_size();
I++) {
10392void SelectionDAGBuilder::visitStackmap(
const CallInst &CI) {
10426 assert(
ID.getValueType() == MVT::i64);
10457void SelectionDAGBuilder::visitPatchpoint(
const CallBase &CB,
10473 if (
auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
10476 else if (
auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
10478 SDLoc(SymbolicCallee),
10479 SymbolicCallee->getValueType(0));
10489 "Not enough arguments provided to the patchpoint intrinsic");
10492 unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
10510 "Expected a callseq node.");
10512 bool HasGlue =
Call->getGluedNode();
10542 unsigned NumCallRegArgs =
Call->getNumOperands() - (HasGlue ? 4 : 3);
10543 NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
10552 for (
unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i !=
e; ++i)
10563 if (IsAnyRegCC && HasDef) {
10568 assert(ValueVTs.
size() == 1 &&
"Expected only one return value type.");
10592 if (IsAnyRegCC && HasDef) {
10604void SelectionDAGBuilder::visitVectorReduce(
const CallInst &
I,
10605 unsigned Intrinsic) {
10609 if (
I.arg_size() > 1)
10615 if (
auto *FPMO = dyn_cast<FPMathOperator>(&
I))
10618 switch (Intrinsic) {
10619 case Intrinsic::vector_reduce_fadd:
10627 case Intrinsic::vector_reduce_fmul:
10635 case Intrinsic::vector_reduce_add:
10638 case Intrinsic::vector_reduce_mul:
10641 case Intrinsic::vector_reduce_and:
10644 case Intrinsic::vector_reduce_or:
10647 case Intrinsic::vector_reduce_xor:
10650 case Intrinsic::vector_reduce_smax:
10653 case Intrinsic::vector_reduce_smin:
10656 case Intrinsic::vector_reduce_umax:
10659 case Intrinsic::vector_reduce_umin:
10662 case Intrinsic::vector_reduce_fmax:
10665 case Intrinsic::vector_reduce_fmin:
10668 case Intrinsic::vector_reduce_fmaximum:
10671 case Intrinsic::vector_reduce_fminimum:
10685 Attrs.push_back(Attribute::SExt);
10687 Attrs.push_back(Attribute::ZExt);
10689 Attrs.push_back(Attribute::InReg);
10699std::pair<SDValue, SDValue>
10713 RetTys.
swap(OldRetTys);
10714 Offsets.swap(OldOffsets);
10716 for (
size_t i = 0, e = OldRetTys.
size(); i != e; ++i) {
10717 EVT RetVT = OldRetTys[i];
10721 unsigned RegisterVTByteSZ = RegisterVT.
getSizeInBits() / 8;
10722 RetTys.
append(NumRegs, RegisterVT);
10723 for (
unsigned j = 0; j != NumRegs; ++j)
10736 int DemoteStackIdx = -100;
10747 DL.getAllocaAddrSpace());
10751 Entry.Node = DemoteStackSlot;
10752 Entry.Ty = StackSlotPtrType;
10753 Entry.IsSExt =
false;
10754 Entry.IsZExt =
false;
10755 Entry.IsInReg =
false;
10756 Entry.IsSRet =
true;
10757 Entry.IsNest =
false;
10758 Entry.IsByVal =
false;
10759 Entry.IsByRef =
false;
10760 Entry.IsReturned =
false;
10761 Entry.IsSwiftSelf =
false;
10762 Entry.IsSwiftAsync =
false;
10763 Entry.IsSwiftError =
false;
10764 Entry.IsCFGuardTarget =
false;
10765 Entry.Alignment = Alignment;
10777 for (
unsigned I = 0, E = RetTys.
size();
I != E; ++
I) {
10779 if (NeedsRegBlock) {
10780 Flags.setInConsecutiveRegs();
10781 if (
I == RetTys.
size() - 1)
10782 Flags.setInConsecutiveRegsLast();
10784 EVT VT = RetTys[
I];
10789 for (
unsigned i = 0; i != NumRegs; ++i) {
10791 MyFlags.
Flags = Flags;
10792 MyFlags.
VT = RegisterVT;
10793 MyFlags.
ArgVT = VT;
10798 cast<PointerType>(CLI.
RetTy)->getAddressSpace());
10806 CLI.
Ins.push_back(MyFlags);
10820 CLI.
Ins.push_back(MyFlags);
10828 for (
unsigned i = 0, e = Args.size(); i != e; ++i) {
10832 Type *FinalType = Args[i].Ty;
10833 if (Args[i].IsByVal)
10834 FinalType = Args[i].IndirectType;
10837 for (
unsigned Value = 0, NumValues = ValueVTs.
size();
Value != NumValues;
10842 Args[i].Node.getResNo() +
Value);
10849 Flags.setOrigAlign(OriginalAlignment);
10851 if (Args[i].Ty->isPointerTy()) {
10852 Flags.setPointer();
10853 Flags.setPointerAddrSpace(
10854 cast<PointerType>(Args[i].Ty)->getAddressSpace());
10856 if (Args[i].IsZExt)
10858 if (Args[i].IsSExt)
10860 if (Args[i].IsInReg) {
10864 isa<StructType>(FinalType)) {
10867 Flags.setHvaStart();
10873 if (Args[i].IsSRet)
10875 if (Args[i].IsSwiftSelf)
10876 Flags.setSwiftSelf();
10877 if (Args[i].IsSwiftAsync)
10878 Flags.setSwiftAsync();
10879 if (Args[i].IsSwiftError)
10880 Flags.setSwiftError();
10881 if (Args[i].IsCFGuardTarget)
10882 Flags.setCFGuardTarget();
10883 if (Args[i].IsByVal)
10885 if (Args[i].IsByRef)
10887 if (Args[i].IsPreallocated) {
10888 Flags.setPreallocated();
10896 if (Args[i].IsInAlloca) {
10897 Flags.setInAlloca();
10906 if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
10907 unsigned FrameSize =
DL.getTypeAllocSize(Args[i].IndirectType);
10908 Flags.setByValSize(FrameSize);
10911 if (
auto MA = Args[i].Alignment)
10915 }
else if (
auto MA = Args[i].Alignment) {
10918 MemAlign = OriginalAlignment;
10920 Flags.setMemAlign(MemAlign);
10921 if (Args[i].IsNest)
10924 Flags.setInConsecutiveRegs();
10933 if (Args[i].IsSExt)
10935 else if (Args[i].IsZExt)
10940 if (Args[i].IsReturned && !
Op.getValueType().isVector() &&
10945 Args[i].Ty->getPointerAddressSpace())) &&
10946 RetTys.
size() == NumValues &&
"unexpected use of 'returned'");
10959 CLI.
RetZExt == Args[i].IsZExt))
10960 Flags.setReturned();
10966 for (
unsigned j = 0; j != NumParts; ++j) {
10973 j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
10974 if (NumParts > 1 && j == 0)
10978 if (j == NumParts - 1)
10982 CLI.
Outs.push_back(MyFlags);
10983 CLI.
OutVals.push_back(Parts[j]);
10986 if (NeedsRegBlock &&
Value == NumValues - 1)
10987 CLI.
Outs[CLI.
Outs.size() - 1].Flags.setInConsecutiveRegsLast();
10999 "LowerCall didn't return a valid chain!");
11001 "LowerCall emitted a return value for a tail call!");
11003 "LowerCall didn't emit the correct number of values!");
11015 for (
unsigned i = 0, e = CLI.
Ins.size(); i != e; ++i) {
11016 assert(InVals[i].
getNode() &&
"LowerCall emitted a null value!");
11017 assert(
EVT(CLI.
Ins[i].VT) == InVals[i].getValueType() &&
11018 "LowerCall emitted a value with the wrong type!");
11031 assert(PVTs.
size() == 1 &&
"Pointers should fit in one register");
11032 EVT PtrVT = PVTs[0];
11034 unsigned NumValues = RetTys.
size();
11035 ReturnValues.
resize(NumValues);
11041 Flags.setNoUnsignedWrap(
true);
11045 for (
unsigned i = 0; i < NumValues; ++i) {
11052 DemoteStackIdx, Offsets[i]),
11054 ReturnValues[i] = L;
11055 Chains[i] = L.getValue(1);
11062 std::optional<ISD::NodeType> AssertOp;
11067 unsigned CurReg = 0;
11068 for (
EVT VT : RetTys) {
11075 CLI.
DAG, CLI.
DL, &InVals[CurReg], NumRegs, RegisterVT, VT,
nullptr,
11083 if (ReturnValues.
empty())
11089 return std::make_pair(Res, CLI.
Chain);
11106 if (
N->getNumValues() == 1) {
11114 "Lowering returned the wrong number of results!");
11117 for (
unsigned I = 0, E =
N->getNumValues();
I != E; ++
I)
11130 cast<RegisterSDNode>(
Op.getOperand(1))->getReg() != Reg) &&
11131 "Copy from a reg to the same reg!");
11145 ExtendType = PreferredExtendIt->second;
11148 PendingExports.push_back(Chain);
11160 return A->use_empty();
11162 const BasicBlock &Entry =
A->getParent()->front();
11163 for (
const User *U :
A->users())
11164 if (cast<Instruction>(U)->
getParent() != &Entry || isa<SwitchInst>(U))
11172 std::pair<const AllocaInst *, const StoreInst *>>;
11184 enum StaticAllocaInfo {
Unknown, Clobbered, Elidable };
11186 unsigned NumArgs = FuncInfo->
Fn->
arg_size();
11187 StaticAllocas.
reserve(NumArgs * 2);
11189 auto GetInfoIfStaticAlloca = [&](
const Value *V) -> StaticAllocaInfo * {
11192 V = V->stripPointerCasts();
11193 const auto *AI = dyn_cast<AllocaInst>(V);
11194 if (!AI || !AI->isStaticAlloca() || !FuncInfo->
StaticAllocaMap.count(AI))
11197 return &Iter.first->second;
11207 const auto *SI = dyn_cast<StoreInst>(&
I);
11214 if (
I.isDebugOrPseudoInst())
11218 for (
const Use &U :
I.operands()) {
11219 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(U))
11220 *
Info = StaticAllocaInfo::Clobbered;
11226 if (StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
11227 *
Info = StaticAllocaInfo::Clobbered;
11230 const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
11231 StaticAllocaInfo *
Info = GetInfoIfStaticAlloca(Dst);
11234 const AllocaInst *AI = cast<AllocaInst>(Dst);
11237 if (*
Info != StaticAllocaInfo::Unknown)
11245 const Value *Val = SI->getValueOperand()->stripPointerCasts();
11246 const auto *Arg = dyn_cast<Argument>(Val);
11247 if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
11248 Arg->getType()->isEmptyTy() ||
11249 DL.getTypeStoreSize(Arg->getType()) !=
11251 !
DL.typeSizeEqualsStoreSize(Arg->getType()) ||
11252 ArgCopyElisionCandidates.
count(Arg)) {
11253 *
Info = StaticAllocaInfo::Clobbered;
11257 LLVM_DEBUG(
dbgs() <<
"Found argument copy elision candidate: " << *AI
11261 *
Info = StaticAllocaInfo::Elidable;
11262 ArgCopyElisionCandidates.
insert({Arg, {AI, SI}});
11267 if (ArgCopyElisionCandidates.
size() == NumArgs)
11281 auto *LNode = dyn_cast<LoadSDNode>(ArgVals[0]);
11284 auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
11291 auto ArgCopyIter = ArgCopyElisionCandidates.
find(&Arg);
11292 assert(ArgCopyIter != ArgCopyElisionCandidates.
end());
11293 const AllocaInst *AI = ArgCopyIter->second.first;
11294 int FixedIndex = FINode->getIndex();
11296 int OldIndex = AllocaIndex;
11300 dbgs() <<
" argument copy elision failed due to bad fixed stack "
11306 LLVM_DEBUG(
dbgs() <<
" argument copy elision failed: alignment of alloca "
11307 "greater than stack argument alignment ("
11308 <<
DebugStr(RequiredAlignment) <<
" vs "
11316 dbgs() <<
"Eliding argument copy from " << Arg <<
" to " << *AI <<
'\n'
11317 <<
" Replacing frame index " << OldIndex <<
" with " << FixedIndex
11323 AllocaIndex = FixedIndex;
11324 ArgCopyElisionFrameIndexMap.
insert({OldIndex, FixedIndex});
11325 for (
SDValue ArgVal : ArgVals)
11329 const StoreInst *SI = ArgCopyIter->second.second;
11330 ElidedArgCopyInstrs.
insert(SI);
11342void SelectionDAGISel::LowerArguments(
const Function &
F) {
11349 if (
F.hasFnAttribute(Attribute::Naked))
11367 Ins.push_back(RetArg);
11375 ArgCopyElisionCandidates);
11379 unsigned ArgNo = Arg.getArgNo();
11382 bool isArgValueUsed = !Arg.use_empty();
11383 unsigned PartBase = 0;
11384 Type *FinalType = Arg.getType();
11385 if (Arg.hasAttribute(Attribute::ByVal))
11386 FinalType = Arg.getParamByValType();
11388 FinalType,
F.getCallingConv(),
F.isVarArg(),
DL);
11389 for (
unsigned Value = 0, NumValues = ValueVTs.
size();
11396 if (Arg.getType()->isPointerTy()) {
11397 Flags.setPointer();
11398 Flags.setPointerAddrSpace(
11399 cast<PointerType>(Arg.getType())->getAddressSpace());
11401 if (Arg.hasAttribute(Attribute::ZExt))
11403 if (Arg.hasAttribute(Attribute::SExt))
11405 if (Arg.hasAttribute(Attribute::InReg)) {
11409 isa<StructType>(Arg.getType())) {
11412 Flags.setHvaStart();
11418 if (Arg.hasAttribute(Attribute::StructRet))
11420 if (Arg.hasAttribute(Attribute::SwiftSelf))
11421 Flags.setSwiftSelf();
11422 if (Arg.hasAttribute(Attribute::SwiftAsync))
11423 Flags.setSwiftAsync();
11424 if (Arg.hasAttribute(Attribute::SwiftError))
11425 Flags.setSwiftError();
11426 if (Arg.hasAttribute(Attribute::ByVal))
11428 if (Arg.hasAttribute(Attribute::ByRef))
11430 if (Arg.hasAttribute(Attribute::InAlloca)) {
11431 Flags.setInAlloca();
11439 if (Arg.hasAttribute(Attribute::Preallocated)) {
11440 Flags.setPreallocated();
11452 const Align OriginalAlignment(
11454 Flags.setOrigAlign(OriginalAlignment);
11457 Type *ArgMemTy =
nullptr;
11458 if (
Flags.isByVal() ||
Flags.isInAlloca() ||
Flags.isPreallocated() ||
11461 ArgMemTy = Arg.getPointeeInMemoryValueType();
11463 uint64_t MemSize =
DL.getTypeAllocSize(ArgMemTy);
11468 if (
auto ParamAlign = Arg.getParamStackAlign())
11469 MemAlign = *ParamAlign;
11470 else if ((ParamAlign = Arg.getParamAlign()))
11471 MemAlign = *ParamAlign;
11474 if (
Flags.isByRef())
11475 Flags.setByRefSize(MemSize);
11477 Flags.setByValSize(MemSize);
11478 }
else if (
auto ParamAlign = Arg.getParamStackAlign()) {
11479 MemAlign = *ParamAlign;
11481 MemAlign = OriginalAlignment;
11483 Flags.setMemAlign(MemAlign);
11485 if (Arg.hasAttribute(Attribute::Nest))
11488 Flags.setInConsecutiveRegs();
11489 if (ArgCopyElisionCandidates.
count(&Arg))
11490 Flags.setCopyElisionCandidate();
11491 if (Arg.hasAttribute(Attribute::Returned))
11492 Flags.setReturned();
11498 for (
unsigned i = 0; i != NumRegs; ++i) {
11503 Flags, RegisterVT, VT, isArgValueUsed, ArgNo,
11505 if (NumRegs > 1 && i == 0)
11506 MyFlags.Flags.setSplit();
11509 MyFlags.Flags.setOrigAlign(
Align(1));
11510 if (i == NumRegs - 1)
11511 MyFlags.Flags.setSplitEnd();
11513 Ins.push_back(MyFlags);
11515 if (NeedsRegBlock &&
Value == NumValues - 1)
11516 Ins[
Ins.size() - 1].Flags.setInConsecutiveRegsLast();
11524 DAG.
getRoot(),
F.getCallingConv(),
F.isVarArg(), Ins, dl, DAG, InVals);
11528 "LowerFormalArguments didn't return a valid chain!");
11530 "LowerFormalArguments didn't emit the correct number of values!");
11532 for (
unsigned i = 0, e =
Ins.size(); i != e; ++i) {
11533 assert(InVals[i].getNode() &&
11534 "LowerFormalArguments emitted a null value!");
11535 assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
11536 "LowerFormalArguments emitted a value with the wrong type!");
11553 MVT VT = ValueVTs[0].getSimpleVT();
11555 std::optional<ISD::NodeType> AssertOp;
11558 F.getCallingConv(), AssertOp);
11564 FuncInfo->DemoteRegister = SRetReg;
11566 SDB->DAG.getCopyToReg(NewRoot,
SDB->getCurSDLoc(), SRetReg, ArgValue);
11579 unsigned NumValues = ValueVTs.
size();
11580 if (NumValues == 0)
11583 bool ArgHasUses = !Arg.use_empty();
11587 if (Ins[i].
Flags.isCopyElisionCandidate()) {
11588 unsigned NumParts = 0;
11589 for (
EVT VT : ValueVTs)
11591 F.getCallingConv(), VT);
11595 ArrayRef(&InVals[i], NumParts), ArgHasUses);
11600 bool isSwiftErrorArg =
11602 Arg.hasAttribute(Attribute::SwiftError);
11603 if (!ArgHasUses && !isSwiftErrorArg) {
11604 SDB->setUnusedArgValue(&Arg, InVals[i]);
11608 dyn_cast<FrameIndexSDNode>(InVals[i].
getNode()))
11609 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11612 for (
unsigned Val = 0; Val != NumValues; ++Val) {
11613 EVT VT = ValueVTs[Val];
11615 F.getCallingConv(), VT);
11622 if (ArgHasUses || isSwiftErrorArg) {
11623 std::optional<ISD::NodeType> AssertOp;
11624 if (Arg.hasAttribute(Attribute::SExt))
11626 else if (Arg.hasAttribute(Attribute::ZExt))
11630 PartVT, VT,
nullptr, NewRoot,
11631 F.getCallingConv(), AssertOp));
11638 if (ArgValues.
empty())
11643 dyn_cast<FrameIndexSDNode>(ArgValues[0].
getNode()))
11644 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11647 SDB->getCurSDLoc());
11649 SDB->setValue(&Arg, Res);
11662 dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
11663 FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11672 unsigned Reg = cast<RegisterSDNode>(Res.
getOperand(1))->getReg();
11684 unsigned Reg = cast<RegisterSDNode>(Res.
getOperand(1))->getReg();
11691 FuncInfo->InitializeRegForValue(&Arg);
11692 SDB->CopyToExportRegsIfNeeded(&Arg);
11696 if (!Chains.
empty()) {
11703 assert(i == InVals.
size() &&
"Argument register count mismatch!");
11707 if (!ArgCopyElisionFrameIndexMap.
empty()) {
11710 auto I = ArgCopyElisionFrameIndexMap.
find(
VI.getStackSlot());
11711 if (
I != ArgCopyElisionFrameIndexMap.
end())
11712 VI.updateStackSlot(
I->second);
11727SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(
const BasicBlock *LLVMBB) {
11735 if (!isa<PHINode>(SuccBB->begin()))
continue;
11740 if (!SuccsHandled.
insert(SuccMBB).second)
11748 for (
const PHINode &PN : SuccBB->phis()) {
11750 if (PN.use_empty())
11754 if (PN.getType()->isEmptyTy())
11758 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
11760 if (
const auto *
C = dyn_cast<Constant>(PHIOp)) {
11767 if (
auto *CI = dyn_cast<ConstantInt>(
C))
11779 assert(isa<AllocaInst>(PHIOp) &&
11781 "Didn't codegen value into a register!??");
11791 for (
EVT VT : ValueVTs) {
11793 for (
unsigned i = 0; i != NumRegisters; ++i)
11795 std::make_pair(&*
MBBI++, Reg + i));
11796 Reg += NumRegisters;
11816void SelectionDAGBuilder::updateDAGForMaybeTailCall(
SDValue MaybeTC) {
11818 if (MaybeTC.
getNode() !=
nullptr)
11833 unsigned Size =
W.LastCluster -
W.FirstCluster + 1;
11837 if (
Size == 2 &&
W.MBB == SwitchMBB) {
11850 const APInt &SmallValue =
Small.Low->getValue();
11851 const APInt &BigValue =
Big.Low->getValue();
11854 APInt CommonBit = BigValue ^ SmallValue;
11869 addSuccessorWithProb(SwitchMBB,
Small.MBB,
Small.Prob +
Big.Prob);
11871 addSuccessorWithProb(
11872 SwitchMBB, DefaultMBB,
11876 addSuccessorWithProb(SwitchMBB, DefaultMBB);
11899 return a.Prob != b.Prob ?
11901 a.Low->getValue().slt(b.Low->getValue());
11908 if (
I->Prob >
W.LastCluster->Prob)
11910 if (
I->Kind ==
CC_Range &&
I->MBB == NextMBB) {
11921 UnhandledProbs +=
I->Prob;
11925 bool FallthroughUnreachable =
false;
11927 if (
I ==
W.LastCluster) {
11929 Fallthrough = DefaultMBB;
11930 FallthroughUnreachable = isa<UnreachableInst>(
11934 CurMF->
insert(BBI, Fallthrough);
11938 UnhandledProbs -=
I->Prob;
11948 CurMF->
insert(BBI, JumpMBB);
11950 auto JumpProb =
I->Prob;
11951 auto FallthroughProb = UnhandledProbs;
11959 if (*SI == DefaultMBB) {
11960 JumpProb += DefaultProb / 2;
11961 FallthroughProb -= DefaultProb / 2;
11979 if (FallthroughUnreachable) {
11981 bool HasBranchTargetEnforcement =
false;
11983 HasBranchTargetEnforcement =
11987 HasBranchTargetEnforcement =
11989 "branch-target-enforcement");
11991 if (!HasBranchTargetEnforcement)
11996 addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
11997 addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
12003 JT->Default = Fallthrough;
12006 if (CurMBB == SwitchMBB) {
12029 BTB->
Prob += DefaultProb / 2;
12033 if (FallthroughUnreachable)
12037 if (CurMBB == SwitchMBB) {
12046 if (
I->Low ==
I->High) {
12061 if (FallthroughUnreachable)
12065 CaseBlock CB(
CC, LHS, RHS, MHS,
I->MBB, Fallthrough, CurMBB,
12068 if (CurMBB == SwitchMBB)
12071 SL->SwitchCases.push_back(CB);
12076 CurMBB = Fallthrough;
12080void SelectionDAGBuilder::splitWorkItem(
SwitchWorkList &WorkList,
12084 assert(
W.FirstCluster->Low->getValue().slt(
W.LastCluster->Low->getValue()) &&
12085 "Clusters not sorted?");
12086 assert(
W.LastCluster -
W.FirstCluster + 1 >= 2 &&
"Too small to split!");
12088 auto [LastLeft, FirstRight, LeftProb, RightProb] =
12089 SL->computeSplitWorkItemInfo(W);
12094 assert(PivotCluster >
W.FirstCluster);
12095 assert(PivotCluster <=
W.LastCluster);
12110 if (FirstLeft == LastLeft && FirstLeft->Kind ==
CC_Range &&
12111 FirstLeft->Low ==
W.GE &&
12112 (FirstLeft->High->getValue() + 1LL) == Pivot->
getValue()) {
12113 LeftMBB = FirstLeft->MBB;
12118 {LeftMBB, FirstLeft, LastLeft,
W.GE, Pivot,
W.DefaultProb / 2});
12127 if (FirstRight == LastRight && FirstRight->Kind ==
CC_Range &&
12128 W.LT && (FirstRight->High->getValue() + 1ULL) ==
W.LT->getValue()) {
12129 RightMBB = FirstRight->MBB;
12134 {RightMBB, FirstRight, LastRight, Pivot,
W.LT,
W.DefaultProb / 2});
12143 if (
W.MBB == SwitchMBB)
12146 SL->SwitchCases.push_back(CB);
12179 unsigned PeeledCaseIndex = 0;
12180 bool SwitchPeeled =
false;
12183 if (
CC.Prob < TopCaseProb)
12185 TopCaseProb =
CC.Prob;
12186 PeeledCaseIndex =
Index;
12187 SwitchPeeled =
true;
12192 LLVM_DEBUG(
dbgs() <<
"Peeled one top case in switch stmt, prob: "
12193 << TopCaseProb <<
"\n");
12203 auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
12205 nullptr,
nullptr, TopCaseProb.
getCompl()};
12206 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
12208 Clusters.erase(PeeledCaseIt);
12211 dbgs() <<
"Scale the probablity for one cluster, before scaling: "
12212 <<
CC.Prob <<
"\n");
12216 PeeledCaseProb = TopCaseProb;
12217 return PeeledSwitchMBB;
12220void SelectionDAGBuilder::visitSwitch(
const SwitchInst &SI) {
12224 Clusters.reserve(
SI.getNumCases());
12225 for (
auto I :
SI.cases()) {
12244 peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
12248 if (Clusters.empty()) {
12249 assert(PeeledSwitchMBB == SwitchMBB);
12251 if (DefaultMBB != NextBlock(SwitchMBB)) {
12260 SL->findBitTestClusters(Clusters, &SI);
12263 dbgs() <<
"Case clusters: ";
12270 C.Low->getValue().print(
dbgs(),
true);
12271 if (
C.Low !=
C.High) {
12273 C.High->getValue().print(
dbgs(),
true);
12280 assert(!Clusters.empty());
12284 auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
12291 {PeeledSwitchMBB,
First,
Last,
nullptr,
nullptr, DefaultProb});
12293 while (!WorkList.
empty()) {
12295 unsigned NumClusters =
W.LastCluster -
W.FirstCluster + 1;
12300 splitWorkItem(WorkList, W,
SI.getCondition(), SwitchMBB);
12304 lowerWorkItem(W,
SI.getCondition(), SwitchMBB, DefaultMBB);
12308void SelectionDAGBuilder::visitStepVector(
const CallInst &
I) {
12315void SelectionDAGBuilder::visitVectorReverse(
const CallInst &
I) {
12321 assert(VT ==
V.getValueType() &&
"Malformed vector.reverse!");
12332 for (
unsigned i = 0; i != NumElts; ++i)
12333 Mask.push_back(NumElts - 1 - i);
12338void SelectionDAGBuilder::visitVectorDeinterleave(
const CallInst &
I) {
12369void SelectionDAGBuilder::visitVectorInterleave(
const CallInst &
I) {
12394void SelectionDAGBuilder::visitFreeze(
const FreezeInst &
I) {
12398 unsigned NumValues = ValueVTs.
size();
12399 if (NumValues == 0)
return;
12404 for (
unsigned i = 0; i != NumValues; ++i)
12412void SelectionDAGBuilder::visitVectorSplice(
const CallInst &
I) {
12419 int64_t
Imm = cast<ConstantInt>(
I.getOperand(2))->getSExtValue();
12434 for (
unsigned i = 0; i < NumElts; ++i)
12463 assert(
MI->getOpcode() == TargetOpcode::COPY &&
12464 "start of copy chain MUST be COPY");
12465 Reg =
MI->getOperand(1).getReg();
12466 MI =
MRI.def_begin(Reg)->getParent();
12468 if (
MI->getOpcode() == TargetOpcode::COPY) {
12469 assert(Reg.isVirtual() &&
"expected COPY of virtual register");
12470 Reg =
MI->getOperand(1).getReg();
12471 assert(Reg.isPhysical() &&
"expected COPY of physical register");
12472 MI =
MRI.def_begin(Reg)->getParent();
12475 assert(
MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
12476 "end of copy chain MUST be INLINEASM_BR");
12484void SelectionDAGBuilder::visitCallBrLandingPad(
const CallInst &
I) {
12488 cast<CallBrInst>(
I.getParent()->getUniquePredecessor()->getTerminator());
12500 for (
auto &
T : TargetConstraints) {
12501 SDISelAsmOperandInfo OpInfo(
T);
12509 switch (OpInfo.ConstraintType) {
12517 for (
size_t i = 0, e = OpInfo.AssignedRegs.Regs.size(); i != e; ++i) {
12522 OpInfo.AssignedRegs.Regs[i] = OriginalDef;
12525 SDValue V = OpInfo.AssignedRegs.getCopyFromRegs(
12528 ResultVTs.
push_back(OpInfo.ConstraintVT);
12537 ResultVTs.
push_back(OpInfo.ConstraintVT);
unsigned const MachineRegisterInfo * MRI
static unsigned getIntrinsicID(const SDNode *N)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
This file implements the BitVector class.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Analysis containing CSE Info
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI)
Returns an AttributeList representing the attributes applied to the return value of the given call.
const HexagonInstrInfo * TII
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
static void getRegistersForValue(MachineFunction &MF, MachineIRBuilder &MIRBuilder, GISelAsmOperandInfo &OpInfo, GISelAsmOperandInfo &RefOpInfo)
Assign virtual/physical registers for the specified register operand.
This file defines an InstructionCost class that is used when calculating the cost of an instruction,...
static std::optional< ConstantRange > getRange(Value *V, const InstrInfoQuery &IIQ)
Helper method to get range from metadata or attribute.
unsigned const TargetRegisterInfo * TRI
static const Function * getCalledFunction(const Value *V, bool &IsNoBuiltin)
This file provides utility analysis objects describing memory locations.
This file provides utility for Memory Model Relaxation Annotations (MMRAs).
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
Module.h This file contains the declarations for the Module class.
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool hasOnlySelectUsers(const Value *Cond)
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL, SDValue &Chain)
Create a LOAD_STACK_GUARD node, and let it carry the target specific global variable if there exists ...
static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx, const SDLoc &DL, SmallVectorImpl< SDValue > &Ops, SelectionDAGBuilder &Builder)
Add a stack map intrinsic call's live variable operands to a stackmap or patchpoint target node's ope...
static const unsigned MaxParallelChains
static void getUnderlyingArgRegs(SmallVectorImpl< std::pair< unsigned, TypeSize > > &Regs, const SDValue &N)
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
visitPow - Lower a pow intrinsic.
static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index, ISD::MemIndexType &IndexType, SDValue &Scale, SelectionDAGBuilder *SDB, const BasicBlock *CurBB, uint64_t ElemSize)
static const CallBase * FindPreallocatedCall(const Value *PreallocatedSetup)
Given a @llvm.call.preallocated.setup, return the corresponding preallocated call.
static cl::opt< unsigned > SwitchPeelThreshold("switch-peel-threshold", cl::Hidden, cl::init(66), cl::desc("Set the case probability threshold for peeling the case from a " "switch statement. A value greater than 100 will void this " "optimization"))
static cl::opt< bool > InsertAssertAlign("insert-assert-align", cl::init(true), cl::desc("Insert the experimental `assertalign` node."), cl::ReallyHidden)
static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin)
static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG, DILocalVariable *Variable, DebugLoc DL, unsigned Order, SmallVectorImpl< Value * > &Values, DIExpression *Expression)
static unsigned findMatchingInlineAsmOperand(unsigned OperandNo, const std::vector< SDValue > &AsmNodeOperands)
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo, SDISelAsmOperandInfo &MatchingOpInfo, SelectionDAG &DAG)
Make sure that the output operand OpInfo and its corresponding input operand MatchingOpInfo have comp...
static void findUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
When an invoke or a cleanupret unwinds to the next EH pad, there are many places it could ultimately ...
static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic)
static BranchProbability scaleCaseProbality(BranchProbability CaseProb, BranchProbability PeeledCaseProb)
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp2 - Lower an exp2 intrinsic.
static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue Scale, SelectionDAG &DAG, const TargetLowering &TLI)
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt, const SDLoc &dl)
getF32Constant - Get 32-bit floating point constant.
static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val, const SDLoc &DL, EVT PartVT)
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog10 - Lower a log10 intrinsic.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv)
getCopyToPartsVector - Create a series of nodes that contain the specified value split into legal par...
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, const Value *V, std::optional< CallingConv::ID > CallConv=std::nullopt, ISD::NodeType ExtendKind=ISD::ANY_EXTEND)
getCopyToParts - Create a series of nodes that contain the specified value split into legal parts.
static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, SelectionDAGBuilder &Builder)
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog2 - Lower a log2 intrinsic.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location, SDISelAsmOperandInfo &OpInfo, SelectionDAG &DAG)
Get a direct memory input to behave well as an indirect operand.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel)
isOnlyUsedInEntryBlock - If the specified argument is only used in the entry block,...
static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V, const Twine &ErrMsg)
static bool collectInstructionDeps(SmallMapVector< const Instruction *, bool, 8 > *Deps, const Value *V, SmallMapVector< const Instruction *, bool, 8 > *Necessary=nullptr, unsigned Depth=0)
static void findArgumentCopyElisionCandidates(const DataLayout &DL, FunctionLoweringInfo *FuncInfo, ArgCopyElisionMapTy &ArgCopyElisionCandidates)
Scan the entry block of the function in FuncInfo for arguments that look like copies into a local all...
static bool isFunction(SDValue Op)
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op, const TargetLowering &TLI, const SDLoc &dl)
GetExponent - Get the exponent:
static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg)
static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, SelectionDAG &DAG)
ExpandPowI - Expand a llvm.powi intrinsic.
static void findWasmUnwindDestinations(FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl< std::pair< MachineBasicBlock *, BranchProbability > > &UnwindDests)
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandLog - Lower a log intrinsic.
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC=std::nullopt, std::optional< ISD::NodeType > AssertOp=std::nullopt)
getCopyFromParts - Create a value that contains the specified legal parts combined into the value the...
static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl, SelectionDAG &DAG)
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl)
GetSignificand - Get the significand and build it into a floating-point number with exponent of 1:
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG, const TargetLowering &TLI, SDNodeFlags Flags)
expandExp - Lower an exp intrinsic.
static const MDNode * getRangeMetadata(const Instruction &I)
static cl::opt< unsigned, true > LimitFPPrecision("limit-float-precision", cl::desc("Generate low-precision inline sequences " "for some float libcalls"), cl::location(LimitFloatPrecision), cl::Hidden, cl::init(0))
static void tryToElideArgumentCopy(FunctionLoweringInfo &FuncInfo, SmallVectorImpl< SDValue > &Chains, DenseMap< int, int > &ArgCopyElisionFrameIndexMap, SmallPtrSetImpl< const Instruction * > &ElidedArgCopyInstrs, ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg, ArrayRef< SDValue > ArgVals, bool &ArgHasUses)
Try to elide argument copies from memory into a local alloca.
static unsigned LimitFloatPrecision
LimitFloatPrecision - Generate low-precision inline sequences for some float libcalls (6,...
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, SDValue InChain, std::optional< CallingConv::ID > CC)
getCopyFromPartsVector - Create a value that contains the specified legal parts combined into the val...
static bool InBlock(const Value *V, const BasicBlock *BB)
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
static SymbolRef::Type getType(const Symbol *Sym)
support::ulittle16_t & Lo
support::ulittle16_t & Hi
bool pointsToConstantMemory(const MemoryLocation &Loc, bool OrLocal=false)
Checks whether the given location points to constant memory, or if OrLocal is true whether it points ...
Class for arbitrary precision integers.
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
an instruction to allocate memory on the stack
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
This class represents an incoming formal argument to a Function.
bool hasAttribute(Attribute::AttrKind Kind) const
Check if an argument has a given attribute.
unsigned getArgNo() const
Return the index of this formal argument in its containing function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
A cache of @llvm.assume calls within a function.
An instruction that atomically checks whether a specified value is in a memory location,...
This class represents the atomic memcpy intrinsic i.e.
an instruction that atomically reads a memory location, combines it with another value,...
@ Min
*p = old <signed v ? old : v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
@ UMax
*p = old >unsigned v ? old : v
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
@ UDecWrap
Decrement one until a minimum value or zero.
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
AttributeSet getRetAttrs() const
The attributes for the ret value are returned.
bool hasFnAttr(Attribute::AttrKind Kind) const
Return true if the attribute exists for the function.
bool getValueAsBool() const
Return the attribute's value as a boolean.
LLVM Basic Block Representation.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const Function * getParent() const
Return the enclosing method, or null if none.
const Instruction * getFirstNonPHIOrDbg(bool SkipPseudoOp=true) const
Returns a pointer to the first instruction in this block that is not a PHINode or a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
const Instruction & back() const
This class represents a no-op cast from one type to another.
bool test(unsigned Idx) const
void resize(unsigned N, bool t=false)
resize - Grow or shrink the bitvector.
size_type size() const
size - Returns the number of bits in this bitvector.
The address of a basic block.
Conditional or Unconditional Branch instruction.
Analysis providing branch probability information.
BranchProbability getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const
Get an edge's probability, relative to other out-edges of the Src.
bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const
Test if an edge is hot relative to other out-edges of the Src.
static uint32_t getDenominator()
static BranchProbability getOne()
uint32_t getNumerator() const
uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
static BranchProbability getZero()
static void normalizeProbabilities(ProbabilityIter Begin, ProbabilityIter End)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
CallingConv::ID getCallingConv() const
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
bool isIndirectCall() const
Return true if the callsite is an indirect call.
unsigned countOperandBundlesOfType(StringRef Name) const
Return the number of operand bundles with the tag Name attached to this instruction.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
unsigned arg_size() const
AttributeList getAttributes() const
Return the parameter attributes for this call.
bool isTailCall() const
Tests if this call site is marked as a tail call.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This class is the base class for the comparison instructions.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
A constant value that is initialized with an expression using other constant values.
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
A signed pointer, in the ptrauth sense.
This class represents a range of values.
uint64_t getZExtValue() const
Constant Vector Declarations.
This is an important base class in LLVM.
This is the common base class for constrained floating point intrinsics.
std::optional< fp::ExceptionBehavior > getExceptionBehavior() const
unsigned getNonMetadataArgCount() const
bool isEntryValue() const
Check if the expression consists of exactly one entry value operand.
static bool fragmentsOverlap(const FragmentInfo &A, const FragmentInfo &B)
Check if fragments overlap between a pair of FragmentInfos.
static DIExpression * appendOpsToArg(const DIExpression *Expr, ArrayRef< uint64_t > Ops, unsigned ArgNo, bool StackValue=false)
Create a copy of Expr by appending the given list of Ops to each instance of the operand DW_OP_LLVM_a...
static std::optional< FragmentInfo > getFragmentInfo(expr_op_iterator Start, expr_op_iterator End)
Retrieve the details of this fragment expression.
uint64_t getNumLocationOperands() const
Return the number of unique location operands referred to (via DW_OP_LLVM_arg) in this expression; th...
static std::optional< DIExpression * > createFragmentExpression(const DIExpression *Expr, unsigned OffsetInBits, unsigned SizeInBits)
Create a DIExpression to describe one part of an aggregate variable that is fragmented across multipl...
static const DIExpression * convertToUndefExpression(const DIExpression *Expr)
Removes all elements from Expr that do not apply to an undef debug value, which includes every operat...
static DIExpression * prepend(const DIExpression *Expr, uint8_t Flags, int64_t Offset=0)
Prepend DIExpr with a deref and offset operation and optionally turn it into a stack value or/and an ...
static DIExpression * prependOpcodes(const DIExpression *Expr, SmallVectorImpl< uint64_t > &Ops, bool StackValue=false, bool EntryValue=false)
Prepend DIExpr with the given opcodes and optionally turn it into a stack value.
bool isValidLocationForIntrinsic(const DILocation *DL) const
Check that a location is valid for this variable.
Base class for variables.
std::optional< uint64_t > getSizeInBits() const
Determines the size of the variable's type.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
unsigned getAllocaAddrSpace() const
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
TypeSize getTypeStoreSize(Type *Ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
This represents the llvm.dbg.label instruction.
DILabel * getLabel() const
Records a position in IR for a source label (DILabel).
Base class for non-instruction debug metadata records that have positions within IR.
DebugLoc getDebugLoc() const
This represents the llvm.dbg.value instruction.
iterator_range< location_op_iterator > getValues() const
DILocalVariable * getVariable() const
DIExpression * getExpression() const
bool isKillLocation() const
Record of a variable value-assignment, aka a non-instruction representation of the dbg....
LocationType getType() const
DIExpression * getExpression() const
Value * getVariableLocationOp(unsigned OpIdx) const
DILocalVariable * getVariable() const
iterator_range< location_op_iterator > location_ops() const
Get the locations corresponding to the variable referenced by the debug info intrinsic.
DILocation * getInlinedAt() const
iterator find(const_arg_type_t< KeyT > Val)
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void reserve(size_type NumEntries)
Grow the densemap so that it can contain at least NumEntries items before resizing again.
static constexpr ElementCount getFixed(ScalarTy MinVal)
static constexpr ElementCount get(ScalarTy MinVal, bool Scalable)
constexpr bool isScalar() const
Exactly one element.
Class representing an expression and its matching format.
This instruction compares its operands according to the predicate given to the constructor.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
bool allowReassoc() const
Flag queries.
An instruction for ordering other memory operations.
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
This class represents a freeze function that returns a random concrete value if an operand is either a...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
BranchProbabilityInfo * BPI
Register CreateRegs(const Value *V)
SmallPtrSet< const DbgVariableRecord *, 8 > PreprocessedDVRDeclares
Register DemoteRegister
DemoteRegister - if CanLowerReturn is false, DemoteRegister is a vreg allocated to hold a pointer to ...
BitVector DescribedArgs
Bitvector with a bit set if corresponding argument is described in ArgDbgValues.
DenseMap< const AllocaInst *, int > StaticAllocaMap
StaticAllocaMap - Keep track of frame indices for fixed sized allocas in the entry block.
int getArgumentFrameIndex(const Argument *A)
getArgumentFrameIndex - Get frame index for the byval argument.
DenseMap< const BasicBlock *, MachineBasicBlock * > MBBMap
MBBMap - A mapping from LLVM basic blocks to their machine code entry.
bool isExportedInst(const Value *V) const
isExportedInst - Return true if the specified value is an instruction exported from its block.
const LiveOutInfo * GetLiveOutRegInfo(Register Reg)
GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the register is a PHI destinat...
Register InitializeRegForValue(const Value *V)
unsigned ExceptionPointerVirtReg
If the current MBB is a landing pad, the exception pointer and exception selector registers are copie...
SmallPtrSet< const DbgDeclareInst *, 8 > PreprocessedDbgDeclares
Collection of dbg.declare instructions handled after argument lowering and before ISel proper.
DenseMap< const Value *, Register > ValueMap
ValueMap - Since we emit code for the function a basic block at a time, we must remember which virtua...
MachineBasicBlock::iterator InsertPt
InsertPt - The current insert position inside the current block.
MachineBasicBlock * MBB
MBB - The current block.
std::vector< std::pair< MachineInstr *, unsigned > > PHINodesToUpdate
PHINodesToUpdate - A list of phi instructions whose operand list will be updated after processing the...
unsigned ExceptionSelectorVirtReg
SmallVector< MachineInstr *, 8 > ArgDbgValues
ArgDbgValues - A list of DBG_VALUE instructions created during isel for function arguments that are i...
MachineRegisterInfo * RegInfo
Register CreateReg(MVT VT, bool isDivergent=false)
CreateReg - Allocate a single virtual register for the given type.
bool CanLowerReturn
CanLowerReturn - true iff the function's return value can be lowered to registers.
DenseMap< const Value *, ISD::NodeType > PreferredExtendType
Record the preferred extend type (ISD::SIGN_EXTEND or ISD::ZERO_EXTEND) for a value.
Register getCatchPadExceptionPointerVReg(const Value *CPI, const TargetRegisterClass *RC)
Class to represent function types.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Type * getParamType(unsigned i) const
Parameter type accessors.
Type * getReturnType() const
Data structure describing the variable locations in a function.
const BasicBlock & getEntryBlock() const
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generatio...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Constant * getPersonalityFn() const
Get the personality function associated with this function.
AttributeList getAttributes() const
Return the attribute list for this Function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Garbage collection metadata for a single function.
void addStackRoot(int Num, const Constant *Metadata)
addStackRoot - Registers a root that lives on the stack.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
This instruction compares its operands according to the predicate given to the constructor.
Indirect Branch Instruction.
This instruction inserts a struct field of array element value into an aggregate value.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
AAMDNodes getAAMetadata() const
Returns the AA metadata for this instruction.
@ MIN_INT_BITS
Minimum number of bits that can be specified.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
This is an important class for using LLVM in a threaded context.
void emitError(uint64_t LocCookie, const Twine &ErrorStr)
emitError - Emit an error message to the currently installed error handler with optional location inf...
The landingpad instruction holds all of the information necessary to generate correct exception handl...
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
static LocationSize precise(uint64_t Value)
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
MCSymbol * createTempSymbol()
Create a temporary symbol with a unique name.
MCSymbol * getOrCreateFrameAllocSymbol(const Twine &FuncName, unsigned Idx)
Gets a symbol that will be defined to the final stack offset of a local variable after codegen.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
uint64_t getScalarSizeInBits() const
@ INVALID_SIMPLE_VALUE_TYPE
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
ElementCount getVectorElementCount() const
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool bitsGE(MVT VT) const
Return true if this has no less bits than VT.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
void normalizeSuccProbs()
Normalize probabilities of all successors so that the sum of them becomes one.
bool isEHPad() const
Returns true if the block is a landing pad.
void setIsEHCatchretTarget(bool V=true)
Indicates if this is a target block of a catchret.
void setIsCleanupFuncletEntry(bool V=true)
Indicates if this is the entry block of a cleanup funclet.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void setSuccProbability(succ_iterator I, BranchProbability Prob)
Set successor probability of a given iterator.
succ_iterator succ_begin()
std::vector< MachineBasicBlock * >::iterator succ_iterator
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void setIsEHFuncletEntry(bool V=true)
Indicates if this is the entry block of an EH funclet.
void setIsEHScopeEntry(bool V=true)
Indicates if this is the entry block of an EH scope, i.e., the block that used to have a catchpa...
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
void setIsImmutableObjectIndex(int ObjectIdx, bool IsImmutable)
Marks the immutability of an object.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setHasPatchPoint(bool s=true)
void setHasStackMap(bool s=true)
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
int getStackProtectorIndex() const
Return the index for the stack protector object.
void setStackProtectorIndex(int I)
void setIsAliasedObjectIndex(int ObjectIdx, bool IsAliased)
Set "maybe pointed to by an LLVM IR value" for an object.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
void RemoveStackObject(int ObjectIdx)
Remove or mark dead a statically sized stack object.
void setFunctionContextIndex(int I)
Description of the location of a variable whose Address is valid and unchanging during function execu...
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
void setCallsUnwindInit(bool b)
bool useDebugInstrRef() const
Returns true if the function's variable locations are tracked with instruction referencing.
void setCallSiteBeginLabel(MCSymbol *BeginLabel, unsigned Site)
Map the begin label for a call site.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
void setHasEHCatchret(bool V)
void setCallsEHReturn(bool b)
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
unsigned getTypeIDFor(const GlobalValue *TI)
Return the type id for the specified typeinfo. This is function wide.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
auto getInStackSlotVariableDbgInfo()
Returns the collection of variables for which we have debug info and that have been assigned a stack ...
void addCodeViewAnnotation(MCSymbol *Label, MDNode *MD)
Record annotations associated with a particular label.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineModuleInfo & getMMI() const
const MachineBasicBlock & front() const
bool hasEHFunclets() const
void addInvoke(MachineBasicBlock *LandingPad, MCSymbol *BeginLabel, MCSymbol *EndLabel)
Provide the begin and end labels of an invoke style call and associate it with a try landing pad bloc...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void erase(iterator MBBI)
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
Representation of each machine instruction.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
const MCContext & getContext() const
const Module * getModule() const
void setCurrentCallSite(unsigned Site)
Set the call site currently being processed.
unsigned getCurrentCallSite()
Get the call site currently being processed, if any.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
static MachineOperand CreateFI(int Idx)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
ArrayRef< std::pair< MCRegister, Register > > liveins() const
An SDNode that represents everything that will be needed to construct a MachineInstr.
bool contains(const KeyT &Key) const
std::pair< iterator, bool > try_emplace(const KeyT &Key, Ts &&...Args)
Representation for a specific memory location.
static MemoryLocation getAfter(const Value *Ptr, const AAMDNodes &AATags=AAMDNodes())
Return a location that may access any location after Ptr, while remaining within the underlying objec...
A Module instance is used to store all the information related to an LLVM module.
Metadata * getModuleFlag(StringRef Key) const
Return the corresponding value if Key appears in module flags, otherwise return null.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A udiv or sdiv instruction, which can be marked as "exact", indicating that no bits are destroyed.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
Holds the information from a dbg_label node through SDISel.
static SDDbgOperand fromNode(SDNode *Node, unsigned ResNo)
static SDDbgOperand fromFrameIdx(unsigned FrameIdx)
static SDDbgOperand fromVReg(unsigned VReg)
static SDDbgOperand fromConst(const Value *Const)
Holds the information from a dbg_value node through SDISel.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
iterator_range< value_op_iterator > op_values() const
unsigned getIROrder() const
Return the node ordering.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
SelectionDAGBuilder - This is the common target-independent lowering implementation that is parameter...
SDValue getValue(const Value *V)
getValue - Return an SDValue for the given Value.
void addDanglingDebugInfo(SmallVectorImpl< Value * > &Values, DILocalVariable *Var, DIExpression *Expr, bool IsVariadic, DebugLoc DL, unsigned Order)
Register a dbg_value which relies on a Value which we have not yet seen.
void visitDbgInfo(const Instruction &I)
void clearDanglingDebugInfo()
Clear the dangling debug information map.
void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall, bool IsMustTailCall, const BasicBlock *EHPadBB=nullptr, const TargetLowering::PtrAuthInfo *PAI=nullptr)
void clear()
Clear out the current SelectionDAG and the associated state and prepare this SelectionDAGBuilder obje...
void visitBitTestHeader(SwitchCG::BitTestBlock &B, MachineBasicBlock *SwitchBB)
visitBitTestHeader - This function emits necessary code to produce value suitable for "bit tests"
void LowerStatepoint(const GCStatepointInst &I, const BasicBlock *EHPadBB=nullptr)
std::unique_ptr< SDAGSwitchLowering > SL
SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I, SDValue Op)
bool HasTailCall
This is set to true if a call in the current block has been translated as a tail call.
bool ShouldEmitAsBranches(const std::vector< SwitchCG::CaseBlock > &Cases)
If the set of cases should be emitted as a series of branches, return true.
void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
EmitBranchForMergedCondition - Helper method for FindMergedConditions.
void LowerDeoptimizeCall(const CallInst *CI)
void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee, const BasicBlock *EHPadBB)
SwiftErrorValueTracking & SwiftError
Information about the swifterror values used throughout the function.
SDValue getNonRegisterValue(const Value *V)
getNonRegisterValue - Return an SDValue for the given Value, but don't look in FuncInfo....
void CopyValueToVirtualRegister(const Value *V, unsigned Reg, ISD::NodeType ExtendType=ISD::ANY_EXTEND)
DenseMap< MachineBasicBlock *, SmallVector< unsigned, 4 > > LPadToCallSiteMap
Map a landing pad to the call site indexes.
void handleDebugDeclare(Value *Address, DILocalVariable *Variable, DIExpression *Expression, DebugLoc DL)
void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB, BranchProbability BranchProbToNext, unsigned Reg, SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB)
visitBitTestCase - this function produces one "bit test"
bool shouldKeepJumpConditionsTogether(const FunctionLoweringInfo &FuncInfo, const BranchInst &I, Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs, TargetLoweringBase::CondMergingParams Params) const
StatepointLoweringState StatepointLowering
State used while lowering a statepoint sequence (gc_statepoint, gc_relocate, and gc_result).
void init(GCFunctionInfo *gfi, AAResults *AA, AssumptionCache *AC, const TargetLibraryInfo *li)
DenseMap< const Constant *, unsigned > ConstantsOut
void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI, const CallBase *Call, unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy, AttributeSet RetAttrs, bool IsPatchPoint)
Populate a CallLoweringInfo (into CLI) based on the properties of the call being lowered.
void salvageUnresolvedDbgValue(const Value *V, DanglingDebugInfo &DDI)
For the given dangling debuginfo record, perform last-ditch efforts to resolve the debuginfo to somet...
SmallVector< SDValue, 8 > PendingLoads
Loads are not emitted to the program immediately.
GCFunctionInfo * GFI
Garbage collection metadata for the function.
SDValue getRoot()
Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict) items.
void ExportFromCurrentBlock(const Value *V)
ExportFromCurrentBlock - If this condition isn't known to be exported from the current basic block,...
DebugLoc getCurDebugLoc() const
void resolveOrClearDbgInfo()
Evict any dangling debug information, attempting to salvage it first.
std::pair< SDValue, SDValue > lowerInvokable(TargetLowering::CallLoweringInfo &CLI, const BasicBlock *EHPadBB=nullptr)
SDValue getMemoryRoot()
Return the current virtual root of the Selection DAG, flushing any PendingLoad items.
void resolveDanglingDebugInfo(const Value *V, SDValue Val)
If we saw an earlier dbg_value referring to V, generate the debug data structures now that we've seen...
SDLoc getCurSDLoc() const
void visit(const Instruction &I)
void dropDanglingDebugInfo(const DILocalVariable *Variable, const DIExpression *Expr)
If we have dangling debug info that describes Variable, or an overlapping part of variable considerin...
SDValue getCopyFromRegs(const Value *V, Type *Ty)
If there was virtual register allocated for the value V emit CopyFromReg of the specified type Ty.
void CopyToExportRegsIfNeeded(const Value *V)
CopyToExportRegsIfNeeded - If the given value has virtual registers created for it,...
void handleKillDebugValue(DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order)
Create a record for a kill location debug intrinsic.
void visitJumpTable(SwitchCG::JumpTable &JT)
visitJumpTable - Emit JumpTable node in the current MBB
void visitJumpTableHeader(SwitchCG::JumpTable &JT, SwitchCG::JumpTableHeader &JTH, MachineBasicBlock *SwitchBB)
visitJumpTableHeader - This function emits necessary code to produce index in the JumpTable from swit...
void LowerCallSiteWithPtrAuthBundle(const CallBase &CB, const BasicBlock *EHPadBB)
static const unsigned LowestSDNodeOrder
Lowest valid SDNodeOrder.
void LowerDeoptimizingReturn()
FunctionLoweringInfo & FuncInfo
Information about the function as a whole.
void setValue(const Value *V, SDValue NewN)
void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB, MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB, Instruction::BinaryOps Opc, BranchProbability TProb, BranchProbability FProb, bool InvertCond)
const TargetLibraryInfo * LibInfo
bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB)
void visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineBasicBlock *ParentBB)
Codegen a new tail for a stack protector check ParentMBB which has had its tail spliced into a stack ...
bool handleDebugValue(ArrayRef< const Value * > Values, DILocalVariable *Var, DIExpression *Expr, DebugLoc DbgLoc, unsigned Order, bool IsVariadic)
For a given list of Values, attempt to create and record a SDDbgValue in the SelectionDAG.
SDValue getControlRoot()
Similar to getRoot, but instead of flushing all the PendingLoad items, flush all the PendingExports (...
void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last)
When an MBB was split during scheduling, update the references that need to refer to the last resulti...
SDValue getValueImpl(const Value *V)
getValueImpl - Helper function for getValue and getNonRegisterValue.
void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB)
visitSwitchCase - Emits the necessary code to represent a single node in the binary search tree resul...
void visitSPDescriptorFailure(StackProtectorDescriptor &SPD)
Codegen the failure basic block for a stack protector check.
std::unique_ptr< FunctionLoweringInfo > FuncInfo
SmallPtrSet< const Instruction *, 4 > ElidedArgCopyInstrs
const TargetLowering * TLI
virtual void emitFunctionEntryCode()
SwiftErrorValueTracking * SwiftError
std::unique_ptr< SelectionDAGBuilder > SDB
Targets can subclass this to parameterize the SelectionDAG lowering and instruction selection process...
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrnlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, SDValue MaxLength, MachinePointerInfo SrcPtrInfo) const
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, SDValue Op3, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a memcmp/bcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcpy(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Dest, SDValue Src, MachinePointerInfo DestPtrInfo, MachinePointerInfo SrcPtrInfo, bool isStpcpy) const
Emit target-specific code that performs a strcpy or stpcpy, in cases where that is faster than a libc...
virtual std::pair< SDValue, SDValue > EmitTargetCodeForMemchr(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Src, SDValue Char, SDValue Length, MachinePointerInfo SrcPtrInfo) const
Emit target-specific code that performs a memchr, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrcmp(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Op1, SDValue Op2, MachinePointerInfo Op1PtrInfo, MachinePointerInfo Op2PtrInfo) const
Emit target-specific code that performs a strcmp, in cases where that is faster than a libcall.
virtual std::pair< SDValue, SDValue > EmitTargetCodeForStrlen(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src, MachinePointerInfo SrcPtrInfo) const
virtual SDValue EmitTargetCodeForSetTag(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain, SDValue Addr, SDValue Size, MachinePointerInfo DstPtrInfo, bool ZeroData) const
Help to insert SDNodeFlags automatically in transforming.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending ...
SDValue getLabelNode(unsigned Opcode, const SDLoc &dl, SDValue Root, MCSymbol *Label)
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
SDValue getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getMaskedGather(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, ISD::LoadExtType ExtTy)
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
const TargetSubtargetInfo & getSubtarget() const
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
BlockFrequencyInfo * getBFI() const
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
SDValue getAtomicMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Value, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo)
SDValue getVScale(const SDLoc &DL, EVT VT, APInt MulImm, bool ConstantFold=true)
Return a node that represents the runtime scaling 'MulImm * RuntimeVL'.
SDValue getPseudoProbeNode(const SDLoc &Dl, SDValue Chain, uint64_t Guid, uint64_t Index, uint32_t Attr)
Creates a PseudoProbeSDNode with function GUID Guid and the index of the block Index it is probing,...
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDValue getStridedLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &DL, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, bool IsExpanding=false)
SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain, SDValue Ptr, SDValue Cmp, SDValue Swp, MachineMemOperand *MMO)
Gets a node for an atomic cmpxchg op.
SDDbgValue * getVRegDbgValue(DIVariable *Var, DIExpression *Expr, unsigned VReg, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a VReg SDDbgValue node.
void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To, unsigned Num)
Like ReplaceAllUsesOfValueWith, but for multiple values at once.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
void addMMRAMetadata(const SDNode *Node, MDNode *MMRA)
Set MMRAMetadata to be associated with Node.
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getElementCount(const SDLoc &DL, EVT VT, ElementCount EC, bool ConstantFold=true)
SDValue getGetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
SDValue getAssertAlign(const SDLoc &DL, SDValue V, Align A)
Return an AssertAlignSDNode.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getStepVector(const SDLoc &DL, EVT ResVT, const APInt &StepVal)
Returns a vector of type ResVT whose elements contain the linear sequence <0, Step,...
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
bool shouldOptForSize() const
SDValue getVPZExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be an integer vector, to the vector-type VT,...
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
SDValue getStridedStoreVP(SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Stride, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
void AddDbgValue(SDDbgValue *DB, bool isParameter)
Add a dbg_value SDNode.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getGatherVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
void DeleteNode(SDNode *N)
Remove the specified node from the system.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDDbgValue * getDbgValueList(DIVariable *Var, DIExpression *Expr, ArrayRef< SDDbgOperand > Locs, ArrayRef< SDNode * > Dependencies, bool IsIndirect, const DebugLoc &DL, unsigned O, bool IsVariadic)
Creates a SDDbgValue node from a list of locations.
SDValue getNegative(SDValue Val, const SDLoc &DL, EVT VT)
Create negative operation as (SUB 0, Val).
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
const DataLayout & getDataLayout() const
ProfileSummaryInfo * getPSI() const
SDValue getTargetFrameIndex(int FI, EVT VT)
SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
const SelectionDAGTargetInfo & getSelectionDAGInfo() const
SDValue getMaskedHistogram(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDDbgLabel * getDbgLabel(DILabel *Label, const DebugLoc &DL, unsigned O)
Creates a SDDbgLabel node.
SDValue getStoreVP(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, bool isTargetGA=false, unsigned TargetFlags=0)
SDValue getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getVAArg(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue SV, unsigned Align)
VAArg produces a result and token chain, and takes a pointer and a source value as input.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSrcValue(const Value *v)
Construct a node to track a Value* through the backend.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
SDValue getAtomicMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
MaybeAlign InferPtrAlign(SDValue Ptr) const
Infer alignment of a load / store address.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getRegister(unsigned Reg, EVT VT)
void AddDbgLabel(SDDbgLabel *DB)
Add a dbg_label SDNode.
SDValue getBasicBlock(MachineBasicBlock *MBB)
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
SDValue getEHLabel(const SDLoc &dl, SDValue Root, MCSymbol *Label)
SDValue getSetFPEnv(SDValue Chain, const SDLoc &dl, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO)
SDValue getMaskedStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Base, SDValue Offset, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, bool IsTruncating=false, bool IsCompressing=false)
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either truncating it or perform...
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, bool isTarget=false, unsigned TargetFlags=0)
SDValue getLoadVP(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, SDValue Offset, SDValue Mask, SDValue EVL, MachinePointerInfo PtrInfo, EVT MemVT, Align Alignment, MachineMemOperand::Flags MMOFlags, const AAMDNodes &AAInfo, const MDNode *Ranges=nullptr, bool IsExpanding=false)
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDDbgValue * getConstantDbgValue(DIVariable *Var, DIExpression *Expr, const Value *C, const DebugLoc &DL, unsigned O)
Creates a constant SDDbgValue node.
SDValue getScatterVP(SDVTList VTs, EVT VT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType)
SDValue getValueType(EVT)
SDValue getTargetConstantFP(double Val, const SDLoc &DL, EVT VT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getAtomicMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Type *SizeTy, unsigned ElemSz, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDDbgValue * getFrameIndexDbgValue(DIVariable *Var, DIExpression *Expr, unsigned FI, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a FrameIndex SDDbgValue node.
SDValue getJumpTable(int JTI, EVT VT, bool isTarget=false, unsigned TargetFlags=0)
SDValue getVPPtrExtOrTrunc(const SDLoc &DL, EVT VT, SDValue Op, SDValue Mask, SDValue EVL)
Convert a vector-predicated Op, which must be of integer type, to the vector-type integer type VT,...
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getPtrExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to extend the Op as a pointer value assuming it was the smaller SrcTy ...
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
const FunctionVarLocs * getFunctionVarLocs() const
Returns the result of the AssignmentTrackingAnalysis pass if it's available, otherwise return nullptr...
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getCondCode(ISD::CondCode Cond)
SDValue getLifetimeNode(bool IsStart, const SDLoc &dl, SDValue Chain, int FrameIndex, int64_t Size, int64_t Offset=-1)
Creates a LifetimeSDNode that starts (IsStart==true) or ends (IsStart==false) the lifetime of the por...
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
void addPCSections(const SDNode *Node, MDNode *MD)
Set PCSections to be associated with Node.
SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL, bool LegalTypes=true)
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
SDValue getSetCCVP(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Mask, SDValue EVL)
Helper function to make it easier to build VP_SETCCs if you just have an ISD::CondCode instead of an ...
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDDbgValue * getDbgValue(DIVariable *Var, DIExpression *Expr, SDNode *N, unsigned R, bool IsIndirect, const DebugLoc &DL, unsigned O)
Creates a SDDbgValue node.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getSplat(EVT VT, const SDLoc &DL, SDValue Op)
Returns a node representing a splat of one value into all lanes of the provided vector type.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
SDValue getMaskedScatter(SDVTList VTs, EVT MemVT, const SDLoc &dl, ArrayRef< SDValue > Ops, MachineMemOperand *MMO, ISD::MemIndexType IndexType, bool IsTruncating=false)
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void swap(SmallVectorImpl &RHS)
void push_back(const T &Elt)
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Encapsulates all of the information needed to generate a stack protector check, and signals to isel w...
MachineBasicBlock * getSuccessMBB()
MachineBasicBlock * getFailureMBB()
void clear()
Clear the memory usage of this object.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
TypeSize getElementOffset(unsigned Idx) const
Class to represent struct types.
void setCurrentVReg(const MachineBasicBlock *MBB, const Value *, Register)
Set the swifterror virtual register in the VRegDefMap for this basic block.
Register getOrCreateVRegUseAt(const Instruction *, const MachineBasicBlock *, const Value *)
Get or create the swifterror value virtual register for a use of a swifterror by an instruction.
Register getOrCreateVRegDefAt(const Instruction *, const MachineBasicBlock *, const Value *)
Get or create the swifterror value virtual register for a def of a swifterror by an instruction.
const Value * getFunctionArg() const
Get the (unique) function argument that was marked swifterror, or nullptr if this function has no swi...
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligned on entry to a function.
TargetInstrInfo - Interface to description of machine instruction set.
TargetIntrinsicInfo - Interface to a description of the target's intrinsic instructions.
Provides information about what library functions are available for the current target.
bool hasOptimizedCodeGen(LibFunc F) const
Tests if the function is both available and a candidate for optimized code generation.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
void setAttributes(const CallBase *Call, unsigned ArgIdx)
Set CallLoweringInfo attribute flags based on a call instruction and called function attributes.
virtual bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT) const
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
EVT getMemValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
LegalizeAction
This enum indicates whether operations are valid for a target, and if not, what action should be used...
virtual bool useStackGuardXorFP() const
If this function returns true, stack protection checks should XOR the frame pointer (or whichever poi...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual bool isLegalScaleForGatherScatter(uint64_t Scale, uint64_t ElemSize) const
virtual bool isSExtCheaperThanZExt(EVT FromTy, EVT ToTy) const
Return true if sign-extension from FromTy to ToTy is cheaper than zero-extension.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
virtual CondMergingParams getJumpConditionMergingParams(Instruction::BinaryOps, const Value *, const Value *) const
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
bool isJumpExpensive() const
Return true if Flow Control is an expensive operation that should be avoided.
unsigned getBitWidthForCttzElements(Type *RetTy, ElementCount EC, bool ZeroIsPoison, const ConstantRange *VScaleRange) const
Return the minimum number of bits required to hold the maximum possible number of trailing zero vecto...
virtual bool shouldExtendGSIndex(EVT VT, EVT &EltTy) const
Returns true if the index type for a masked gather/scatter requires extending.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
virtual Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
LegalizeAction getFixedPointOperationAction(unsigned Op, EVT VT, unsigned Scale) const
Some fixed point operations may be natively supported by the target but only for specific scales.
MachineMemOperand::Flags getAtomicMemOperandFlags(const Instruction &AI, const DataLayout &DL) const
virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &, MachineFunction &, unsigned) const
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
virtual bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace=0, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *=nullptr) const
Determine if the target supports unaligned memory accesses.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?...
virtual Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const
Certain targets have context sensitive alignment requirements, where one type has the alignment requi...
virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const
Return true if the @llvm.get.active.lane.mask intrinsic should be expanded using generic code in Sele...
virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const
Return the ValueType of the result of SETCC operations.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, bool LegalTypes=true) const
Returns the type for the shift amount of a shift opcode.
MachineMemOperand::Flags getLoadMemOperandFlags(const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC=nullptr, const TargetLibraryInfo *LibInfo=nullptr) const
virtual EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const
For types supported by the target, this is an identity function.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
MVT getProgramPointerTy(const DataLayout &DL) const
Return the type for code pointers, which is determined by the program address space specified through...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
virtual MVT getFenceOperandTy(const DataLayout &DL) const
Return the type for operands of fence.
virtual bool shouldExpandGetVectorLength(EVT CountVT, unsigned VF, bool IsScalable) const
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
virtual uint64_t getByValTypeAlignment(Type *Ty, const DataLayout &DL) const
Return the desired alignment for ByVal or InAlloca aggregate function arguments in the caller paramet...
virtual MVT hasFastEqualityCompare(unsigned NumBits) const
Return the preferred operand type if the target has a quick way to compare integer values of the give...
MachineMemOperand::Flags getStoreMemOperandFlags(const StoreInst &SI, const DataLayout &DL) const
virtual bool shouldExpandCttzElements(EVT VT) const
Return true if the @llvm.experimental.cttz.elts intrinsic should be expanded using generic code in Se...
virtual bool signExtendConstant(const ConstantInt *C) const
Return true if this constant should be sign extended when promoting to a larger type.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
virtual Register getExceptionPointerRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception address on entry to an ...
bool supportsUnalignedAtomics() const
Whether the target supports unaligned atomic operations.
std::vector< ArgListEntry > ArgListTy
bool isBeneficialToExpandPowI(int64_t Exponent, bool OptForSize) const
Return true if it is beneficial to expand an @llvm.powi.
MVT getFrameIndexTy(const DataLayout &DL) const
Return the type for frame index, which is determined by the alloca address space specified through th...
virtual Register getExceptionSelectorRegister(const Constant *PersonalityFn) const
If a physical register, this returns the register that receives the exception typeid on entry to a la...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Vector types are broken down into some number of legal first class types.
virtual MVT getVPExplicitVectorLengthTy() const
Returns the type to be used for the EVL/AVL operand of VP nodes: ISD::VP_ADD, ISD::VP_SUB,...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool supportKCFIBundles() const
Return true if the target supports kcfi operand bundles.
virtual bool supportPtrAuthBundles() const
Return true if the target supports ptrauth operand bundles.
virtual bool supportSwiftError() const
Return true if the target supports swifterror attribute.
virtual SDValue visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue &NewLoad, SDValue Ptr, SDValue PassThru, SDValue Mask) const
virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, const SDLoc &DL) const
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
virtual EVT getTypeForExtReturn(LLVMContext &Context, EVT VT, ISD::NodeType) const
Return the type that should be used to zero or sign extend a zeroext/signext integer return value.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual InlineAsm::ConstraintCode getInlineAsmMemConstraint(StringRef ConstraintCode) const
std::vector< AsmOperandInfo > AsmOperandInfoVector
SDValue expandIS_FPCLASS(EVT ResultVT, SDValue Op, FPClassTest Test, SDNodeFlags Flags, const SDLoc &DL, SelectionDAG &DAG) const
Expand check for floating point class.
virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const
This callback is used to prepare for a volatile or atomic load.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const
Target-specific splitting of values into parts that fit a register storing a legal type.
virtual SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const
Target-specific combining of register parts into its original value.
virtual SDValue LowerCall(CallLoweringInfo &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower calls into the specified DAG.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Glue, const SDLoc &DL, const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
virtual SDValue LowerFormalArguments(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::InputArg > &, const SDLoc &, SelectionDAG &, SmallVectorImpl< SDValue > &) const
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL, const TargetRegisterInfo *TRI, const CallBase &Call) const
Split up the constraint string from the inline assembly value into the specific constraints and their...
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const
This callback is invoked for operations that are unsupported by the target, which are registered to u...
virtual SDValue LowerReturn(SDValue, CallingConv::ID, bool, const SmallVectorImpl< ISD::OutputArg > &, const SmallVectorImpl< SDValue > &, const SDLoc &, SelectionDAG &) const
This hook must be implemented to lower outgoing return values, described by the Outs array,...
virtual bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const
For some targets, an LLVM struct type must be broken down into multiple simple types,...
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo, SDValue Op, SelectionDAG *DAG=nullptr) const
Determines the constraint code and constraint type to use for the specific AsmOperandInfo,...
virtual void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const
virtual SDValue visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue Ptr, SDValue Val, SDValue Mask) const
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual bool CanLowerReturn(CallingConv::ID, MachineFunction &, bool, const SmallVectorImpl< ISD::OutputArg > &, LLVMContext &) const
This hook should be implemented to check whether the return values described by the Outs array can fi...
virtual void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
virtual bool isInlineAsmTargetBranch(const SmallVectorImpl< StringRef > &AsmStrs, unsigned OpNo) const
On x86, return true if the operand with index OpNo is a CALL or JUMP instruction, which can use eithe...
virtual const TargetIntrinsicInfo * getIntrinsicInfo() const
If intrinsic information is available, return it. If not, return null.
virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const
Returns true if a cast between SrcAS and DestAS is a noop.
const Triple & getTargetTriple() const
virtual TargetTransformInfo getTargetTransformInfo(const Function &F) const
Return a TargetTransformInfo for a given function.
CodeModel::Model getCodeModel() const
Returns the code model.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
unsigned EnableFastISel
EnableFastISel - This flag enables fast-path instruction selection which trades away generated code q...
unsigned NoTrapAfterNoreturn
Do not emit a trap instruction for 'unreachable' IR instructions behind noreturn calls,...
unsigned TrapUnreachable
Emit target-specific trap instruction for 'unreachable' IR instructions.
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.
unsigned getID() const
Return the register class ID number.
iterator begin() const
begin/end - Return all of the registers in this class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetFrameLowering * getFrameLowering() const
virtual const TargetInstrInfo * getInstrInfo() const
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
ArchType getArch() const
Get the parsed architecture type of this triple.
bool isPS() const
Tests whether the target is the PS4 or PS5 platform.
bool isWasm() const
Tests whether the target is wasm (32- and 64-bit).
bool isAArch64() const
Tests whether the target is AArch64 (little and big endian).
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
TypeID
Definitions of all of the base types for the Type system.
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
static Type * getVoidTy(LLVMContext &C)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
bool isIntegerTy() const
True if this is an instance of IntegerType.
bool isTokenTy() const
Return true if this is 'token'.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
bool isVoidTy() const
Return true if this is 'void'.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
CmpInst::Predicate getPredicate() const
This is the common base class for vector predication intrinsics.
static std::optional< unsigned > getVectorLengthParamPos(Intrinsic::ID IntrinsicID)
MaybeAlign getPointerAlignment() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
LLVMContext & getContext() const
All values hold a context through their type.
StringRef getName() const
Return a constant reference to the value's name.
Base class of all SIMD vector types.
Type * getElementType() const
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
const ParentTy * getParent() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
@ AMDGPU_CS_Chain
Used on AMDGPUs to give the middle-end more control over argument placement.
@ X86_VectorCall
MSVC calling convention that passes vectors and vector aggregates in SSE registers.
@ C
The default llvm calling convention, compatible with C.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ SET_FPENV
Sets the current floating-point environment.
@ VECREDUCE_SEQ_FADD
Generic reduction nodes.
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ INSERT_SUBVECTOR
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1.
@ BSWAP
Byte Swap and Counting operators.
@ SMULFIX
RESULT = [US]MULFIX(LHS, RHS, SCALE) - Perform fixed point multiplication on 2 integers with the same...
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
@ RESET_FPENV
Set floating-point environment to default state.
@ ADD
Simple integer binary arithmetic operators.
@ SMULFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ SET_FPMODE
Sets the current dynamic floating-point control modes.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ EH_SJLJ_SETUP_DISPATCH
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here.
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ RESET_FPMODE
Sets default dynamic floating-point control modes.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ INIT_TRAMPOLINE
INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ SDIVFIX
RESULT = [US]DIVFIX(LHS, RHS, SCALE) - Perform fixed point division on 2 integers with the same width...
@ EH_LABEL
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
@ EH_RETURN
OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents 'eh_return' gcc dwarf builtin,...
@ ANNOTATION_LABEL
ANNOTATION_LABEL - Represents a mid basic block label used by annotations.
@ SET_ROUNDING
Set rounding mode.
@ SIGN_EXTEND
Conversion operators.
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readsteadycounter intrinsic.
@ ADDROFRETURNADDR
ADDROFRETURNADDR - Represents the llvm.addressofreturnaddress intrinsic.
@ BR
Control flow instructions. These all have token chains.
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ SSUBO
Same for subtraction.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ VECTOR_INTERLEAVE
VECTOR_INTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the same...
@ FCANONICALIZE
Returns platform specific canonical encoding of a floating point number.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ ARITH_FENCE
ARITH_FENCE - This corresponds to a arithmetic fence intrinsic.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ CLEANUPRET
CLEANUPRET - Represents a return from a cleanup block funclet.
@ GET_FPMODE
Reads the current dynamic floating-point control modes.
@ GET_FPENV
Gets the current floating-point environment.
@ SHL
Shift and rotation operations.
@ PtrAuthGlobalAddress
A ptrauth constant.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EntryToken
EntryToken - This is the marker used to indicate the start of a region.
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ VSCALE
VSCALE(IMM) - Returns the runtime scaling factor used to calculate the number of elements within a sc...
@ LOCAL_RECOVER
LOCAL_RECOVER - Represents the llvm.localrecover intrinsic.
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ UBSANTRAP
UBSANTRAP - Trap with an immediate describing the kind of sanitizer failure.
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
@ SMULO
Same for multiplication.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VECTOR_REVERSE
VECTOR_REVERSE(VECTOR) - Returns a vector, of the same type as VECTOR, whose elements are shuffled us...
@ SDIVFIXSAT
Same as the corresponding unsaturated fixed point instructions, but the result is clamped between the...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ PCMARKER
PCMARKER - This corresponds to the pcmarker intrinsic.
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ SCMP
[US]CMP - 3-way comparison of signed or unsigned integers.
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ SPONENTRY
SPONENTRY - Represents the llvm.sponentry intrinsic.
@ INLINEASM
INLINEASM - Represents an inline asm block.
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ BRCOND
BRCOND - Conditional branch.
@ CATCHRET
CATCHRET - Represents a return from a catch block funclet.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ VECTOR_DEINTERLEAVE
VECTOR_DEINTERLEAVE(VEC1, VEC2) - Returns two vectors with all input and output vectors having the sa...
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ ADJUST_TRAMPOLINE
ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
MemIndexType
MemIndexType enum - This enum defines how to interpret MGATHER/SCATTER's index parameter when calcula...
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Flag
These should be considered private to the implementation of the MCInstrDesc class.
bool match(Val *V, const Pattern &P)
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
OneUse_match< T > m_OneUse(const T &SubPattern)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
VScaleVal_match m_VScale()
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
std::vector< CaseCluster > CaseClusterVector
void sortAndRangeify(CaseClusterVector &Clusters)
Sort Clusters and merge adjacent cases.
CaseClusterVector::iterator CaseClusterIt
std::pair< JumpTableHeader, JumpTable > JumpTableBlock
@ CC_Range
A cluster of adjacent case labels with the same destination, or just one case.
@ CC_JumpTable
A cluster of cases suitable for jump table lowering.
@ CC_BitTests
A cluster of cases suitable for bit test lowering.
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ DW_OP_LLVM_arg
Only used in LLVM metadata.
ExceptionBehavior
Exception behavior used for floating point operations.
@ ebStrict
This corresponds to "fpexcept.strict".
@ ebMayTrap
This corresponds to "fpexcept.maytrap".
@ ebIgnore
This corresponds to "fpexcept.ignore".
NodeAddr< FuncNode * > Func
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
int popcount(T Value) noexcept
Count the number of set bits in a value.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
void diagnoseDontCall(const CallInst &CI)
auto successors(const MachineBasicBlock *BB)
bool isIntOrFPConstant(SDValue V)
Return true if V is either a integer or FP constant.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
gep_type_iterator gep_type_end(const User *GEP)
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
llvm::SmallVector< int, 16 > createStrideMask(unsigned Start, unsigned Stride, unsigned VF)
Create a stride shuffle mask.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_FMINNUM
Floating point minnum.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
detail::zippy< detail::zip_first, T, U, Args... > zip_first(T &&t, U &&u, Args &&...args)
zip iterator that, for the sake of efficiency, assumes the first iteratee to be the shortest.
void sort(IteratorTy Start, IteratorTy End)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
bool hasSingleElement(ContainerTy &&C)
Returns true if the given container only contains a single element.
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
Value * salvageDebugInfoImpl(Instruction &I, uint64_t CurrentLocOps, SmallVectorImpl< uint64_t > &Ops, SmallVectorImpl< Value * > &AdditionalValues)
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Global
Append to llvm.global_dtors.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
llvm::SmallVector< int, 16 > createInterleaveMask(unsigned VF, unsigned NumVecs)
Create an interleave shuffle mask.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ And
Bitwise or logical AND of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, can return either.
DWARFExpression::Operation Op
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if w...
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
std::optional< RoundingMode > convertStrToRoundingMode(StringRef)
Returns a valid RoundingMode enumerator when given a string that is valid as input in constrained int...
gep_type_iterator gep_type_begin(const User *GEP)
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
bool isInTailCallPosition(const CallBase &Call, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the largest uint64_t less than or equal to Value and is Skew mod Align.
unsigned succ_size(const MachineBasicBlock *BB)
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
static const fltSemantics & IEEEsingle() LLVM_READNONE
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
uint64_t getScalarStoreSize() const
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
unsigned getVectorMinNumElements() const
Given a vector type, return the minimum number of elements it contains.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isScalableVector() const
Return true if this is a vector type where the runtime length is machine dependent.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
void setPointerAddrSpace(unsigned AS)
void setOrigAlign(Align A)
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
ConstraintPrefix Type
Type - The basic type of the constraint: input/output/clobber/label.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getUnknownStack(MachineFunction &MF)
Stack memory without other information.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
This struct represents the registers (physical or virtual) that a particular set of values is assigne...
SmallVector< unsigned, 4 > Regs
This list holds the registers assigned to the values.
SmallVector< unsigned, 4 > RegCount
This list holds the number of registers for each value.
bool isABIMangled() const
SmallVector< EVT, 4 > ValueVTs
The value types of the values, which may not be legal, and may need be promoted or synthesized from o...
SmallVector< std::pair< unsigned, TypeSize >, 4 > getRegsAndSizes() const
Return a list of registers and their sizes.
void AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching, unsigned MatchingIdx, const SDLoc &dl, SelectionDAG &DAG, std::vector< SDValue > &Ops) const
Add this value to the specified inlineasm node operand list.
SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr) const
Emit a series of CopyFromReg nodes that copies from this value and returns the result as a ValueVTs v...
SmallVector< MVT, 4 > RegVTs
The value types of the registers.
void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl, SDValue &Chain, SDValue *Glue, const Value *V=nullptr, ISD::NodeType PreferredExtendType=ISD::ANY_EXTEND) const
Emit a series of CopyToReg nodes that copies the specified value into the registers specified by this...
std::optional< CallingConv::ID > CallConv
Records if this value needs to be treated in an ABI dependant manner, different to normal type legali...
bool occupiesMultipleRegs() const
Check if the total RegCount is greater than one.
These are IR-level optimization flags that may be propagated to SDNodes.
void copyFMF(const FPMathOperator &FPMO)
Propagate the fast-math-flags from an IR FPMathOperator.
bool hasAllowReassociation() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
A MapVector that performs no allocations if smaller than a certain size.
MachineBasicBlock * Default
BranchProbability DefaultProb
MachineBasicBlock * Parent
bool FallthroughUnreachable
MachineBasicBlock * ThisBB
This structure is used to communicate between SelectionDAGBuilder and SDISel for the code generation ...
BranchProbability TrueProb
BranchProbability FalseProb
MachineBasicBlock * TrueBB
MachineBasicBlock * FalseBB
SDLoc DL
The debug location of the instruction this CaseBlock was produced from.
A cluster of case labels.
static CaseCluster range(const ConstantInt *Low, const ConstantInt *High, MachineBasicBlock *MBB, BranchProbability Prob)
This contains information for each constraint that we are lowering.
TargetLowering::ConstraintType ConstraintType
Information about the constraint code, e.g.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setConvergent(bool Value=true)
CallLoweringInfo & setCFIType(const ConstantInt *Type)
SmallVector< ISD::InputArg, 32 > Ins
bool IsPostTypeLegalization
SmallVector< SDValue, 4 > InVals
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setIsPatchPoint(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setTailCall(bool Value=true)
CallLoweringInfo & setIsPreallocated(bool Value=true)
CallLoweringInfo & setConvergenceControlToken(SDValue Token)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setPtrAuth(PtrAuthInfo Value)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
This structure is used to pass arguments to makeLibCall function.
MakeLibCallOptions & setDiscardResult(bool Value=true)
This structure contains the information necessary for lowering pointer-authenticating indirect calls.
void addIPToStateRange(const InvokeInst *II, MCSymbol *InvokeBegin, MCSymbol *InvokeEnd)