  31 #define DEBUG_TYPE "x86-isel"
  69 static std::pair<MVT, unsigned>
  75   return {MVT::v2i64, 1};
  77   return {MVT::v4i32, 1};
  80   return {MVT::v8i16, 1};
  83   return {MVT::v16i8, 1};
  87   return {MVT::v32i8, 1};
  91   return {MVT::v64i8, 1};
  92   return {MVT::v32i8, 2};
  96   if (!isPowerOf2_32(NumElts) || (NumElts == 64 && !Subtarget.hasBWI()) ||
  98     return {MVT::i8, NumElts};
 111   unsigned NumRegisters;
 112   std::tie(RegisterVT, NumRegisters) =
 123   if ((VT == MVT::f64 || VT == MVT::f80) && !Subtarget.is64Bit() &&
 145   unsigned NumRegisters;
 146   std::tie(RegisterVT, NumRegisters) =
 158   if (!Subtarget.is64Bit() && !Subtarget.hasX87()) {
 174     unsigned &NumIntermediates, MVT &RegisterVT) const {
 181     RegisterVT = MVT::i8;
 182     IntermediateVT = MVT::i1;
 184     return NumIntermediates;
 188   if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
 190     RegisterVT = MVT::v32i8;
 191     IntermediateVT = MVT::v32i1;
 192     NumIntermediates = 2;
 201                                                 NumIntermediates, RegisterVT);
 238   if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
 239     if (VTy->getPrimitiveSizeInBits().getFixedValue() == 128)
 240       MaxAlign = Align(16);
 241   } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
 244     if (EltAlign > MaxAlign)
 246   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
 247     for (auto *EltTy : STy->elements()) {
 250       if (EltAlign > MaxAlign)
 264   if (Subtarget.is64Bit()) {
 266     Align TyAlign = DL.getABITypeAlign(Ty);
 268     return TyAlign.value();
 275   return Alignment.value();
 284   if (!FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat)) {
 285     if (Op.size() >= 16 &&
 286         (!Subtarget.isUnalignedMem16Slow() || Op.isAligned(Align(16)))) {
 288       if (Op.size() >= 64 && Subtarget.hasAVX512() && Subtarget.hasEVEX512() &&
 290         return Subtarget.hasBWI() ? MVT::v64i8 : MVT::v16i32;
 293       if (Op.size() >= 32 && Subtarget.hasAVX() &&
 306     if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) &&
 309     } else if (((Op.isMemcpy() && !Op.isMemcpyStrSrc()) || Op.isZeroMemset()) &&
 310                Op.size() >= 8 && !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
 323   if (Subtarget.is64Bit() && Op.size() >= 8)
 337   return (8 * Alignment.value()) % SizeInBits == 0;
 348   return !Subtarget.isUnalignedMem16Slow();
 350   return !Subtarget.isUnalignedMem32Slow();
 357                                              unsigned *Fast) const {
 367   return (Alignment < 16 || !Subtarget.hasSSE41());
 376     unsigned AddrSpace, Align Alignment,
 378     unsigned *Fast) const {
 402   if (Subtarget.hasAVX512() && Subtarget.hasEVEX512())
 430   return Subtarget.useSoftFloat();
 437   if (Subtarget.is64Bit())
 441   unsigned ParamRegs = 0;
 443     ParamRegs = M->getNumberRegisterParameters();
 446   for (auto &Arg : Args) {
 448     if (T->isIntOrPtrTy())
 450       unsigned numRegs = 1;
 453       if (ParamRegs < numRegs)
 455       ParamRegs -= numRegs;
 475   if (!Subtarget.is64Bit())
 490       (Subtarget.is64Bit() &&
 498 std::pair<const TargetRegisterClass *, uint8_t>
 506   case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
 507     RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
 510     RRC = &X86::VR64RegClass;
 512   case MVT::f32: case MVT::f64:
 513   case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
 514   case MVT::v4f32: case MVT::v2f64:
 515   case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
 516   case MVT::v8f32: case MVT::v4f64:
 517   case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
 518   case MVT::v16f32: case MVT::v8f64:
 519     RRC = &X86::VR128XRegClass;
 522   return std::make_pair(RRC, Cost);
 525 unsigned X86TargetLowering::getAddressSpace() const {
 526   if (Subtarget.is64Bit())
 556   int Offset = M->getStackProtectorGuardOffset();
 561     Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
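     // 0x28 and 0x14 match the glibc TLS layout: the stack-protector canary
     // lives at %fs:0x28 on x86-64 and at %gs:0x14 on i386.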
 563   StringRef GuardReg = M->getStackProtectorGuardReg();
 564   if (GuardReg == "fs")
 566   else if (GuardReg == "gs")
 570   StringRef GuardSymb = M->getStackProtectorGuardSymbol();
 571   if (!GuardSymb.empty()) {
 577         nullptr, GuardSymb, nullptr,
 595         M.getOrInsertGlobal("__security_cookie",
 604       F->addParamAttr(0, Attribute::AttrKind::InReg);
 609   StringRef GuardMode = M.getStackProtectorGuard();
 612   if ((GuardMode == "tls" || GuardMode.empty()) &&
 622     return M.getGlobalVariable("__security_cookie");
 631     return M.getFunction("__security_check_cookie");
 644   int Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
 661 bool X86TargetLowering::CanLowerReturn(
 665   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
 666   return CCInfo.CheckReturn(Outs, RetCC_X86);
 670   static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
 675   static const MCPhysReg RCRegs[] = {X86::FPCW, X86::MXCSR};
 685   if (ValVT == MVT::v1i1)
 689   if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
 690       (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
 694     EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
 696     if (ValLoc == MVT::i32)
 701   if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
 702       (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
 716   assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
 717   assert(Subtarget.is32Bit() && "Expecting 32 bit target");
 720          "The value should reside in two registers");
 730   RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
 731   RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
 746   bool ShouldDisableCalleeSavedRegister =
 758   for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
 764     if (ShouldDisableCalleeSavedRegister)
 767     SDValue ValToCopy = OutVals[OutsIndex];
 785            "Unexpected FP-extend for return value.");
 792     } else if (!Subtarget.hasSSE2() &&
 793                X86::FR64XRegClass.contains(VA.getLocReg()) &&
 816     if (Subtarget.is64Bit()) {
 817       if (ValVT == MVT::x86mmx) {
 819         ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
 825           ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
 832            "Currently the only custom case is when we split v64i1 to 2 regs");
 838     if (ShouldDisableCalleeSavedRegister)
 853   for (auto &RetVal : RetVals) {
 854     if (RetVal.first == X86::FP0 || RetVal.first == X86::FP1) {
 859     Chain = DAG.getCopyToReg(Chain, dl, RetVal.first, RetVal.second, Glue);
 862         DAG.getRegister(RetVal.first, RetVal.second.getValueType()));
 903   Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Glue);
 913   if (ShouldDisableCalleeSavedRegister &&
 940   return DAG.getNode(opcode, dl, MVT::Other, RetOps);
 943 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
 944   if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
 952   if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() == MVT::Glue)
 954     TCChain = Copy->getOperand(0);
 964     if (U->getNumOperands() > 4)
 966     if (U->getNumOperands() == 4 &&
 967         U->getOperand(U->getNumOperands() - 1).getValueType() != MVT::Glue)
 981   MVT ReturnMVT = MVT::i32;
 984   if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
 994   return VT.bitsLT(MinVT) ? MinVT : VT;
1010   assert((Subtarget.hasBWI()) && "Expected AVX512BW target!");
1011   assert(Subtarget.is32Bit() && "Expecting 32 bit target");
1013          "Expecting first location of 64 bit width type");
1015          "The locations should have the same type");
1017          "The values should reside in two registers");
1020   SDValue ArgValueLo, ArgValueHi;
1026   if (nullptr == InGlue) {
1062   if (ValVT == MVT::v1i1)
1065   if (ValVT == MVT::v64i1) {
1067   assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
1073     MaskLenVT = MVT::i8;
1076     MaskLenVT = MVT::i16;
1079     MaskLenVT = MVT::i32;
1093 SDValue X86TargetLowering::LowerCallResult(
1104   CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
1107   for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
1127     } else if (!Subtarget.hasSSE2() &&
1128                X86::FR64XRegClass.contains(VA.getLocReg()) &&
1129                CopyVT == MVT::f64) {
1139     bool RoundAfterCopy = false;
1142     if (!Subtarget.hasX87())
1145     RoundAfterCopy = (CopyVT != VA.getLocVT());
1151            "Currently the only custom case is when we split v64i1 to 2 regs");
1199 template <typename T>
1203   static_assert(std::is_same_v<T, ISD::OutputArg> ||
1204                     std::is_same_v<T, ISD::InputArg>,
1205                 "requires ISD::OutputArg or ISD::InputArg");
1209   if (!Subtarget.is32Bit())
1217     if (!Flags.isSRet() || Flags.isInReg())
1241       Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
1281 bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
1302   bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
1309   bool ExtendedInMem =
1322   if (Flags.isByVal()) {
1323     unsigned Bytes = Flags.getByValSize();
1324     if (Bytes == 0) Bytes = 1;
1333   EVT ArgVT = Ins[i].ArgVT;
1344   if (Flags.isCopyElisionCandidate() &&
1346       !ScalarizedVector) {
1348     if (Ins[i].PartOffset == 0) {
1357           ValVT, dl, Chain, PartAddr,
1371       if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
1400       ValVT, dl, Chain, FIN,
1403   return ExtendedInMem
1413   assert(Subtarget.is64Bit());
1416     static const MCPhysReg GPR64ArgRegsWin64[] = {
1417       X86::RCX, X86::RDX, X86::R8, X86::R9
1419     return ArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
1422   static const MCPhysReg GPR64ArgRegs64Bit[] = {
1423     X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
1425   return ArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
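     // Note the ABI difference: Win64 passes the first integer arguments in
     // RCX/RDX/R8/R9, while the SysV x86-64 convention uses
     // RDI/RSI/RDX/RCX/R8/R9.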
1432   assert(Subtarget.is64Bit());
1438     return std::nullopt;
1441   bool isSoftFloat = Subtarget.useSoftFloat();
1442   if (isSoftFloat || !Subtarget.hasSSE1())
1445     return std::nullopt;
1447   static const MCPhysReg XMMArgRegs64Bit[] = {
1448       X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
1449       X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
1451   return ArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
1458   return A.getValNo() < B.getValNo();
1465 class VarArgsLoweringHelper {
1470       : FuncInfo(FuncInfo), DL(Loc), DAG(DAG), Subtarget(Subtarget),
1471         TheMachineFunction(DAG.getMachineFunction()),
1473         FrameInfo(TheMachineFunction.getFrameInfo()),
1474         FrameLowering(*Subtarget.getFrameLowering()),
1475         TargLowering(DAG.getTargetLoweringInfo()), CallConv(CallConv),
1479   void lowerVarArgsParameters(SDValue &Chain, unsigned StackSize);
1482   void createVarArgAreaAndStoreRegisters(SDValue &Chain, unsigned StackSize);
1484   void forwardMustTailParameters(SDValue &Chain);
1486   bool is64Bit() const { return Subtarget.is64Bit(); }
1487   bool isWin64() const { return Subtarget.isCallingConvWin64(CallConv); }
1503 void VarArgsLoweringHelper::createVarArgAreaAndStoreRegisters(
1504     SDValue &Chain, unsigned StackSize) {
1511         FrameInfo.CreateFixedObject(1, StackSize, true));
1521   unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
1522   unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
1524   assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
1525          "SSE register cannot be used when SSE is disabled!");
1530     int HomeOffset = FrameLowering.getOffsetOfLocalArea() + 8;
1532         FrameInfo.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
1554     Register GPR = TheMachineFunction.addLiveIn(Reg, &X86::GR64RegClass);
1557   const auto &AvailableXmms = ArgXMMs.slice(NumXMMRegs);
1558   if (!AvailableXmms.empty()) {
1559     Register AL = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
1566       TheMachineFunction.getRegInfo().addLiveIn(Reg);
1577   for (SDValue Val : LiveGPRs) {
1591   if (!LiveXMMRegs.empty()) {
1607         SaveXMMOps, MVT::i8, StoreMMO));
1610   if (!MemOps.empty())
1615 void VarArgsLoweringHelper::forwardMustTailParameters(SDValue &Chain) {
1617   MVT VecVT = MVT::Other;
1619   if (Subtarget.useAVX512Regs() &&
1622     VecVT = MVT::v16f32;
1623   else if (Subtarget.hasAVX())
1625   else if (Subtarget.hasSSE2())
1632   if (VecVT != MVT::Other)
1638   CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
1641   if (is64Bit() && !isWin64() && !CCInfo.isAllocated(X86::AL)) {
1642     Register ALVReg = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
1650     FR.VReg = TheMachineFunction.getRegInfo().createVirtualRegister(
1651         TargLowering.getRegClassFor(FR.VT));
1656 void VarArgsLoweringHelper::lowerVarArgsParameters(SDValue &Chain,
1657                                                    unsigned StackSize) {
1663   if (FrameInfo.hasVAStart())
1664     createVarArgAreaAndStoreRegisters(Chain, StackSize);
1666   if (FrameInfo.hasMustTailInVarArgFunc())
1667     forwardMustTailParameters(Chain);
1670 SDValue X86TargetLowering::LowerFormalArguments(
1679       F.getName() == "main")
1683   bool Is64Bit = Subtarget.is64Bit();
1688          "Var args not supported with calling conv' regcall, fastcc, ghc or hipe");
1696     CCInfo.AllocateStack(32, Align(8));
1698   CCInfo.AnalyzeArguments(Ins, CC_X86);
1703     CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);
1709          "Argument Location list must be sorted before lowering");
1712   for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
1714     assert(InsIndex < Ins.size() && "Invalid Ins index");
1722            "Currently the only custom case is when we split v64i1 to 2 regs");
1730       if (RegVT == MVT::i8)
1731         RC = &X86::GR8RegClass;
1732       else if (RegVT == MVT::i16)
1733         RC = &X86::GR16RegClass;
1734       else if (RegVT == MVT::i32)
1735         RC = &X86::GR32RegClass;
1736       else if (Is64Bit && RegVT == MVT::i64)
1737         RC = &X86::GR64RegClass;
1738       else if (RegVT == MVT::f16)
1739         RC = Subtarget.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
1740       else if (RegVT == MVT::f32)
1741         RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
1742       else if (RegVT == MVT::f64)
1743         RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
1744       else if (RegVT == MVT::f80)
1745         RC = &X86::RFP80RegClass;
1746       else if (RegVT == MVT::f128)
1747         RC = &X86::VR128RegClass;
1749         RC = &X86::VR512RegClass;
1751         RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
1753         RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
1754       else if (RegVT == MVT::x86mmx)
1755         RC = &X86::VR64RegClass;
1756       else if (RegVT == MVT::v1i1)
1757         RC = &X86::VK1RegClass;
1758       else if (RegVT == MVT::v8i1)
1759         RC = &X86::VK8RegClass;
1760       else if (RegVT == MVT::v16i1)
1761         RC = &X86::VK16RegClass;
1762       else if (RegVT == MVT::v32i1)
1763         RC = &X86::VK32RegClass;
1764       else if (RegVT == MVT::v64i1)
1765         RC = &X86::VK64RegClass;
1801       LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
1806         !(Ins[I].Flags.isByVal() && VA.isRegLoc())) {
1814   for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
1815     if (Ins[I].Flags.isSwiftAsync()) {
1820       int PtrSize = Subtarget.is64Bit() ? 8 : 4;
1823         X86FI->setSwiftAsyncContextFrameIdx(FI);
1841     if (Ins[I].Flags.isSRet()) {
1843              "SRet return has already been set");
1854   unsigned StackSize = CCInfo.getStackSize();
1858     StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
1861     VarArgsLoweringHelper(FuncInfo, dl, DAG, Subtarget, CallConv, CCInfo)
1862         .lowerVarArgsParameters(Chain, StackSize);
1899       EHInfo->PSPSymFrameIdx = PSPSymFI;
1904       F.hasFnAttribute("no_caller_saved_registers")) {
1906     for (std::pair<Register, Register> Pair : MRI.liveins())
1907       MRI.disableCalleeSavedRegister(Pair.first);
1911   for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
1912     if (Ins[I].Flags.isSwiftSelf() || Ins[I].Flags.isSwiftAsync() ||
1913         Ins[I].Flags.isSwiftError()) {
1915              "Swift attributes can't be used with preserve_none");
1928                                               bool isByVal) const {
1941       Chain, dl, Arg, PtrOff,
1948 SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
1950     bool Is64Bit, int FPDiff, const SDLoc &dl) const {
1964                                          EVT PtrVT, unsigned SlotSize,
1965                                          int FPDiff, const SDLoc &dl) {
1967   if (!FPDiff) return Chain;
1969   int NewReturnAddrFI =
1973   Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
1985   Mask.push_back(NumElems);
1986   for (unsigned i = 1; i != NumElems; ++i)
2004   const auto *CB = CLI.CB;
2007   bool Is64Bit = Subtarget.is64Bit();
2009   bool IsSibcall = false;
2012   bool IsCalleePopSRet = !IsGuaranteeTCO && hasCalleePopSRet(Outs, Subtarget);
2014   bool HasNCSR = (CB && isa<CallInst>(CB) &&
2015                   CB->hasFnAttr("no_caller_saved_registers"));
2016   bool HasNoCfCheck = (CB && CB->doesNoCfCheck());
2017   bool IsIndirectCall = (CB && isa<CallInst>(CB) && CB->isIndirectCall());
2018   bool IsCFICall = IsIndirectCall && CLI.CFIType;
2020   Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
2032     CCInfo.AllocateStack(32, Align(8));
2034   CCInfo.AnalyzeArguments(Outs, CC_X86);
2039     CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
2043   if (Subtarget.isPICStyleGOT() && !IsGuaranteeTCO && !IsMustTail) {
2050     if (!G || (!G->getGlobal()->hasLocalLinkage() &&
2051                G->getGlobal()->hasDefaultVisibility()))
2055   if (isTailCall && !IsMustTail) {
2057     isTailCall = IsEligibleForTailCallOptimization(CLI, CCInfo, ArgLocs,
2062     if (!IsGuaranteeTCO && isTailCall)
2069   if (IsMustTail && !isTailCall)
2071                        "site marked musttail");
2074          "Var args not supported with calling convention fastcc, ghc or hipe");
2077   unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
2083     NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2092     FPDiff = NumBytesCallerPushed - NumBytes;
2096     if (FPDiff < X86Info->getTCReturnAddrDelta())
2100   unsigned NumBytesToPush = NumBytes;
2101   unsigned NumBytesToPop = NumBytes;
2106   if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
2108     if (!ArgLocs.back().isMemLoc())
2111     if (ArgLocs.back().getLocMemOffset() != 0)
2113            "the only memory argument");
2116            "cannot use preallocated attribute on a register "
2119     for (size_t i = 0; i < CLI.OutVals.size(); ++i) {
2121         PreallocatedOffsets.push_back(ArgLocs[i].getLocMemOffset());
2125     size_t PreallocatedId = MFI->getPreallocatedIdForCallSite(CLI.CB);
2126     MFI->setPreallocatedStackSize(PreallocatedId, NumBytes);
2127     MFI->setPreallocatedArgOffsets(PreallocatedId, PreallocatedOffsets);
2131   if (!IsSibcall && !IsMustTail)
2133                                  NumBytes - NumBytesToPush, dl);
2137   if (isTailCall && FPDiff)
2138     Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2139                                     Is64Bit, FPDiff, dl);
2148          "Argument Location list must be sorted before lowering");
2153   for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
2155     assert(OutIndex < Outs.size() && "Invalid Out index");
2158     if (Flags.isInAlloca() || Flags.isPreallocated())
2163     SDValue Arg = OutVals[OutIndex];
2164     bool isByVal = Flags.isByVal();
2184       Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2197                                       Flags.getByValSize(),
2198                                       std::max(Align(16), Flags.getNonZeroByValAlign()), false);
2209       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2211           Chain, dl, Arg, SpillSlot,
2221            "Currently the only custom case is when we split v64i1 to 2 regs");
2229     if (isVarArg && IsWin64) {
2234       case X86::XMM0: ShadowReg = X86::RCX; break;
2235       case X86::XMM1: ShadowReg = X86::RDX; break;
2236       case X86::XMM2: ShadowReg = X86::R8; break;
2237       case X86::XMM3: ShadowReg = X86::R9; break;
2240         RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2242     } else if (!IsSibcall && (!isTailCall || isByVal)) {
2247       MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2248                                              dl, DAG, VA, Flags, isByVal));
2252   if (!MemOpChains.empty())
2277     if (G && !G->getGlobal()->hasLocalLinkage() &&
2278         G->getGlobal()->hasDefaultVisibility())
2279       Callee = LowerGlobalAddress(Callee, DAG);
2280     else if (isa<ExternalSymbolSDNode>(Callee))
2281       Callee = LowerExternalSymbol(Callee, DAG);
2285   if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail &&
2286       (Subtarget.hasSSE1() || !M->getModuleFlag("SkipRaxSetup"))) {
2297         X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2298         X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2300     unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
2302            && "SSE registers cannot be used when SSE is disabled");
2308   if (isVarArg && IsMustTail) {
2310     for (const auto &F : Forwards) {
2312       RegsToPass.push_back(std::make_pair(F.PReg, Val));
2319   if (!IsSibcall && isTailCall) {
2331     for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
2338              "Expecting custom case only in regcall calling convention");
2348       SDValue Arg = OutVals[OutsIndex];
2351       if (Flags.isInAlloca() || Flags.isPreallocated())
2359       if (Flags.isByVal()) {
2374             ArgChain, dl, Arg, FIN,
2379     if (!MemOpChains2.empty())
2385                                    RegInfo->getSlotSize(), FPDiff, dl);
2391   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
2392     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
2393                              RegsToPass[i].second, InGlue);
2398     assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
2409     Callee = LowerGlobalOrExternal(Callee, DAG, true);
2411              Callee.getValueType() == MVT::i32) {
2420   if (!IsSibcall && isTailCall && !IsMustTail) {
2434   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
2436                                   RegsToPass[i].second.getValueType()));
2440     auto AdaptedCC = CallConv;
2448     if (CB && CB->hasFnAttr("no_callee_saved_registers"))
2450     return RegInfo->getCallPreservedMask(MF, AdaptedCC);
2452   assert(Mask && "Missing call preserved mask for calling convention");
2456     if (CLI.CB && isa<InvokeInst>(CLI.CB))
2461     if (CLI.CB && isa<InvokeInst>(CLI.CB))
2470   if (!Is64Bit && CLI.CB && isa<InvokeInst>(CLI.CB)) {
2494     memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize);
2498     if (ShouldDisableArgRegs) {
2499       for (auto const &RegPair : RegsToPass)
2532   if (HasNoCfCheck && IsCFProtectionSupported && IsIndirectCall) {
2539            "tail calls cannot be marked with clang.arc.attachedcall");
2540     assert(Is64Bit && "clang.arc.attachedcall is only supported in 64bit mode");
2566   unsigned NumBytesForCalleeToPop = 0;
2569     NumBytesForCalleeToPop = NumBytes;
2573     NumBytesForCalleeToPop = 4;
2577   Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, NumBytesForCalleeToPop,
2583   for (unsigned I = 0, E = Outs.size(); I != E; ++I) {
2584     if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftAsync() ||
2585         Outs[I].Flags.isSwiftError()) {
2587              "Swift attributes can't be used with preserve_none");
2594   return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, dl, DAG,
2632 X86TargetLowering::GetAlignedArgumentStackSize(const unsigned StackSize,
2636   assert(StackSize % SlotSize == 0 &&
2637          "StackSize must be a multiple of SlotSize");
2638   return alignTo(StackSize + SlotSize, StackAlignment) - SlotSize;
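     // Worked example (illustrative values): with SlotSize = 8 and
     // StackAlignment = 16, StackSize = 32 yields alignTo(32 + 8, 16) - 8 =
     // 48 - 8 = 40, so StackSize plus the return-address slot stays 16-byte
     // aligned at the call site.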
2660       cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
2677   if (!Flags.isByVal()) {
2681     unsigned Opcode = Def->getOpcode();
2682     if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
2683          Opcode == X86::LEA64_32r) &&
2684         Def->getOperand(1).isFI()) {
2685       FI = Def->getOperand(1).getIndex();
2686       Bytes = Flags.getByValSize();
2690   } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
2691     if (Flags.isByVal())
2706     Bytes = Flags.getByValSize();
2742 bool X86TargetLowering::IsEligibleForTailCallOptimization(
2767   bool CCMatch = CallerCC == CalleeCC;
2776   if (IsCalleeWin64 != IsCallerWin64)
2779   if (IsGuaranteeTCO) {
2791     if (RegInfo->hasStackRealignment(MF))
2802   } else if (IsCalleePopSRet)
2810   if (isVarArg && !Outs.empty()) {
2813     if (IsCalleeWin64 || IsCallerWin64)
2816     for (const auto &VA : ArgLocs)
2825   for (const auto &In : Ins) {
2833   CCState RVCCInfo(CalleeCC, false, MF, RVLocs, C);
2834   RVCCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2835   for (const auto &VA : RVLocs) {
2847   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2849   const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2850   if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2858   if (!Outs.empty()) {
2859     if (StackArgsSize > 0) {
2865       for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
2885   if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
2886                                 !isa<ExternalSymbolSDNode>(Callee)) ||
2887                                PositionIndependent)) {
2888     unsigned NumInRegs = 0;
2891     unsigned MaxInRegs = PositionIndependent ? 2 : 3;
2893     for (const auto &VA : ArgLocs) {
2899       case X86::EAX: case X86::EDX: case X86::ECX:
2900         if (++NumInRegs == MaxInRegs)
2912   bool CalleeWillPop =
2916   if (unsigned BytesToPop =
2919     bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
2920     if (!CalleePopMatches)
2922   } else if (CalleeWillPop && StackArgsSize > 0) {
2933                         bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
2939   switch (CallingConv) {
static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls)
Return true if the calling convention is one that we can guarantee TCO for.
static bool mayTailCallThisCC(CallingConv::ID CC)
Return true if we might ever do TCO for calls with this calling convention.
static bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, MachineFrameInfo &MFI, const MachineRegisterInfo *MRI, const TargetInstrInfo *TII)
MatchingStackOffset - Return true if the given stack call argument is already available in the same p...
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, const SDLoc &dl)
CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...
const MCPhysReg ArgGPRs[]
static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt)
Return true if the function is being made into a tailcall target by changing its ABI.
Module.h This file contains the declarations for the Module class.
This file defines ARC utility functions which are used by various parts of the compiler.
static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign)
getMaxByValAlign - Helper for getByValTypeAlignment to determine the desired ByVal argument alignment...
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc, const SDLoc &DL, SelectionDAG &DAG)
Lowers masks values (v*i1) to the local register values.
static void Passv64i1ArgInRegs(const SDLoc &DL, SelectionDAG &DAG, SDValue &Arg, SmallVectorImpl< std::pair< Register, SDValue > > &RegsToPass, CCValAssign &VA, CCValAssign &NextVA, const X86Subtarget &Subtarget)
Breaks v64i1 value into two registers and adds the new node to the DAG.
static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA, SDValue &Root, SelectionDAG &DAG, const SDLoc &DL, const X86Subtarget &Subtarget, SDValue *InGlue=nullptr)
Reads two 32 bit registers and creates a 64 bit mask value.
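A plain-integer model of the split/rejoin these two helpers perform for v64i1 values on 32-bit targets; the function names here are illustrative, not LLVM API:

#include <cassert>
#include <cstdint>
#include <utility>

// Split a 64-bit mask into the two 32-bit halves that travel in registers.
std::pair<uint32_t, uint32_t> splitMask(uint64_t Mask) {
  return {static_cast<uint32_t>(Mask), static_cast<uint32_t>(Mask >> 32)};
}

// Rebuild the 64-bit mask from its low/high register halves.
uint64_t joinMask(uint32_t Lo, uint32_t Hi) {
  return (static_cast<uint64_t>(Hi) << 32) | Lo;
}

int main() {
  auto [Lo, Hi] = splitMask(0x0123456789abcdefULL);
  assert(joinMask(Lo, Hi) == 0x0123456789abcdefULL);
  return 0;
}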
static ArrayRef< MCPhysReg > get64BitArgumentXMMs(MachineFunction &MF, CallingConv::ID CallConv, const X86Subtarget &Subtarget)
static bool isSortedByValueNo(ArrayRef< CCValAssign > ArgLocs)
static ArrayRef< MCPhysReg > get64BitArgumentGPRs(CallingConv::ID CallConv, const X86Subtarget &Subtarget)
static std::pair< MVT, unsigned > handleMaskRegisterForCallingConv(unsigned NumElts, CallingConv::ID CC, const X86Subtarget &Subtarget)
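A hedged sketch of the element-count mapping suggested by the returns in the listing above (the real function also consults the calling convention and subtarget features such as BWI and 512-bit register availability):

#include <string>
#include <utility>

// NumElts of a v<N>i1 mask -> (register type, number of registers).
std::pair<std::string, unsigned> maskRegisterFor(unsigned NumElts,
                                                 bool Has512BitRegs) {
  switch (NumElts) {
  case 2:  return {"v2i64", 1};
  case 4:  return {"v4i32", 1};
  case 8:  return {"v8i16", 1};
  case 16: return {"v16i8", 1};
  case 32: return {"v32i8", 1};
  case 64: return Has512BitRegs ? std::make_pair(std::string("v64i8"), 1u)
                                : std::make_pair(std::string("v32i8"), 2u);
  default: return {"i8", NumElts}; // scalarized fallback for odd sizes
  }
}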
static bool shouldDisableRetRegFromCSR(CallingConv::ID CC)
Returns true if a CC can dynamically exclude a register from the list of callee-saved-registers (Targ...
static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl, const char *Msg)
Call this when the user attempts to do something unsupported, like returning a double without SSE2 en...
static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue RetAddrFrIdx, EVT PtrVT, unsigned SlotSize, int FPDiff, const SDLoc &dl)
Emit a store of the return address if tail call optimization is performed and it is required (FPDiff!...
static bool hasCalleePopSRet(const SmallVectorImpl< T > &Args, const X86Subtarget &Subtarget)
Determines whether Args, either a set of outgoing arguments to a call, or a set of incoming args of a...
static bool shouldDisableArgRegFromCSR(CallingConv::ID CC)
Returns true if a CC can dynamically exclude a register from the list of callee-saved-registers (Targ...
static bool hasStackGuardSlotTLS(const Triple &TargetTriple)
static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT, const EVT &ValLoc, const SDLoc &DL, SelectionDAG &DAG)
The function will lower a register of various sizes (8/16/32/64) to a mask value of the expected size...
static Constant * SegmentOffset(IRBuilderBase &IRB, int Offset, unsigned AddressSpace)
static bool isBitAligned(Align Alignment, uint64_t SizeInBits)
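A standalone sketch of the predicate, with the alignment taken in bytes (mirrors the check on listing line 337):

#include <cstdint>

// An access of SizeInBits is "bit aligned" when the byte alignment,
// expressed in bits, is a multiple of the access size.
bool isBitAligned(uint64_t AlignmentBytes, uint64_t SizeInBits) {
  return (8 * AlignmentBytes) % SizeInBits == 0;
}
// e.g. isBitAligned(16, 64) is true: (8 * 16) % 64 == 0.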
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
bool hasFnAttr(Attribute::AttrKind Kind) const
Return true if the attribute exists for the function.
const Function * getParent() const
Return the enclosing method, or null if none.
CCState - This class holds information needed while lowering arguments and return values.
static bool resultsCompatible(CallingConv::ID CalleeCC, CallingConv::ID CallerCC, MachineFunction &MF, LLVMContext &C, const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn CalleeFn, CCAssignFn CallerFn)
Returns true if the results of the two calling conventions are compatible.
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
void convertToReg(unsigned RegNo)
int64_t getLocMemOffset() const
CallingConv::ID getCallingConv() const
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
This class represents a function call, abstracting a target machine's calling convention.
static Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
This is an important base class in LLVM.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Diagnostic information for unsupported feature in backend.
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasPersonalityFn() const
Check whether this function has a personality function.
Constant * getPersonalityFn() const
Get the personality function associated with this function.
Type * getReturnType() const
Returns the type of the ret val.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Module * getParent()
Get the module that this global value is contained inside of...
void setDSOLocal(bool Local)
@ ExternalLinkage
Externally visible function.
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
TargetInstrInfo overrides.
Common base class shared among various IRBuilders.
BasicBlock * GetInsertBlock() const
LLVMContext & getContext() const
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
This is an important class for using LLVM in a threaded context.
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
This class is used to represent ISD::LOAD nodes.
Context object for machine code objects.
Base class for the full range of assembler expressions which are needed for parsing.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
@ INVALID_SIMPLE_VALUE_TYPE
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool is512BitVector() const
Return true if this is a 512-bit vector type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
MVT getVectorElementType() const
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
MCSymbol * getSymbol() const
Return the MCSymbol for this basic block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setObjectZExt(int ObjectIdx, bool IsZExt)
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setObjectSExt(int ObjectIdx, bool IsSExt)
bool isImmutableObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to an immutable object.
void setHasTailCall(bool V=true)
bool isObjectZExt(int ObjectIdx) const
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool isObjectSExt(int ObjectIdx) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
int getObjectIndexBegin() const
Return the minimum frame object index.
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
MCSymbol * getPICBaseSymbol() const
getPICBaseSymbol - Return a function-local symbol to represent the PIC base.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
uint32_t * allocateRegMask()
Allocate and initialize a register mask with NumRegister bits.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
Representation of each machine instruction.
@ EK_Custom32
EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the TargetLowering::LowerCustomJ...
@ EK_LabelDifference64
EK_LabelDifference64 - Each entry is the address of the block minus the address of the jump table.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOStore
The memory access writes data.
static unsigned getRegMaskSize(unsigned NumRegs)
Returns number of elements needed for a regmask array.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void disableCalleeSavedRegister(MCRegister Reg)
Disables the register from the list of CSRs.
A Module instance is used to store all the information related to an LLVM module.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
Represents one node in the SelectionDAG.
void setCFIType(uint32_t Type)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
const DataLayout & getDataLayout() const
void addHeapAllocSite(const SDNode *Node, MDNode *MD)
Set HeapAllocSite to be associated with Node.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getRegister(unsigned Reg, EVT VT)
const TargetMachine & getTarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
SDValue getRegisterMask(const uint32_t *RegMask)
void addCallSiteInfo(const SDNode *Node, CallSiteInfo &&CallInfo)
Set CallSiteInfo to be associated with Node.
LLVMContext * getContext() const
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
Class to represent struct types.
Information about stack frame layout on the target.
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual Value * getSafeStackPointerLocation(IRBuilderBase &IRB) const
Returns the target-specific address of the unsafe stack pointer.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
virtual Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
virtual Value * getIRStackGuard(IRBuilderBase &IRB) const
If the target has a standard location for the stack protector guard, returns the address of that loca...
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
virtual std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const
Return the largest legal super-reg register class of the register class for the specified type and it...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
std::vector< ArgListEntry > ArgListTy
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
virtual void insertSSPDeclarations(Module &M) const
Inserts necessary declarations for SSP (stack protection) purpose.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
bool isPositionIndependent() const
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
CodeModel::Model getCodeModel() const
Returns the code model.
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Triple - Helper class for working with autoconf configuration names.
bool isAndroidVersionLT(unsigned Major) const
bool isAndroid() const
Tests whether the target is Android.
bool isOSMSVCRT() const
Is this a "Windows" OS targeting a "MSVCRT.dll" environment.
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, XROS, or DriverKit).
bool isOSGlibc() const
Tests whether the OS uses glibc.
bool isWindowsMSVCEnvironment() const
Checks if the environment could be MSVC.
bool isWindowsItaniumEnvironment() const
The instances of the Type class are immutable: once they are created, they are never changed.
bool isX86_FP80Ty() const
Return true if this is x86 long double.
static Type * getVoidTy(LLVMContext &C)
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
LLVM Value Representation.
X86MachineFunctionInfo - This class is derived from MachineFunction and contains private X86 target-s...
void setBytesToPopOnReturn(unsigned bytes)
void setBPClobberedByCall(bool C)
void setFPClobberedByCall(bool C)
unsigned getVarArgsGPOffset() const
int getRegSaveFrameIndex() const
void setHasSwiftAsyncContext(bool v)
Register getSRetReturnReg() const
void setVarArgsGPOffset(unsigned Offset)
void setRegSaveFrameIndex(int Idx)
void setForceFramePointer(bool forceFP)
void setSRetReturnReg(Register Reg)
unsigned getVarArgsFPOffset() const
void setArgumentStackSize(unsigned size)
SmallVectorImpl< ForwardedRegister > & getForwardedMustTailRegParms()
void setTCReturnAddrDelta(int delta)
void setVarArgsFrameIndex(int Idx)
void setBPClobberedByInvoke(bool C)
void setFPClobberedByInvoke(bool C)
unsigned getBytesToPopOnReturn() const
void setVarArgsFPOffset(unsigned Offset)
unsigned getSlotSize() const
bool useLight256BitInstructions() const
bool isPICStyleGOT() const
bool isTargetWindowsMSVC() const
bool isTarget64BitILP32() const
Is this x86_64 with the ILP32 programming model (x32 ABI)?
bool isTargetDarwin() const
const Triple & getTargetTriple() const
const X86InstrInfo * getInstrInfo() const override
bool useAVX512Regs() const
bool isTargetCOFF() const
bool isCallingConvWin64(CallingConv::ID CC) const
bool isTargetFuchsia() const
bool isPICStyleRIPRel() const
bool isTargetCygMing() const
const X86RegisterInfo * getRegisterInfo() const override
unsigned getPreferVectorWidth() const
bool isTargetAndroid() const
const X86FrameLowering * getFrameLowering() const override
std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const override
Return the largest legal super-reg register class of the register class for the specified type and it...
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
bool isMemoryAccessFast(EVT VT, Align Alignment) const
bool useSoftFloat() const override
const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const override
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
bool isSafeMemOpType(MVT VT) const override
Returns true if it's safe to use load / store of the specified type to expand memcpy / memset inline.
Value * getIRStackGuard(IRBuilderBase &IRB) const override
If the target has a standard location for the stack protector cookie, returns the address of that loc...
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
It returns EVT::Other if the type should be determined using generic target-independent logic.
Function * getSSPStackGuardCheck(const Module &M) const override
If the target has a standard stack protection check function that performs validation and error handl...
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Returns true if the target allows unaligned memory accesses of the specified type.
Value * getSDagStackGuard(const Module &M) const override
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
uint64_t getByValTypeAlignment(Type *Ty, const DataLayout &DL) const override
Return the desired alignment for ByVal aggregate function arguments in the caller parameter area.
unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
void markLibCallAttributes(MachineFunction *MF, unsigned CC, ArgListTy &Args) const override
Value * getSafeStackPointerLocation(IRBuilderBase &IRB) const override
Return true if the target stores SafeStack pointer at a fixed offset in some non-standard address spa...
bool isScalarFPTypeInSSEReg(EVT VT) const
Return true if the specified scalar FP type is computed in an SSE register, not on the X87 floating p...
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain targets require unusual breakdowns of certain types.
bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
This function returns true if the memory access is aligned or if the target allows this specific unal...
SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the value type to use for ISD::SETCC.
EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const override
For types supported by the target, this is an identity function.
void insertSSPDeclarations(Module &M) const override
Inserts necessary declarations for SSP (stack protection) purpose.
const MCExpr * LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB, unsigned uid, MCContext &Ctx) const override
constexpr ScalarTy getFixedValue() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
@ X86_64_SysV
The C convention as specified in the x86-64 supplement to the System V ABI, used on most non-Windows ...
@ HiPE
Used by the High-Performance Erlang Compiler (HiPE).
@ Swift
Calling convention for Swift.
@ PreserveMost
Used for runtime calls that preserve most registers.
@ X86_INTR
x86 hardware interrupt context.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ X86_ThisCall
Similar to X86_StdCall.
@ PreserveAll
Used for runtime calls that preserve (almost) all registers.
@ X86_StdCall
stdcall is mostly used by the Win32 API.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ X86_VectorCall
MSVC calling convention that passes vectors and vector aggregates in SSE registers.
@ Intel_OCL_BI
Used for Intel OpenCL built-ins.
@ PreserveNone
Used for runtime calls that preserve no general registers.
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
@ X86_RegCall
Register calling convention used for parameters transfer optimization.
@ C
The default llvm calling convention, compatible with C.
@ X86_FastCall
'fast' analog of X86_StdCall.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ ADD
Simple integer binary arithmetic operators.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
Reg
All possible values of the reg field in the ModR/M byte.
@ RET_GLUE
Return with a glue operand.
@ IRET
Return from interrupt. Operand 0 is the number of bytes to pop.
@ CALL
These operations represent an abstract X86 call instruction, which includes a bunch of information.
@ GlobalBaseReg
On Darwin, this node represents the result of the popl at function entry, used for PIC code.
@ TC_RETURN
Tail call return.
@ NT_CALL
Same as call except it adds the NoTrack prefix.
@ MOVDQ2Q
Copies a 64-bit value from the low word of an XMM vector to an MMX vector.
bool isExtendedSwiftAsyncFrameSupported(const X86Subtarget &Subtarget, const MachineFunction &MF)
True if the target supports the extended frame for async Swift functions.
bool isCalleePop(CallingConv::ID CallingConv, bool is64Bit, bool IsVarArg, bool GuaranteeTCO)
Determines whether the callee is required to pop its own arguments.
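A hedged sketch of the classic 32-bit rule (the real function also accounts for guaranteed TCO): callee-pop conventions pop their own arguments unless the call is variadic.

// Illustrative convention subset; the real enum is CallingConv::ID.
enum class CC { C, StdCall, FastCall, ThisCall, VectorCall };

bool isCalleePopSketch(CC Conv, bool IsVarArg) {
  if (IsVarArg)
    return false; // variadic frames are always caller-popped
  switch (Conv) {
  case CC::StdCall:
  case CC::FastCall:
  case CC::ThisCall:
  case CC::VectorCall:
    return true;
  default:
    return false;
  }
}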
std::optional< Function * > getAttachedARCFunction(const CallBase *CB)
This function returns operand bundle clang_arc_attachedcall's argument, which is the address of the A...
bool hasAttachedCallOpBundle(const CallBase *CB)
This is an optimization pass for GlobalISel generic memory operations.
bool RetCC_X86(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
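The standard bit trick behind this contract, as a self-contained sketch:

#include <cstdint>

constexpr bool isPowerOf2_32(uint32_t Value) {
  // A power of two has exactly one set bit; Value & (Value - 1) clears it.
  return Value && !(Value & (Value - 1));
}
static_assert(isPowerOf2_32(64) && !isPowerOf2_32(0) && !isPowerOf2_32(48));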
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
bool is_sorted(R &&Range, Compare C)
Wrapper function around std::is_sorted to check if elements in a range R are sorted with respect to a...
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it...
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
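For a power-of-two alignment this is the usual round-up; a minimal sketch consistent with the documented contract:

#include <cstdint>

constexpr uint64_t alignTo(uint64_t Size, uint64_t A) {
  return (Size + A - 1) / A * A; // A non-zero; a power of two in practice
}
static_assert(alignTo(40, 16) == 48 && alignTo(48, 16) == 48);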
bool CC_X86(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
bool is128BitVector() const
Return true if this is a 128-bit vector type.
bool is512BitVector() const
Return true if this is a 512-bit vector type.
bool isVector() const
Return true if this is a vector value type.
bool is256BitVector() const
Return true if this is a 256-bit vector type.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Describes a register that needs to be forwarded from the prologue to a musttail call.
SmallVector< ArgRegPair, 1 > ArgRegPairs
Vector of call argument and its forwarding register.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
const ConstantInt * CFIType
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals