#define DEBUG_TYPE "x86-isel"
static std::pair<MVT, unsigned>
handleMaskRegisterForCallingConv(unsigned NumElts, CallingConv::ID CC,
                                 const X86Subtarget &Subtarget) {
  // v2i1/v4i1/v8i1/v16i1 all pass in xmm registers unless the calling
  // convention is one that uses k registers.
  if (NumElts == 2)
    return {MVT::v2i64, 1};
  if (NumElts == 4)
    return {MVT::v4i32, 1};
  if (NumElts == 8 && CC != CallingConv::X86_RegCall &&
      CC != CallingConv::Intel_OCL_BI)
    return {MVT::v8i16, 1};
  if (NumElts == 16 && CC != CallingConv::X86_RegCall &&
      CC != CallingConv::Intel_OCL_BI)
    return {MVT::v16i8, 1};
  // v32i1 passes in ymm unless we have BWI and the calling convention is
  // regcall.
  if (NumElts == 32 && (!Subtarget.hasBWI() || CC != CallingConv::X86_RegCall))
    return {MVT::v32i8, 1};
  // Split v64i1 vectors if we don't have v64i8 available.
  if (NumElts == 64 && Subtarget.hasBWI() && CC != CallingConv::X86_RegCall) {
    if (Subtarget.useAVX512Regs())
      return {MVT::v64i8, 1};
    return {MVT::v32i8, 2};
  }

  // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
  if (!isPowerOf2_32(NumElts) || (NumElts == 64 && !Subtarget.hasBWI()) ||
      NumElts > 64)
    return {MVT::i8, NumElts};

  return {MVT::INVALID_SIMPLE_VALUE_TYPE, 0};
}
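// Illustrative only (not part of this file): the returned pair maps a vXi1
// mask type to (register type, register count). For example, on an AVX512BW
// subtarget where 512-bit registers are disabled:
//
//   std::pair<MVT, unsigned> P =
//       handleMaskRegisterForCallingConv(64, CallingConv::C, Subtarget);
//   // P == {MVT::v32i8, 2}: the v64i1 mask travels as two v32i1 halves,
//   // each bitcast into a 256-bit YMM register.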
// Excerpt from X86TargetLowering::getRegisterTypeForCallingConv(): vXi1
// vector types are remapped through the helper above.
  MVT RegisterVT;
  unsigned NumRegisters;
  std::tie(RegisterVT, NumRegisters) =
      handleMaskRegisterForCallingConv(NumElts, CC, Subtarget);
  // ...
  // We use more GPRs for f64 and f80 on 32-bit targets when x87 is disabled.
  if ((VT == MVT::f64 || VT == MVT::f80) && !Subtarget.is64Bit() &&
      !Subtarget.hasX87())
    return MVT::i32;

// Excerpt from X86TargetLowering::getNumRegistersForCallingConv(): the same
// helper supplies the register count.
  MVT RegisterVT;
  unsigned NumRegisters;
  std::tie(RegisterVT, NumRegisters) =
      handleMaskRegisterForCallingConv(NumElts, CC, Subtarget);
  // ...
  // f64 splits into two registers and f80 into three when x87 is disabled.
  if (!Subtarget.is64Bit() && !Subtarget.hasX87()) {
    // ...
  }
unsigned X86TargetLowering::getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const {
  // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
  if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
      Subtarget.hasAVX512() &&
      (!isPowerOf2_32(VT.getVectorNumElements()) ||
       (VT.getVectorNumElements() == 64 && !Subtarget.hasBWI()) ||
       VT.getVectorNumElements() > 64)) {
    RegisterVT = MVT::i8;
    IntermediateVT = MVT::i1;
    NumIntermediates = VT.getVectorNumElements();
    return NumIntermediates;
  }

  // Split v64i1 vectors if we don't have v64i8 available.
  if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
      CC != CallingConv::X86_RegCall) {
    RegisterVT = MVT::v32i8;
    IntermediateVT = MVT::v32i1;
    NumIntermediates = 2;
    return 2;
  }

  return TargetLowering::getVectorTypeBreakdownForCallingConv(
      Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
}
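// Illustrative only: the v64i1 split seen through this hook. A hypothetical
// query
//
//   EVT IntermediateVT;
//   MVT RegisterVT;
//   unsigned NumIntermediates;
//   unsigned N = TLI.getVectorTypeBreakdownForCallingConv(
//       Ctx, CallingConv::C, MVT::v64i1, IntermediateVT, NumIntermediates,
//       RegisterVT);
//
// yields N == 2, IntermediateVT == v32i1 and RegisterVT == v32i8 on a BWI
// subtarget with useAVX512Regs() == false, matching the helper above.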
// Excerpt from getMaxByValAlign(), the recursive helper that computes the
// desired ByVal argument alignment for getByValTypeAlignment().
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getPrimitiveSizeInBits().getFixedValue() == 128)
      MaxAlign = Align(16);
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    // Recurse into the element type.
    Align EltAlign;
    getMaxByValAlign(ATy->getElementType(), EltAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      Align EltAlign;
      getMaxByValAlign(EltTy, EltAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == 16)
        break;
    }
  }
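// Illustrative only: a struct such as { int; <4 x float> } contains a
// 128-bit vector member, so the recursion above raises its byval alignment
// to 16 on SSE targets, while a struct of plain ints keeps the 32-bit
// default of 4.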
uint64_t X86TargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  if (Subtarget.is64Bit()) {
    // Max of 8 and the natural alignment of the type.
    Align TyAlign = DL.getABITypeAlign(Ty);
    if (TyAlign > 8)
      return TyAlign.value();
    return 8;
  }

  Align Alignment(4);
  if (Subtarget.hasSSE1())
    getMaxByValAlign(Ty, Alignment);
  return Alignment.value();
}
EVT X86TargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {
  if (!FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat)) {
    if (Op.size() >= 16 &&
        (!Subtarget.isUnalignedMem16Slow() || Op.isAligned(Align(16)))) {
      // FIXME: Check if unaligned 64-byte accesses are slow.
      if (Op.size() >= 64 && Subtarget.hasAVX512() && Subtarget.hasEVEX512() &&
          (Subtarget.getPreferVectorWidth() >= 512)) {
        return Subtarget.hasBWI() ? MVT::v64i8 : MVT::v16i32;
      }
      // FIXME: Check if unaligned 32-byte accesses are slow.
      if (Op.size() >= 32 && Subtarget.hasAVX() &&
          Subtarget.useLight256BitInstructions()) {
        // Although this isn't a well-supported type for AVX1, we'll let
        // legalization and shuffle lowering produce the optimal codegen.
        return MVT::v32i8;
      }
      if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128))
        return MVT::v16i8;
      // If we have SSE1 registers we should be able to use them.
      if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) &&
          (Subtarget.getPreferVectorWidth() >= 128))
        return MVT::v4f32;
    } else if (((Op.isMemcpy() && !Op.isMemcpyStrSrc()) ||
                Op.isZeroMemset()) &&
               Op.size() >= 8 && !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
      // Do not use f64 to lower memcpy if the source is a string constant;
      // it's better to use i32 and avoid the loads. Likewise, only use f64
      // for memset when storing zeros.
      return MVT::f64;
    }
  }
  // This is a compromise. If we reach here, unaligned accesses may be slow on
  // this target, but creating smaller code is faster than creating more
  // instructions.
  if (Subtarget.is64Bit() && Op.size() >= 8)
    return MVT::i64;
  return MVT::i32;
}

static bool isBitAligned(Align Alignment, uint64_t SizeInBits) {
  return (8 * Alignment.value()) % SizeInBits == 0;
}
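// Illustrative only: for a 64-byte, 16-byte-aligned memcpy, the hook above
// prefers MVT::v64i8 on an AVX512BW subtarget with 512-bit registers (one
// ZMM load/store pair), v32i8 with AVX, v16i8 with SSE2, and falls back to
// i64/i32 GPR copies when vector registers are unavailable.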
bool X86TargetLowering::isMemoryAccessFast(EVT VT, Align Alignment) const {
  // ...
  if (VT.getSizeInBits() == 128)
    return !Subtarget.isUnalignedMem16Slow();
  if (VT.getSizeInBits() == 256)
    return !Subtarget.isUnalignedMem32Slow();
  // ...
}

bool X86TargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment,
    MachineMemOperand::Flags Flags, unsigned *Fast) const {
  if (Fast)
    *Fast = isMemoryAccessFast(VT, Alignment);
  // ...
  // Non-temporal vector loads have no pre-SSE4.1 form and must be vector
  // aligned; under-aligned ones fall back to regular unaligned loads:
    return (Alignment < 16 || !Subtarget.hasSSE41());
  // ...
}

bool X86TargetLowering::allowsMemoryAccess(
    LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
    Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const {
  // ...
  // 512-bit non-temporal vector ops additionally require AVX512 with
  // 512-bit registers enabled:
  if (Subtarget.hasAVX512() && Subtarget.hasEVEX512())
    return true;
  // ...
}
bool X86TargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.has_value();
  EVT ValueVT = Val.getValueType();
  if (IsABIRegCopy && ValueVT == MVT::bf16 && PartVT == MVT::f32) {
    // bf16 is passed in an f32 register: bitcast to i16, any-extend to i32,
    // then bitcast the integer to f32.
    // ...
  }
  return false;
}

SDValue X86TargetLowering::joinRegisterPartsIntoValue(
    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
    unsigned NumParts, MVT PartVT, EVT ValueVT,
    std::optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.has_value();
  if (IsABIRegCopy && ValueVT == MVT::bf16 && PartVT == MVT::f32) {
    // The inverse: bitcast f32 to i32, truncate to i16, bitcast to bf16.
    // ...
  }
  return SDValue();
}
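// Illustrative only: an ABI copy of a bf16 value through an f32 register
// round-trips as bf16 -> (bitcast) i16 -> (any_extend) i32 -> (bitcast) f32
// on the split side, with the reverse bitcast/truncate chain on join.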
bool X86TargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC,
                                              ArgListTy &Args) const {
  // Only 32-bit targets mark integer libcall arguments inreg (regparm).
  if (Subtarget.is64Bit())
    return;

  unsigned ParamRegs = 0;
  if (auto *M = MF->getFunction().getParent())
    ParamRegs = M->getNumberRegisterParameters();

  // Mark the first N int arguments as having reg.
  for (auto &Arg : Args) {
    Type *T = Arg.Ty;
    if (T->isIntOrPtrTy())
      if (MF->getDataLayout().getTypeAllocSize(T) <= 8) {
        unsigned numRegs = 1;
        if (MF->getDataLayout().getTypeAllocSize(T) > 4)
          numRegs = 2;
        if (ParamRegs < numRegs)
          return;
        ParamRegs -= numRegs;
        Arg.IsInReg = true;
      }
  }
}

SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                    SelectionDAG &DAG) const {
  if (!Subtarget.is64Bit())
    // On 32-bit targets the jump table base is the PIC base register.
    return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
                       getPointerTy(DAG.getDataLayout()));
  return Table;
}

const MCExpr *X86TargetLowering::getPICJumpTableRelocBaseExpr(
    const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const {
  // X86-64 uses RIP-relative addressing based on the jump table label.
  if (Subtarget.isPICStyleRIPRel() ||
      (Subtarget.is64Bit() &&
       getTargetMachine().getCodeModel() == CodeModel::Large))
    return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);

  // Otherwise, the reloc base is the PIC base symbol itself.
  return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
}
std::pair<const TargetRegisterClass *, uint8_t>
X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                           MVT VT) const {
  const TargetRegisterClass *RRC = nullptr;
  uint8_t Cost = 1;
  switch (VT.SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(TRI, VT);
  case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
    RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
    break;
  case MVT::x86mmx:
    RRC = &X86::VR64RegClass;
    break;
  case MVT::f32: case MVT::f64:
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
  case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
  case MVT::v8f32: case MVT::v4f64:
  case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
  case MVT::v16f32: case MVT::v8f64:
    RRC = &X86::VR128XRegClass;
    break;
  }
  return std::make_pair(RRC, Cost);
}
unsigned X86TargetLowering::getAddressSpace() const {
  // 64-bit: FS (257) for user code, GS (256) under the kernel code model;
  // 32-bit: GS (256).
  if (Subtarget.is64Bit())
    return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256
                                                                    : 257;
  return 256;
}
// Excerpt from X86TargetLowering::getIRStackGuard(): glibc, bionic, and
// Fuchsia keep the stack guard in a fixed TLS slot.
    Module *M = IRB.GetInsertBlock()->getParent()->getParent();
    // Users may override the base register and offset.
    int Offset = M->getStackProtectorGuardOffset();
    if (Offset == INT_MAX)
      // Default slot: %fs:0x28 on x86-64, %gs:0x14 on i386.
      Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;

    StringRef GuardReg = M->getStackProtectorGuardReg();
    if (GuardReg == "fs")
      AddressSpace = X86AS::FS;
    else if (GuardReg == "gs")
      AddressSpace = X86AS::GS;

    // Use a symbol guard if the user specified one.
    StringRef GuardSymb = M->getStackProtectorGuardSymbol();
    if (!GuardSymb.empty()) {
      GlobalVariable *GV = M->getGlobalVariable(GuardSymb);
      if (!GV) {
        Type *Ty = Subtarget.is64Bit() ? Type::getInt64Ty(M->getContext())
                                       : Type::getInt32Ty(M->getContext());
        GV = new GlobalVariable(*M, Ty, false, GlobalValue::ExternalLinkage,
                                nullptr, GuardSymb, nullptr,
                                GlobalValue::NotThreadLocal, AddressSpace);
      }
      return GV;
    }

    return SegmentOffset(IRB, Offset, AddressSpace);
  // ...

void X86TargetLowering::insertSSPDeclarations(Module &M) const {
  // The MSVC CRT provides the stack protector machinery.
  if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
      Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
    // The CRT keeps the cookie in a global variable.
    M.getOrInsertGlobal("__security_cookie",
                        PointerType::getUnqual(M.getContext()));

    // The CRT also supplies the cookie-validation function.
    FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
        "__security_check_cookie", Type::getVoidTy(M.getContext()),
        PointerType::getUnqual(M.getContext()));
    if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
      F->setCallingConv(CallingConv::X86_FastCall);
      F->addParamAttr(0, Attribute::AttrKind::InReg);
    }
    return;
  }

  StringRef GuardMode = M.getStackProtectorGuard();

  // glibc, bionic, and Fuchsia have a special TLS slot for the stack guard.
  if ((GuardMode == "tls" || GuardMode.empty()) &&
      hasStackGuardSlotTLS(Subtarget.getTargetTriple()))
    return;
  TargetLowering::insertSSPDeclarations(M);
}

Value *X86TargetLowering::getSDagStackGuard(const Module &M) const {
  if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
      Subtarget.getTargetTriple().isWindowsItaniumEnvironment())
    return M.getGlobalVariable("__security_cookie");
  return TargetLowering::getSDagStackGuard(M);
}

Function *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
  if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
      Subtarget.getTargetTriple().isWindowsItaniumEnvironment())
    return M.getFunction("__security_check_cookie");
  return TargetLowering::getSSPStackGuardCheck(M);
}
// Excerpt from X86TargetLowering::getSafeStackPointerLocation(): Android
// keeps the SafeStack pointer in a fixed TLS slot.
    // %fs:0x48 on x86-64, %gs:0x24 on i386.
    int Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
    return SegmentOffset(IRB, Offset, getAddressSpace());
bool X86TargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_X86);
}

const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
  static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
  return ScratchRegs;
}

ArrayRef<MCPhysReg> X86TargetLowering::getRoundingControlRegisters() const {
  static const MCPhysReg RCRegs[] = {X86::MXCSR};
  return RCRegs;
}
/// Lowers masks values (v*i1) to the local register values.
static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc,
                               const SDLoc &DL, SelectionDAG &DAG) {
  EVT ValVT = ValArg.getValueType();
  // A v1i1 mask is just its single element.
  if (ValVT == MVT::v1i1)
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ValLoc, ValArg,
                       DAG.getIntPtrConstant(0, DL));

  if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
      (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
    // Bitcast v8i1->i8 / v16i1->i16, then any-extend to i32 if needed.
    EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
    SDValue ValToCopy = DAG.getBitcast(TempValLoc, ValArg);
    if (ValLoc == MVT::i32)
      ValToCopy = DAG.getNode(ISD::ANY_EXTEND, DL, ValLoc, ValToCopy);
    return ValToCopy;
  }

  if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
      (ValVT == MVT::v64i1 && ValLoc == MVT::i64))
    // Same-width masks are a plain bitcast.
    return DAG.getBitcast(ValLoc, ValArg);
  // ...
}
/// Breaks v64i1 value into two registers and adds the new node to the DAG.
static void Passv64i1ArgInRegs(
    const SDLoc &DL, SelectionDAG &DAG, SDValue &Arg,
    SmallVectorImpl<std::pair<Register, SDValue>> &RegsToPass, CCValAssign &VA,
    CCValAssign &NextVA, const X86Subtarget &Subtarget) {
  assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
  assert(Subtarget.is32Bit() && "Expecting 32 bit target");
  assert(VA.isRegLoc() && NextVA.isRegLoc() &&
         "The value should reside in two registers");

  // Before splitting the value we cast it to i64.
  Arg = DAG.getBitcast(MVT::i64, Arg);

  // Split the i64 into two 32-bit halves.
  SDValue Lo, Hi;
  std::tie(Lo, Hi) = DAG.SplitScalar(Arg, DL, MVT::i32, MVT::i32);

  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
  RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
}
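// Illustrative only: on a 32-bit AVX512BW target, a v64i1 mask assigned to
// the register pair (EAX, EDX) is bitcast to i64 and split, so the low 32
// mask bits travel in the first register and the high 32 bits in the
// second.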
// Excerpts from X86TargetLowering::LowerReturn().
  bool ShouldDisableCalleeSavedRegister =
      shouldDisableRetRegFromCSR(CallConv) ||
      MF.getFunction().hasFnAttribute("no_caller_saved_registers");
  // ...
  for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
       ++I, ++OutsIndex) {
    CCValAssign &VA = RVLocs[I];
    // Add the register to the CalleeSaveDisableRegs list.
    if (ShouldDisableCalleeSavedRegister)
      MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg());

    SDValue ValToCopy = OutVals[OutsIndex];
    // ...
    assert(VA.getLocInfo() != CCValAssign::FPExt &&
           "Unexpected FP-extend for return value.");
    // ...
    } else if (!Subtarget.hasSSE2() &&
               X86::FR64XRegClass.contains(VA.getLocReg()) &&
               ValVT == MVT::f64) {
      // Returning a double without SSE2 enabled is an error.
      errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
      // ...
    }
    // ...
    // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for
    // v1i64, which is returned in RAX / RDX.
    if (Subtarget.is64Bit()) {
      if (ValVT == MVT::x86mmx) {
        if (VA.getLocVT() == MVT::f64) {
          ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
          // If we don't have SSE2 available, convert to v4f32 so the
          // generated register is legal.
          if (!Subtarget.hasSSE2())
            ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
        }
      }
    }
    // ...
    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::v64i1 &&
             "Currently the only custom case is when we split v64i1 to 2 regs");
      Passv64i1ArgInRegs(dl, DAG, ValToCopy, RetVals, VA, RVLocs[++I],
                         Subtarget);
      // Add the second register to the CalleeSaveDisableRegs list.
      if (ShouldDisableCalleeSavedRegister)
        MF.getRegInfo().disableCalleeSavedRegister(RVLocs[I].getLocReg());
    } else {
      RetVals.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
    }
  }

  SDValue Glue;
  // ...
  for (auto &RetVal : RetVals) {
    if (RetVal.first == X86::FP0 || RetVal.first == X86::FP1) {
      // Returns in ST0/ST1 are handled by the FP Stackifier: pass them as
      // plain RET operands without emitting a CopyToReg.
      RetOps.push_back(
          DAG.getRegister(RetVal.first, RetVal.second.getValueType()));
      continue;
    }
    Chain = DAG.getCopyToReg(Chain, dl, RetVal.first, RetVal.second, Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(
        DAG.getRegister(RetVal.first, RetVal.second.getValueType()));
  }
  // ...
  // If returning a struct by pointer, copy the sret pointer into the return
  // register (RAX/EAX) as the ABI requires.
    Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Glue);
  // ...
  if (ShouldDisableCalleeSavedRegister &&
      CallConv != CallingConv::X86_RegCall) {
    // Attach an updated register mask so later passes see the changed CSRs.
    // ...
  }

  X86ISD::NodeType opcode = X86ISD::RET_GLUE;
  if (CallConv == CallingConv::X86_INTR)
    opcode = X86ISD::IRET;
  return DAG.getNode(opcode, dl, MVT::Other, RetOps);
bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
  if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
    return false;

  SDValue TCChain = Chain;
  SDNode *Copy = *N->use_begin();
  if (Copy->getOpcode() == ISD::CopyToReg) {
    // If the copy has a glue operand, we conservatively assume it isn't safe
    // to perform a tail call.
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
      return false;
    TCChain = Copy->getOperand(0);
  } else if (Copy->getOpcode() != ISD::FP_EXTEND)
    return false;

  bool HasRet = false;
  for (const SDNode *U : Copy->uses()) {
    if (U->getOpcode() != X86ISD::RET_GLUE)
      return false;
    // If we are returning more than one value, we can definitely
    // not make a tail call, see PR19530.
    if (U->getNumOperands() > 4)
      return false;
    if (U->getNumOperands() == 4 &&
        U->getOperand(U->getNumOperands() - 1).getValueType() != MVT::Glue)
      return false;
    HasRet = true;
  }

  if (!HasRet)
    return false;

  Chain = TCChain;
  return true;
}
EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
                                           ISD::NodeType ExtendKind) const {
  MVT ReturnMVT = MVT::i32;

  bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
  if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
    // The ABI does not require i1, i8 or i16 to be extended. On Darwin, code
    // in the wild relies on Clang's old behaviour of always extending i8/i16
    // return values, so keep doing that there (PR26665).
    ReturnMVT = MVT::i8;
  }

  EVT MinVT = getRegisterType(Context, ReturnMVT);
  return VT.bitsLT(MinVT) ? MinVT : VT;
}
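// Illustrative only: on Linux x86-64 a function returning i8 keeps
// ReturnMVT == i8, so no extension is required; on Darwin ReturnMVT stays
// i32, so an i8 return is promoted to i32 to match Clang's historical
// behaviour (PR26665).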
/// Reads two 32 bit registers and creates a 64 bit mask value.
// Excerpt from getv64i1Argument():
  assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
  assert(Subtarget.is32Bit() && "Expecting 32 bit target");
  assert(VA.getValVT() == MVT::v64i1 &&
         "Expecting first location of 64 bit width type");
  assert(NextVA.getValVT() == VA.getValVT() &&
         "The locations should have the same type");
  assert(VA.isRegLoc() && NextVA.isRegLoc() &&
         "The values should reside in two registers");

  SDValue Lo, Hi;
  SDValue ArgValueLo, ArgValueHi;
  // ...
  // Read the two 32-bit halves from their registers.
  if (nullptr == InGlue) {
    // When no glue is used, gather the registers independently.
    // ...
  }
  // ...

/// The function will lower a register of various sizes (8/16/32/64) to a
/// mask value of the expected size.
static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
                               const EVT &ValLoc, const SDLoc &DL,
                               SelectionDAG &DAG) {
  SDValue ValReturned = ValArg;

  if (ValVT == MVT::v1i1)
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, ValReturned);

  if (ValVT == MVT::v64i1) {
    // On 32-bit machines this case is handled by getv64i1Argument.
    assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
  } else {
    MVT MaskLenVT;
    switch (ValVT.getSimpleVT().SimpleTy) {
    case MVT::v8i1:
      MaskLenVT = MVT::i8;
      break;
    case MVT::v16i1:
      MaskLenVT = MVT::i16;
      break;
    case MVT::v32i1:
      MaskLenVT = MVT::i32;
      break;
    default:
      llvm_unreachable("Expecting a vector of i1 types");
    }

    ValReturned = DAG.getNode(ISD::TRUNCATE, DL, MaskLenVT, ValReturned);
  }
  return DAG.getBitcast(ValVT, ValReturned);
}
SDValue X86TargetLowering::LowerCallResult(
    SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    uint32_t *RegMask) const {
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, RetCC_X86);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
       ++I, ++InsIndex) {
    CCValAssign &VA = RVLocs[I];
    EVT CopyVT = VA.getLocVT();
    // ...
    } else if (!Subtarget.hasSSE2() &&
               X86::FR64XRegClass.contains(VA.getLocReg()) &&
               CopyVT == MVT::f64) {
      errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
      // ...
    }

    // If we prefer to use the value in xmm registers, copy it out as f80 and
    // use a truncate to move it from the fp stack reg to the xmm reg.
    bool RoundAfterCopy = false;
    if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
        isScalarFPTypeInSSEReg(VA.getValVT())) {
      if (!Subtarget.hasX87())
        report_fatal_error("X87 register return with X87 disabled");
      CopyVT = MVT::f80;
      RoundAfterCopy = (CopyVT != VA.getLocVT());
    }
    // ...
    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::v64i1 &&
             "Currently the only custom case is when we split v64i1 to 2 regs");
      // ...
    }
    // ...
  }
  return Chain;
}
/// Determines whether Args, either a set of outgoing arguments to a call, or
/// a set of incoming args of a call, contains an sret pointer that the callee
/// pops.
template <typename T>
static bool hasCalleePopSRet(const SmallVectorImpl<T> &Args,
                             const X86Subtarget &Subtarget) {
  static_assert(std::is_same_v<T, ISD::OutputArg> ||
                    std::is_same_v<T, ISD::InputArg>,
                "requires ISD::OutputArg or ISD::InputArg");

  // Only 32-bit pops the sret. It's a 64-bit world these days, so early-out
  // for most compilations.
  if (!Subtarget.is32Bit())
    return false;

  for (const auto &Arg : Args) {
    ISD::ArgFlagsTy Flags = Arg.Flags;
    if (!Flags.isSRet() || Flags.isInReg())
      continue;
    // ... remaining checks decide whether this sret is callee-popped.
  }
  return false;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address
/// specified by "Src" to address "Dst" with size and alignment information
/// specified by the specific parameter attribute.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {
  SDValue SizeNode = DAG.getIntPtrConstant(Flags.getByValSize(), dl);
  return DAG.getMemcpy(
      Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
      /*isVolatile*/ false, /*AlwaysInline=*/true,
      /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
}
bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  if (!CI->isTailCall())
    return false;
  return mayTailCallThisCC(CI->getCallingConv());
}

// Excerpt from X86TargetLowering::LowerMemArgument(): create the nodes
// corresponding to a load from a parameter's stack slot.
  ISD::ArgFlagsTy Flags = Ins[i].Flags;
  bool AlwaysUseMutable = shouldGuaranteeTCO(
      CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
  bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
  // ...
  // If the value is passed by pointer we have the address instead of the
  // value itself, so no extension is needed.
  bool ExtendedInMem =
      VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1;
  // ...
  if (Flags.isByVal()) {
    unsigned Bytes = Flags.getByValSize();
    if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
    // ...
  }

  EVT ArgVT = Ins[i].ArgVT;

  // If this is a vector that has been split into multiple parts, don't elide
  // the copy: the layout on the stack may not match the packed in-memory
  // layout.
  bool ScalarizedVector = ArgVT.isVector() && !VA.getLocVT().isVector();

  // This is an argument in memory. We might be able to perform copy elision.
  if (Flags.isCopyElisionCandidate() &&
      VA.getLocInfo() != CCValAssign::Indirect && !ExtendedInMem &&
      !ScalarizedVector) {
    if (Ins[i].PartOffset == 0) {
      // Reuse the incoming argument slot directly instead of copying it.
      // ...
      return DAG.getLoad(
          ValVT, dl, Chain, PartAddr,
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
    }
    // ...
    // Reuse the slot only when the part lies fully inside the object:
    if (ObjBegin <= PartBegin && PartEnd <= ObjEnd) {
      // ...
    }
  }
  // ...
  SDValue Val = DAG.getLoad(
      ValVT, dl, Chain, FIN,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));

  return ExtendedInMem
             ? (VA.getValVT().isVector()
                    ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)
                    : DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val))
             : Val;
static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
                                                const X86Subtarget &Subtarget) {
  assert(Subtarget.is64Bit());

  if (Subtarget.isCallingConvWin64(CallConv)) {
    static const MCPhysReg GPR64ArgRegsWin64[] = {
      X86::RCX, X86::RDX, X86::R8, X86::R9
    };
    return ArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
  }

  static const MCPhysReg GPR64ArgRegs64Bit[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
  };
  return ArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
}

static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
                                                CallingConv::ID CallConv,
                                                const X86Subtarget &Subtarget) {
  assert(Subtarget.is64Bit());
  if (Subtarget.isCallingConvWin64(CallConv)) {
    // The XMM registers which might contain var arg parameters are shadowed
    // in their paired GPR, so we only need to save the GPRs to their home
    // slots.
    return std::nullopt;
  }

  bool isSoftFloat = Subtarget.useSoftFloat();
  if (isSoftFloat || !Subtarget.hasSSE1())
    // Kernel mode asks for SSE to be disabled, so there are no XMM argument
    // registers.
    return std::nullopt;

  static const MCPhysReg XMMArgRegs64Bit[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
  };
  return ArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
}
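// Illustrative only: for a SysV x86-64 varargs call such as printf("%f", x),
// integer/pointer arguments fill RDI, RSI, RDX, RCX, R8, R9 in order while
// FP arguments fill XMM0..XMM7; the arrays above are what the varargs
// register-save-area logic iterates over.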
static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
  return llvm::is_sorted(
      ArgLocs, [](const CCValAssign &A, const CCValAssign &B) -> bool {
        return A.getValNo() < B.getValNo();
      });
}
namespace {
/// Helper class for lowering variable-argument parameters.
class VarArgsLoweringHelper {
public:
  VarArgsLoweringHelper(X86MachineFunctionInfo *FuncInfo, const SDLoc &Loc,
                        SelectionDAG &DAG, const X86Subtarget &Subtarget,
                        CallingConv::ID CallConv, CCState &CCInfo)
      : FuncInfo(FuncInfo), DL(Loc), DAG(DAG), Subtarget(Subtarget),
        TheMachineFunction(DAG.getMachineFunction()),
        TheFunction(TheMachineFunction.getFunction()),
        FrameInfo(TheMachineFunction.getFrameInfo()),
        FrameLowering(*Subtarget.getFrameLowering()),
        TargLowering(DAG.getTargetLoweringInfo()), CallConv(CallConv),
        CCInfo(CCInfo) {}

  // Lower variable arguments parameters.
  void lowerVarArgsParameters(SDValue &Chain, unsigned StackSize);

private:
  void createVarArgAreaAndStoreRegisters(SDValue &Chain, unsigned StackSize);

  void forwardMustTailParameters(SDValue &Chain);

  bool is64Bit() const { return Subtarget.is64Bit(); }
  bool isWin64() const { return Subtarget.isCallingConvWin64(CallConv); }

  X86MachineFunctionInfo *FuncInfo;
  const SDLoc &DL;
  SelectionDAG &DAG;
  const X86Subtarget &Subtarget;
  MachineFunction &TheMachineFunction;
  const Function &TheFunction;
  MachineFrameInfo &FrameInfo;
  const TargetFrameLowering &FrameLowering;
  const TargetLowering &TargLowering;
  CallingConv::ID CallConv;
  CCState &CCInfo;
};
} // end anonymous namespace
void VarArgsLoweringHelper::createVarArgAreaAndStoreRegisters(
    SDValue &Chain, unsigned StackSize) {
  // Start of the varargs area on the stack.
  FuncInfo->setVarArgsFrameIndex(
      FrameInfo.CreateFixedObject(1, StackSize, true));
  // ...
  if (is64Bit()) {
    ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
    ArrayRef<MCPhysReg> ArgXMMs =
        get64BitArgumentXMMs(TheMachineFunction, CallConv, Subtarget);

    unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);

    assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
           "SSE register cannot be used when SSE is disabled!");

    if (isWin64()) {
      // Win64 GPRs have a home area directly above the saved return address.
      int HomeOffset = FrameLowering.getOffsetOfLocalArea() + 8;
      FuncInfo->setRegSaveFrameIndex(
          FrameInfo.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
      // ...
    }
    // ...
    // Gather all the live-in physical registers.
    for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
      Register GPR = TheMachineFunction.addLiveIn(Reg, &X86::GR64RegClass);
      LiveGPRs.push_back(DAG.getCopyFromReg(Chain, DL, GPR, MVT::i64));
    }
    const auto &AvailableXmms = ArgXMMs.slice(NumXMMRegs);
    if (!AvailableXmms.empty()) {
      Register AL = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
      ALVal = DAG.getCopyFromReg(Chain, DL, AL, MVT::i8);
      for (MCPhysReg Reg : AvailableXmms) {
        // Attach the physical registers here to avoid spills at basic block
        // boundaries under the fast register allocator.
        TheMachineFunction.getRegInfo().addLiveIn(Reg);
        LiveXMMRegs.push_back(DAG.getRegister(Reg, MVT::v4f32));
      }
    }
    // ...
    // Store the integer parameter registers to their save area.
    for (SDValue Val : LiveGPRs) {
      // ...
    }
    // ...
    if (!LiveXMMRegs.empty()) {
      // Emit the XMM register stores in one shot, guarded at runtime by AL
      // (the SSE-register count from the caller).
      // ...
      MemOps.push_back(DAG.getMemIntrinsicNode(
          X86ISD::VASTART_SAVE_XMM_REGS, DL, DAG.getVTList(MVT::Other),
          SaveXMMOps, MVT::i8, StoreMMO));
    }

    if (!MemOps.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
  }
}
void VarArgsLoweringHelper::forwardMustTailParameters(SDValue &Chain) {
  // Find the largest legal vector type for forwarding.
  MVT VecVT = MVT::Other;
  // FIXME: Only some x86_32 calling conventions support AVX512.
  if (Subtarget.useAVX512Regs() &&
      (is64Bit() || (CallConv == CallingConv::X86_VectorCall ||
                     CallConv == CallingConv::Intel_OCL_BI)))
    VecVT = MVT::v16f32;
  else if (Subtarget.hasAVX())
    VecVT = MVT::v8f32;
  else if (Subtarget.hasSSE2())
    VecVT = MVT::v4f32;

  // We forward some GPRs and some vector types.
  SmallVector<MVT, 2> RegParmTypes;
  MVT IntVT = is64Bit() ? MVT::i64 : MVT::i32;
  RegParmTypes.push_back(IntVT);
  if (VecVT != MVT::Other)
    RegParmTypes.push_back(VecVT);

  // Compute the set of forwarded registers. The rest are scratch.
  SmallVectorImpl<ForwardedRegister> &Forwards =
      FuncInfo->getForwardedMustTailRegParms();
  CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);

  // Forward AL for SysV x86_64 targets, since it is used for varargs.
  if (is64Bit() && !isWin64() && !CCInfo.isAllocated(X86::AL)) {
    Register ALVReg = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
    Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
  }

  // Copy all forwards from physical to virtual registers.
  for (ForwardedRegister &FR : Forwards) {
    SDValue RegVal = DAG.getCopyFromReg(Chain, DL, FR.VReg, FR.VT);
    FR.VReg = TheMachineFunction.getRegInfo().createVirtualRegister(
        TargLowering.getRegClassFor(FR.VT));
    Chain = DAG.getCopyToReg(Chain, DL, FR.VReg, RegVal);
  }
}

void VarArgsLoweringHelper::lowerVarArgsParameters(SDValue &Chain,
                                                   unsigned StackSize) {
  // ...
  if (FrameInfo.hasVAStart())
    createVarArgAreaAndStoreRegisters(Chain, StackSize);

  if (FrameInfo.hasMustTailInVarArgFunc())
    forwardMustTailParameters(Chain);
}
SDValue X86TargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();

  const Function &F = MF.getFunction();
  if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
      F.getName() == "main")
    FuncInfo->setForceFramePointer(true);

  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool Is64Bit = Subtarget.is64Bit();
  bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);

  assert(
      !(isVarArg && canGuaranteeTCO(CallConv)) &&
      "Var args not supported with calling conv' regcall, fastcc, ghc or hipe");

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  // Allocate shadow area for Win64.
  if (IsWin64)
    CCInfo.AllocateStack(32, Align(8));

  CCInfo.AnalyzeArguments(Ins, CC_X86);

  // In vectorcall calling convention a second pass is required for the HVA
  // types.
  if (CallingConv::X86_VectorCall == CallConv)
    CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);

  // The next loop assumes the locations are in the same order as the inputs.
  assert(isSortedByValueNo(ArgLocs) &&
         "Argument Location list must be sorted before lowering");

  SDValue ArgValue;
  for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
       ++I, ++InsIndex) {
    assert(InsIndex < Ins.size() && "Invalid Ins index");
    CCValAssign &VA = ArgLocs[I];

    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();
      if (VA.needsCustom()) {
        assert(VA.getValVT() == MVT::v64i1 &&
               "Currently the only custom case is when we split v64i1 to 2 regs");
        // ...
      } else {
        const TargetRegisterClass *RC;
        if (RegVT == MVT::i8)
          RC = &X86::GR8RegClass;
        else if (RegVT == MVT::i16)
          RC = &X86::GR16RegClass;
        else if (RegVT == MVT::i32)
          RC = &X86::GR32RegClass;
        else if (Is64Bit && RegVT == MVT::i64)
          RC = &X86::GR64RegClass;
        else if (RegVT == MVT::f16)
          RC = Subtarget.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
        else if (RegVT == MVT::f32)
          RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
        else if (RegVT == MVT::f64)
          RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
        else if (RegVT == MVT::f80)
          RC = &X86::RFP80RegClass;
        else if (RegVT == MVT::f128)
          RC = &X86::VR128RegClass;
        else if (RegVT.is512BitVector())
          RC = &X86::VR512RegClass;
        else if (RegVT.is256BitVector())
          RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
        else if (RegVT.is128BitVector())
          RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
        else if (RegVT == MVT::x86mmx)
          RC = &X86::VR64RegClass;
        else if (RegVT == MVT::v1i1)
          RC = &X86::VK1RegClass;
        else if (RegVT == MVT::v8i1)
          RC = &X86::VK8RegClass;
        else if (RegVT == MVT::v16i1)
          RC = &X86::VK16RegClass;
        else if (RegVT == MVT::v32i1)
          RC = &X86::VK32RegClass;
        else if (RegVT == MVT::v64i1)
          RC = &X86::VK64RegClass;
        else
          llvm_unreachable("Unknown argument type!");
        // ...
      }
      // ...
    } else {
      assert(VA.isMemLoc());
      ArgValue =
          LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
    }

    // If the value is passed via a pointer, do a load.
    if (VA.getLocInfo() == CCValAssign::Indirect &&
        !(Ins[I].Flags.isByVal() && VA.isRegLoc())) {
      // ...
    }

    InVals.push_back(ArgValue);
  }

  for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
    if (Ins[I].Flags.isSwiftAsync()) {
      auto X86FI = MF.getInfo<X86MachineFunctionInfo>();
      if (Subtarget.is64Bit())
        X86FI->setHasSwiftAsyncContext(true);
      else {
        // On 32-bit targets spill the async context to a fixed stack slot.
        // ...
        X86FI->setSwiftAsyncContextFrameIdx(FI);
        // ...
      }
    }

    // The x86-64 ABIs require the sret pointer to be returned in RAX/EAX, so
    // record the virtual register holding it.
    if (Ins[I].Flags.isSRet()) {
      assert(!FuncInfo->getSRetReturnReg() &&
             "SRet return has already been set");
      // ...
    }
  }

  unsigned StackSize = CCInfo.getStackSize();
  // Align stack specially for tail calls.
  if (shouldGuaranteeTCO(CallConv,
                         MF.getTarget().Options.GuaranteedTailCallOpt))
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);

  if (isVarArg)
    VarArgsLoweringHelper(FuncInfo, dl, DAG, Subtarget, CallConv, CCInfo)
        .lowerVarArgsParameters(Chain, StackSize);

  // ...
  // Funclet-based personalities keep a PSPSym slot in the parent frame so
  // funclets can recover the parent's stack pointer:
      EHInfo->PSPSymFrameIdx = PSPSymFI;
  // ...
  if (shouldDisableArgRegFromCSR(CallConv) ||
      F.hasFnAttribute("no_caller_saved_registers")) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    for (std::pair<Register, Register> Pair : MRI.liveins())
      MRI.disableCalleeSavedRegister(Pair.first);
  }

  return Chain;
}
SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
                                            SDValue Arg, const SDLoc &dl,
                                            SelectionDAG &DAG,
                                            const CCValAssign &VA,
                                            ISD::ArgFlagsTy Flags,
                                            bool isByVal) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
                       StackPtr, PtrOff);
  if (isByVal)
    return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);

  return DAG.getStore(
      Chain, dl, Arg, PtrOff,
      MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
}
SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
    SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
    bool Is64Bit, int FPDiff, const SDLoc &dl) const {
  // Adjust the return address stack slot.
  EVT VT = getPointerTy(DAG.getDataLayout());
  OutRetAddr = getReturnAddressFrameIndex(DAG);

  // Load the "old" return address.
  OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
  return SDValue(OutRetAddr.getNode(), 1);
}

/// Emit a store of the return address if tail call optimization is performed
/// and it is required (FPDiff!=0).
static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
                                        SDValue Chain, SDValue RetAddrFrIdx,
                                        EVT PtrVT, unsigned SlotSize,
                                        int FPDiff, const SDLoc &dl) {
  // Store the return address to the appropriate stack slot.
  if (!FPDiff)
    return Chain;
  // Calculate the new stack slot for the return address.
  int NewReturnAddrFI =
      MF.getFrameInfo().CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
                                          false);
  SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
  Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
                       MachinePointerInfo::getFixedStack(
                           DAG.getMachineFunction(), NewReturnAddrFI));
  return Chain;
}

/// Returns a vector_shuffle mask for an movs{s|d}, movd operation of the
/// specified width.
static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
                       SDValue V2) {
  unsigned NumElems = VT.getVectorNumElements();
  SmallVector<int, 8> Mask;
  Mask.push_back(NumElems);
  for (unsigned i = 1; i != NumElems; ++i)
    Mask.push_back(i);
  return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
}
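// Illustrative only: for VT == MVT::v4i32, getMOVL() builds the shuffle
// mask {4, 1, 2, 3}, i.e. element 0 comes from V2 and elements 1-3 from V1,
// which is exactly the movss/movsd "merge low element" pattern.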
// Excerpts from X86TargetLowering::LowerCall().
  const auto *CB = CLI.CB;

  MachineFunction &MF = DAG.getMachineFunction();
  bool Is64Bit = Subtarget.is64Bit();
  bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
  bool IsSibcall = false;
  bool IsGuaranteeTCO = MF.getTarget().Options.GuaranteedTailCallOpt ||
                        CallConv == CallingConv::Tail ||
                        CallConv == CallingConv::SwiftTail;
  bool IsMustTail = CLI.CB && CLI.CB->isMustTailCall();
  bool IsCalleePopSRet = !IsGuaranteeTCO && hasCalleePopSRet(Outs, Subtarget);
  X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
  bool HasNCSR = (CB && isa<CallInst>(CB) &&
                  CB->hasFnAttr("no_caller_saved_registers"));
  bool HasNoCfCheck = (CB && CB->doesNoCfCheck());
  bool IsIndirectCall = (CB && isa<CallInst>(CB) && CB->isIndirectCall());
  bool IsCFICall = IsIndirectCall && CLI.CFIType;
  const Module *M = MF.getMMI().getModule();
  Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  // ...
  if (Subtarget.isPICStyleGOT() && !IsGuaranteeTCO && !IsMustTail) {
    // If we are using a GOT, disable tail calls to external symbols with
    // default visibility. Tail calling such a symbol requires a GOT
    // relocation, which forces early binding of the symbol; calls to symbols
    // with local linkage or hidden visibility are fine.
    GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
    if (!G || (!G->getGlobal()->hasLocalLinkage() &&
               G->getGlobal()->hasDefaultVisibility()))
      isTailCall = false;
  }

  if (isTailCall && !IsMustTail) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(
        Callee, CallConv, IsCalleePopSRet, isVarArg, CLI.RetTy, Outs, OutVals,
        Ins, DAG);

    // Sibcalls are automatically detected tailcalls which do not require ABI
    // changes.
    if (!IsGuaranteeTCO && isTailCall)
      IsSibcall = true;
    // ...
  }

  if (IsMustTail && !isTailCall)
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");

  assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
         "Var args not supported with calling convention fastcc, ghc or hipe");

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

  // Allocate shadow area for Win64.
  if (IsWin64)
    CCInfo.AllocateStack(32, Align(8));

  CCInfo.AnalyzeArguments(Outs, CC_X86);

  // In vectorcall calling convention a second pass is required for the HVA
  // types.
  if (CallingConv::X86_VectorCall == CallConv)
    CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
  if (IsSibcall)
    // For sibcalls the memory operands are available in the caller's own
    // caller's stack.
    NumBytes = 0;
  else if (IsGuaranteeTCO && canGuaranteeTCO(CallConv))
    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);

  int FPDiff = 0;
  if (isTailCall &&
      shouldGuaranteeTCO(CallConv,
                         MF.getTarget().Options.GuaranteedTailCallOpt)) {
    // Lower arguments at fp - stackoffset + fpdiff.
    unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();

    FPDiff = NumBytesCallerPushed - NumBytes;

    // Set the delta of movement of the returnaddr stackslot, but only if the
    // delta is greater.
    if (FPDiff < X86Info->getTCReturnAddrDelta())
      X86Info->setTCReturnAddrDelta(FPDiff);
  }

  unsigned NumBytesToPush = NumBytes;
  unsigned NumBytesToPop = NumBytes;

  // If we have an inalloca argument, all stack space has already been
  // allocated by the caller, so nothing is pushed here.
  if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
    NumBytesToPush = 0;
    if (!ArgLocs.back().isMemLoc())
      report_fatal_error("cannot use inalloca attribute on a register "
                         "parameter");
    if (ArgLocs.back().getLocMemOffset() != 0)
      report_fatal_error("any parameter with the inalloca attribute must be "
                         "the only memory argument");
  } else if (CLI.IsPreallocated) {
    assert(ArgLocs.back().isMemLoc() &&
           "cannot use preallocated attribute on a register "
           "parameter");
    SmallVector<size_t, 4> PreallocatedOffsets;
    for (size_t i = 0; i < CLI.OutVals.size(); ++i) {
      if (CLI.CB->paramHasAttr(i, Attribute::Preallocated))
        PreallocatedOffsets.push_back(ArgLocs[i].getLocMemOffset());
    }
    auto *MFI = DAG.getMachineFunction().getInfo<X86MachineFunctionInfo>();
    size_t PreallocatedId = MFI->getPreallocatedIdForCallSite(CLI.CB);
    MFI->setPreallocatedStackSize(PreallocatedId, NumBytes);
    MFI->setPreallocatedArgOffsets(PreallocatedId, PreallocatedOffsets);
    NumBytesToPush = 0;
  }

  if (!IsSibcall && !IsMustTail)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
                                 NumBytes - NumBytesToPush, dl);

  SDValue RetAddrFrIdx;
  // Load return address for tail calls.
  if (isTailCall && FPDiff)
    Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
                                    Is64Bit, FPDiff, dl);

  SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  SDValue StackPtr;

  // The next loop assumes the locations are in the same order as the inputs.
  assert(isSortedByValueNo(ArgLocs) &&
         "Argument Location list must be sorted before lowering");

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
       ++I, ++OutIndex) {
    assert(OutIndex < Outs.size() && "Invalid Out index");
    // Skip inalloca/preallocated arguments: they have already been written.
    ISD::ArgFlagsTy Flags = Outs[OutIndex].Flags;
    if (Flags.isInAlloca() || Flags.isPreallocated())
      continue;

    CCValAssign &VA = ArgLocs[I];
    EVT RegVT = VA.getLocVT();
    SDValue Arg = OutVals[OutIndex];
    bool isByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    // ...
    case CCValAssign::AExt:
      if (Arg.getValueType().isVector() &&
          Arg.getValueType().getVectorElementType() == MVT::i1)
        Arg = lowerMasksToReg(Arg, RegVT, dl, DAG);
      else if (RegVT.is128BitVector()) {
        // Special case: passing MMX values in XMM registers.
        Arg = DAG.getBitcast(MVT::i64, Arg);
        Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
        Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
      } else
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
      break;
    case CCValAssign::Indirect: {
      if (isByVal) {
        // Memcpy the argument to a temporary stack slot to prevent the caller
        // from modifying it.
        int FrameIdx = MF.getFrameInfo().CreateStackObject(
            Flags.getByValSize(),
            std::max(Align(16), Flags.getNonZeroByValAlign()), false);
        // ...
      } else {
        // Store the argument in a spill slot and pass its address instead.
        SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
        int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
        MemOpChains.push_back(DAG.getStore(
            Chain, dl, Arg, SpillSlot,
            MachinePointerInfo::getFixedStack(MF, FI)));
        Arg = SpillSlot;
      }
      break;
    }
    // ...
    }

    if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::v64i1 &&
             "Currently the only custom case is when we split v64i1 to 2 regs");
      // Split v64i1 value into two registers.
      Passv64i1ArgInRegs(dl, DAG, Arg, RegsToPass, VA, ArgLocs[++I], Subtarget);
    } else if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      // ...
      if (isVarArg && IsWin64) {
        // Win64 ABI requires argument XMM regs to be copied to the
        // corresponding shadow GPRs.
        MCPhysReg ShadowReg = 0;
        switch (VA.getLocReg()) {
        case X86::XMM0: ShadowReg = X86::RCX; break;
        case X86::XMM1: ShadowReg = X86::RDX; break;
        case X86::XMM2: ShadowReg = X86::R8;  break;
        case X86::XMM3: ShadowReg = X86::R9;  break;
        }
        if (ShadowReg)
          RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
      }
    } else if (!IsSibcall && (!isTailCall || isByVal)) {
      assert(VA.isMemLoc());
      if (!StackPtr.getNode())
        StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
                                      getPointerTy(DAG.getDataLayout()));
      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags, isByVal));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);

  if (Subtarget.isPICStyleGOT()) {
    // ELF / PIC requires GOT in the EBX register before function calls via
    // PLT GOT pointer.
    // ...
    GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
    if (G && !G->getGlobal()->hasLocalLinkage() &&
        G->getGlobal()->hasDefaultVisibility())
      Callee = LowerGlobalAddress(Callee, DAG);
    else if (isa<ExternalSymbolSDNode>(Callee))
      Callee = LowerExternalSymbol(Callee, DAG);
  }

  if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail &&
      (Subtarget.hasSSE1() || !M->getModuleFlag("SkipRaxSetup"))) {
    // From the AMD64 ABI: for calls that may call functions that use varargs
    // or stdargs (prototype-less calls, or calls to functions containing
    // ellipsis (...) in the declaration), %al is used as a hidden argument
    // specifying the number of SSE registers used.
    static const MCPhysReg XMMArgRegs[] = {
      X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
      X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
    };
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
    assert((Subtarget.hasSSE1() || !NumXMMRegs)
           && "SSE registers cannot be used when SSE is disabled");
    RegsToPass.push_back(std::make_pair(
        Register(X86::AL), DAG.getConstant(NumXMMRegs, dl, MVT::i8)));
  }

  if (isVarArg && IsMustTail) {
    const auto &Forwards = X86Info->getForwardedMustTailRegParms();
    for (const auto &F : Forwards) {
      SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
      RegsToPass.push_back(std::make_pair(F.PReg, Val));
    }
  }

  // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
  // don't need this because the eligibility check rejects calls that require
  // shuffling arguments passed in memory.
  if (!IsSibcall && isTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored, because the outgoing
    // stack slots may alias the incoming argument stack slots.
    SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
    SmallVector<SDValue, 8> MemOpChains2;
    for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
         ++I, ++OutsIndex) {
      CCValAssign &VA = ArgLocs[I];

      if (VA.isRegLoc()) {
        if (VA.needsCustom()) {
          assert((CallConv == CallingConv::X86_RegCall) &&
                 "Expecting custom case only in regcall calling convention");
          // One argument was passed through two register locations: skip the
          // next location.
          ++I;
        }
        continue;
      }

      assert(VA.isMemLoc());
      SDValue Arg = OutVals[OutsIndex];
      ISD::ArgFlagsTy Flags = Outs[OutsIndex].Flags;
      // Skip inalloca/preallocated arguments. They don't require any work.
      if (Flags.isInAlloca() || Flags.isPreallocated())
        continue;
      // Create the frame index object for this incoming parameter.
      // ...
      if (Flags.isByVal()) {
        // Copy relative to framepointer.
        // ...
      } else {
        // Store relative to framepointer.
        MemOpChains2.push_back(DAG.getStore(
            ArgChain, dl, Arg, FIN,
            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
      }
    }

    if (!MemOpChains2.empty())
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);

    // Store the return address to the appropriate stack slot.
    Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
                                     getPointerTy(DAG.getDataLayout()),
                                     RegInfo->getSlotSize(), FPDiff, dl);
  }

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and glue operands which copy the outgoing args into registers.
  SDValue InGlue;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InGlue);
    InGlue = Chain.getValue(1);
  }

  if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
    assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
    // In the 64-bit large code model, all calls go through a register, since
    // the call instruction's 32-bit immediate cannot hold the whole address.
  } else if (Callee->getOpcode() == ISD::GlobalAddress ||
             Callee->getOpcode() == ISD::ExternalSymbol) {
    // Setting ForCall to true removes WrapperRIP when possible so direct
    // calls can be selected without first materializing the address.
    Callee = LowerGlobalOrExternal(Callee, DAG, /*ForCall=*/true);
  } else if (Subtarget.isTarget64BitILP32() &&
             Callee.getValueType() == MVT::i32) {
    // Zero-extend the 32-bit Callee address into a 64-bit one per the x32 ABI.
    Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
  }

  // Returns a chain & a glue for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;

  if (!IsSibcall && isTailCall && !IsMustTail) {
    Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, 0, InGlue, dl);
    InGlue = Chain.getValue(1);
  }

  Ops.push_back(Chain);
  Ops.push_back(Callee);

  if (isTailCall)
    Ops.push_back(DAG.getTargetConstant(FPDiff, dl, MVT::i32));

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const uint32_t *Mask = [&]() {
    auto AdaptedCC = CallConv;
    // If HasNCSR is asserted (attribute NoCallerSavedRegisters exists), use
    // the X86_INTR calling convention, which has the same CSR mask (same
    // preserved registers).
    if (HasNCSR)
      AdaptedCC = (CallingConv::ID)CallingConv::X86_INTR;
    // If NoCalleeSavedRegisters is requested, use GHC, since it happens to
    // use the CSR_NoRegs_RegMask.
    if (CB && CB->hasFnAttr("no_callee_saved_registers"))
      AdaptedCC = (CallingConv::ID)CallingConv::GHC;
    return RegInfo->getCallPreservedMask(MF, AdaptedCC);
  }();
  assert(Mask && "Missing call preserved mask for calling convention");
  // ...
  // If this is an invoke in a 32-bit function using a funclet-based
  // personality, assume the function clobbers all registers: if an exception
  // is thrown, the runtime will not restore CSRs.
  if (!Is64Bit && CLI.CB && isa<InvokeInst>(CLI.CB)) {
    // ...
  }
  // ...
  if (ShouldDisableArgRegs) {
    // Clear each argument-passing register (and its sub-registers) from the
    // writable copy of the preserved-register mask.
    for (auto const &RegPair : RegsToPass)
      for (MCPhysReg SubReg : TRI->subregs_inclusive(RegPair.first))
        RegMask[SubReg / 32] &= ~(1u << (SubReg % 32));
  }
  // ...
  if (HasNoCfCheck && IsCFProtectionSupported && IsIndirectCall) {
    Chain = DAG.getNode(X86ISD::NT_CALL, dl, NodeTys, Ops);
  } else if (CLI.CB && objcarc::hasAttachedCallOpBundle(CLI.CB)) {
    // Calls with a "clang.arc.attachedcall" bundle are special: they must be
    // expanded to the call, directly followed by a special marker sequence
    // and a call to a ObjC library function.
    assert(!isTailCall &&
           "tail calls cannot be marked with clang.arc.attachedcall");
    assert(Is64Bit && "clang.arc.attachedcall is only supported in 64bit mode");
    // ...
  } else {
    Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
  }

  if (IsCFICall)
    Chain.getNode()->setCFIType(CLI.CFIType->getZExtValue());

  InGlue = Chain.getValue(1);
  // ...
  // Create the CALLSEQ_END node.
  unsigned NumBytesForCalleeToPop = 0; // Callee pops nothing.
  if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
                       DAG.getTarget().Options.GuaranteedTailCallOpt))
    NumBytesForCalleeToPop = NumBytes;    // Callee pops everything.
  else if (!canGuaranteeTCO(CallConv) && IsCalleePopSRet)
    // If this call passes a struct-return pointer, the callee pops it.
    NumBytesForCalleeToPop = 4;

  // Returns a glue for retval copy to use.
  if (!IsSibcall) {
    Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, NumBytesForCalleeToPop,
                               InGlue, dl);
    InGlue = Chain.getValue(1);
  }

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, dl, DAG,
                         InVals, RegMask);
unsigned
X86TargetLowering::GetAlignedArgumentStackSize(const unsigned StackSize,
                                               SelectionDAG &DAG) const {
  const Align StackAlignment = Subtarget.getFrameLowering()->getStackAlign();
  const uint64_t SlotSize = Subtarget.getRegisterInfo()->getSlotSize();
  assert(StackSize % SlotSize == 0 &&
         "StackSize must be a multiple of SlotSize");
  return alignTo(StackSize + SlotSize, StackAlignment) - SlotSize;
}
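// Illustrative only: with 8-byte slots and 16-byte stack alignment, a
// 48-byte argument area maps to alignTo(48 + 8, 16) - 8 == 56 bytes, so
// that pushing the 8-byte return address lands the stack back on a 16-byte
// boundary.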
/// MatchingStackOffset - Return true if the given stack call argument is
/// already available in the same position (relatively) of the caller's
/// incoming argument stack.
static bool MatchingStackOffset(SDValue Arg, unsigned Offset,
                                ISD::ArgFlagsTy Flags, MachineFrameInfo &MFI,
                                const MachineRegisterInfo *MRI,
                                const TargetInstrInfo *TII) {
  unsigned Bytes = Arg.getValueSizeInBits() / 8;

  for (;;) {
    // Look through nodes that don't alter the bits of the incoming value.
    unsigned Op = Arg.getOpcode();
    if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND ||
        Op == ISD::BITCAST) {
      Arg = Arg.getOperand(0);
      continue;
    }
    if (Op == ISD::TRUNCATE) {
      const SDValue &TruncInput = Arg.getOperand(0);
      if (TruncInput.getOpcode() == ISD::AssertZext &&
          cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
              Arg.getValueType()) {
        Arg = TruncInput.getOperand(0);
        continue;
      }
    }
    break;
  }

  int FI = INT_MAX;
  if (Arg.getOpcode() == ISD::CopyFromReg) {
    // ...
    if (!Flags.isByVal()) {
      if (!TII->isLoadFromStackSlot(*Def, FI))
        return false;
    } else {
      unsigned Opcode = Def->getOpcode();
      if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
           Opcode == X86::LEA64_32r) &&
          Def->getOperand(1).isFI()) {
        FI = Def->getOperand(1).getIndex();
        Bytes = Flags.getByValSize();
      } else
        return false;
    }
  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())
      // A byval argument is passed in as a pointer but it's now being
      // dereferenced, so it cannot match the incoming slot.
      return false;
    // ...
  } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
    FI = cast<FrameIndexSDNode>(Arg)->getIndex();
    Bytes = Flags.getByValSize();
  } else
    return false;

  assert(FI != INT_MAX);
  if (!MFI.isFixedObjectIndex(FI))
    return false;

  if (Offset != MFI.getObjectOffset(FI))
    return false;
  // ...
  return Bytes == MFI.getObjectSize(FI);
}
/// Check whether the call is eligible for tail call optimization.
bool X86TargetLowering::IsEligibleForTailCallOptimization(
    SDValue Callee, CallingConv::ID CalleeCC, bool IsCalleePopSRet,
    bool isVarArg, Type *RetTy, const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
  if (!mayTailCallThisCC(CalleeCC))
    return false;

  MachineFunction &MF = DAG.getMachineFunction();
  const Function &CallerF = MF.getFunction();
  // ...
  CallingConv::ID CallerCC = CallerF.getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;
  bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
  bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
  bool IsGuaranteeTCO = DAG.getTarget().Options.GuaranteedTailCallOpt ||
      CalleeCC == CallingConv::Tail || CalleeCC == CallingConv::SwiftTail;

  // Win64 functions have extra shadow space for argument homing. Don't do
  // tail calls from a Win64 function to a non-Win64 function or vice versa.
  if (IsCalleeWin64 != IsCallerWin64)
    return false;

  if (IsGuaranteeTCO) {
    if (canGuaranteeTCO(CalleeCC) && CCMatch)
      return true;
    return false;
  }

  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes (what gcc calls a sibcall).

  // Can't do sibcall if the stack needs to be dynamically re-aligned: PEI
  // would have to emit a special epilogue.
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  if (RegInfo->hasStackRealignment(MF))
    return false;

  // Also avoid sibcall optimization if the caller or callee pops an sret and
  // the other side is incompatible.
  // ...
  } else if (IsCalleePopSRet)
    // The callee pops an sret, so we cannot tail-call to a function that
    // does not expect this.
    return false;

  // Do not sibcall optimize vararg calls unless all arguments are passed via
  // registers.
  LLVMContext &C = *DAG.getContext();
  if (isVarArg && !Outs.empty()) {
    // Optimizing for varargs on Win64 is unlikely to be safe without
    // additional testing.
    if (IsCalleeWin64 || IsCallerWin64)
      return false;

    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
    CCInfo.AnalyzeCallOperands(Outs, CC_X86);
    for (const auto &VA : ArgLocs)
      if (!VA.isRegLoc())
        return false;
  }

  // If the call result is in ST0/ST1 it needs to be popped off the x87 stack,
  // so it is only safe to sibcall if the result is used.
  bool Unused = false;
  for (const auto &In : Ins) {
    if (!In.Used) {
      Unused = true;
      break;
    }
  }
  if (Unused) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
    CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
    for (const auto &VA : RVLocs) {
      if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
        return false;
    }
  }

  // The callee has to preserve all registers the caller needs to preserve.
  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (!CCMatch) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
  }

  unsigned StackArgsSize = 0;

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    // Check if stack adjustment is needed. For now, do not do this if any
    // argument is passed on the stack.
    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);

    // Allocate shadow area for Win64.
    if (IsCalleeWin64)
      CCInfo.AllocateStack(32, Align(8));

    CCInfo.AnalyzeCallOperands(Outs, CC_X86);
    StackArgsSize = CCInfo.getStackSize();

    if (CCInfo.getStackSize()) {
      // Check if the arguments are already laid out in the right way as the
      // caller's fixed stack objects.
      MachineFrameInfo &MFI = MF.getFrameInfo();
      const MachineRegisterInfo *MRI = &MF.getRegInfo();
      const X86InstrInfo *TII = Subtarget.getInstrInfo();
      for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
        const CCValAssign &VA = ArgLocs[I];
        if (!VA.isRegLoc() &&
            !MatchingStackOffset(OutVals[I], VA.getLocMemOffset(),
                                 Outs[I].Flags, MFI, MRI, TII))
          return false;
      }
    }

    bool PositionIndependent = isPositionIndependent();
    // If the tailcall address may be in a register, make sure it's possible
    // to register-allocate it. In 32-bit mode the call address can only
    // target EAX, EDX, or ECX, since the tail call must be scheduled after
    // callee-saved registers are restored; these happen to be the same
    // registers used to pass 'inreg' arguments.
    if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
                                  !isa<ExternalSymbolSDNode>(Callee)) ||
                                 PositionIndependent)) {
      unsigned NumInRegs = 0;
      // In PIC we need an extra register to formulate the address computation
      // for the callee.
      unsigned MaxInRegs = PositionIndependent ? 2 : 3;

      for (const auto &VA : ArgLocs) {
        if (!VA.isRegLoc())
          continue;
        Register Reg = VA.getLocReg();
        switch (Reg) {
        default: break;
        case X86::EAX: case X86::EDX: case X86::ECX:
          if (++NumInRegs == MaxInRegs)
            return false;
          break;
        }
      }
    }

    const MachineRegisterInfo &MRI = MF.getRegInfo();
    if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
      return false;
  }

  bool CalleeWillPop =
      X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
                       MF.getTarget().Options.GuaranteedTailCallOpt);

  if (unsigned BytesToPop =
          MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) {
    // If we have bytes to pop, the callee must pop them.
    bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
    if (!CalleePopMatches)
      return false;
  } else if (CalleeWillPop && StackArgsSize > 0) {
    // If we don't have bytes to pop, make sure the callee doesn't pop any.
    return false;
  }

  return true;
}
/// Determines whether the callee is required to pop its own arguments.
bool X86::isCalleePop(CallingConv::ID CallingConv,
                      bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
  // If GuaranteeTCO is true, we force some calls to be callee pop so that we
  // can guarantee TCO.
  if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO))
    return true;

  switch (CallingConv) {
  default:
    return false;
  case CallingConv::X86_StdCall:
  case CallingConv::X86_FastCall:
  case CallingConv::X86_ThisCall:
  case CallingConv::X86_VectorCall:
    return !is64Bit;
  }
}
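// Illustrative only: X86::isCalleePop(CallingConv::X86_StdCall,
// /*is64Bit=*/false, /*IsVarArg=*/false, /*GuaranteeTCO=*/false) returns
// true, which is why 32-bit stdcall functions return with "ret N" and pop
// their own arguments; the same query with is64Bit == true returns false.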