#define DEBUG_TYPE "x86-isel"
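// handleMaskRegisterForCallingConv: picks the MVT and the number of registers
// used to pass a v<N>i1 AVX-512 mask value. Small masks are widened into full
// integer vectors (v2i64, v4i32, v8i16, v16i8, v32i8, v64i8); a v64i1 mask
// may instead be split across two v32i8 registers; and element counts that
// are not a power of two (or 64 elements without BWI) fall back to passing
// NumElts individual i8 values.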
static std::pair<MVT, unsigned>
    return {MVT::v2i64, 1};
    return {MVT::v4i32, 1};
    return {MVT::v8i16, 1};
    return {MVT::v16i8, 1};
    return {MVT::v32i8, 1};
    return {MVT::v64i8, 1};
    return {MVT::v32i8, 2};
  if (!isPowerOf2_32(NumElts) || (NumElts == 64 && !Subtarget.hasBWI()) ||
    return {MVT::i8, NumElts};
  unsigned NumRegisters;
  std::tie(RegisterVT, NumRegisters) =
  if ((VT == MVT::f64 || VT == MVT::f80) && !Subtarget.is64Bit() &&
  unsigned NumRegisters;
  std::tie(RegisterVT, NumRegisters) =
  if (!Subtarget.is64Bit() && !Subtarget.hasX87()) {
    unsigned &NumIntermediates, MVT &RegisterVT) const {
    RegisterVT = MVT::i8;
    IntermediateVT = MVT::i1;
    return NumIntermediates;
  if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
    RegisterVT = MVT::v32i8;
    IntermediateVT = MVT::v32i1;
    NumIntermediates = 2;
                                   NumIntermediates, RegisterVT);
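// getMaxByValAlign / getByValTypeAlignment: compute the alignment used for
// byval aggregates in the caller's parameter area. Any 128-bit vector member
// forces 16-byte alignment, and array/struct elements are walked recursively;
// on 64-bit targets the result is at least the ABI alignment of the type and
// never less than 8 bytes.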
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (VTy->getPrimitiveSizeInBits().getFixedValue() == 128)
      MaxAlign = Align(16);
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    if (EltAlign > MaxAlign)
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      if (EltAlign > MaxAlign)
  if (Subtarget.is64Bit())
    return std::max(DL.getABITypeAlign(Ty), Align::Constant<8>());
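// getOptimalMemOpType: chooses the widest profitable type for inline
// memcpy/memset expansion. When implicit FP use is allowed and the operation
// is at least 16 bytes (and unaligned 16-byte access is not slow, or the
// destination is 16-byte aligned), vector types are preferred: v64i8 with
// AVX-512BW (otherwise v16i32) for operations of 64+ bytes, a 256-bit type
// with AVX for 32+ bytes, and an SSE type below that. 32-bit targets with
// SSE2 get a special case for memcpy from a non-string source and for zero
// memsets of 8 or more bytes.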
  if (!FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat)) {
    if (Op.size() >= 16 &&
        (!Subtarget.isUnalignedMem16Slow() || Op.isAligned(Align(16)))) {
      if (Op.size() >= 64 && Subtarget.hasAVX512() && Subtarget.hasEVEX512() &&
        return Subtarget.hasBWI() ? MVT::v64i8 : MVT::v16i32;
      if (Op.size() >= 32 && Subtarget.hasAVX() &&
    if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) &&
    } else if (((Op.isMemcpy() && !Op.isMemcpyStrSrc()) || Op.isZeroMemset()) &&
               Op.size() >= 8 && !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
  if (Subtarget.is64Bit() && Op.size() >= 8)
  return (8 * Alignment.value()) % SizeInBits == 0;
    return !Subtarget.isUnalignedMem16Slow();
    return !Subtarget.isUnalignedMem32Slow();
                                          unsigned *Fast) const {
    return (Alignment < 16 || !Subtarget.hasSSE41());
                                      unsigned AddrSpace, Align Alignment,
                                      unsigned *Fast) const {
  if (Subtarget.hasAVX512() && Subtarget.hasEVEX512())
  return Subtarget.useSoftFloat();
  if (Subtarget.is64Bit())
  unsigned ParamRegs = 0;
    ParamRegs = M->getNumberRegisterParameters();
  for (auto &Arg : Args) {
    if (T->isIntOrPtrTy())
      unsigned numRegs = 1;
      if (ParamRegs < numRegs)
      ParamRegs -= numRegs;
  if (!Subtarget.is64Bit())
      (Subtarget.is64Bit() &&
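// findRepresentativeClass: returns the largest legal super-register class
// (and a relative cost) for a value type. Scalar integers map to GR32/GR64
// depending on the target, x86mmx to VR64, and scalar FP plus all
// 128/256/512-bit vector types share VR128X as their representative class.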
std::pair<const TargetRegisterClass *, uint8_t>
  case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
    RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
    RRC = &X86::VR64RegClass;
  case MVT::f32: case MVT::f64:
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
  case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
  case MVT::v8f32: case MVT::v4f64:
  case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
  case MVT::v16f32: case MVT::v8f64:
    RRC = &X86::VR128XRegClass;
  return std::make_pair(RRC, Cost);
unsigned X86TargetLowering::getAddressSpace() const {
  if (Subtarget.is64Bit())
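// Stack protector guard lowering: on targets that keep the guard in
// thread-local storage the cookie lives at a fixed segment offset
// (%fs:0x28 on 64-bit, %gs:0x14 on 32-bit), and the module can override the
// segment register ("fs"/"gs") or name an explicit guard symbol. MSVC-style
// targets instead use the __security_cookie global and call
// __security_check_cookie, whose first argument is passed in a register.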
  int Offset = M->getStackProtectorGuardOffset();
    Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
  StringRef GuardReg = M->getStackProtectorGuardReg();
  if (GuardReg == "fs")
  else if (GuardReg == "gs")
  StringRef GuardSymb = M->getStackProtectorGuardSymbol();
  if (!GuardSymb.empty()) {
        nullptr, GuardSymb, nullptr,
    M.getOrInsertGlobal("__security_cookie",
      F->addParamAttr(0, Attribute::AttrKind::InReg);
  StringRef GuardMode = M.getStackProtectorGuard();
  if ((GuardMode == "tls" || GuardMode.empty()) &&
    return M.getGlobalVariable("__security_cookie");
    return M.getFunction("__security_check_cookie");
  int Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
bool X86TargetLowering::CanLowerReturn(
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_X86);
  static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
  static const MCPhysReg RCRegs[] = {X86::FPCW, X86::MXCSR};
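// lowerMasksToReg / Passv64i1ArgInRegs: mask values are moved into ordinary
// registers for returns and outgoing arguments. v8i1 and v16i1 are bitcast to
// i8/i16 (then widened when the location is i32), v32i1/v64i1 are bitcast
// directly to i32/i64, and on 32-bit AVX-512BW targets a v64i1 argument is
// split into a lo/hi pair that occupies two consecutive register locations.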
  if (ValVT == MVT::v1i1)
  if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
      (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
    EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
    if (ValLoc == MVT::i32)
  if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
      (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
  assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
  assert(Subtarget.is32Bit() && "Expecting 32 bit target");
         "The value should reside in two registers");
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
  RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
  bool ShouldDisableCalleeSavedRegister =
  for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
    if (ShouldDisableCalleeSavedRegister)
    SDValue ValToCopy = OutVals[OutsIndex];
           "Unexpected FP-extend for return value.");
    } else if (!Subtarget.hasSSE2() &&
               X86::FR64XRegClass.contains(VA.getLocReg()) &&
    if (Subtarget.is64Bit()) {
      if (ValVT == MVT::x86mmx) {
          ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
          ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
             "Currently the only custom case is when we split v64i1 to 2 regs");
      if (ShouldDisableCalleeSavedRegister)
  for (auto &RetVal : RetVals) {
    if (RetVal.first == X86::FP0 || RetVal.first == X86::FP1) {
    Chain = DAG.getCopyToReg(Chain, dl, RetVal.first, RetVal.second, Glue);
        DAG.getRegister(RetVal.first, RetVal.second.getValueType()));
    Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Glue);
  if (ShouldDisableCalleeSavedRegister &&
  return DAG.getNode(opcode, dl, MVT::Other, RetOps);
bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
  if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
  if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() == MVT::Glue)
    TCChain = Copy->getOperand(0);
    if (U->getNumOperands() > 4)
    if (U->getNumOperands() == 4 &&
        U->getOperand(U->getNumOperands() - 1).getValueType() != MVT::Glue)
  MVT ReturnMVT = MVT::i32;
  if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
  return VT.bitsLT(MinVT) ? MinVT : VT;
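// getv64i1Argument / lowerRegToMasks: the inverse of the splitting above. On
// a 32-bit AVX-512BW target an incoming v64i1 value is read back from two
// consecutive 32-bit register locations and combined into one 64-bit mask;
// incoming i8/i16/i32/i64 locations are then truncated to the mask length and
// bitcast back to the v*i1 type.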
  assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
  assert(Subtarget.is32Bit() && "Expecting 32 bit target");
         "Expecting first location of 64 bit width type");
         "The locations should have the same type");
         "The values should reside in two registers");
  SDValue ArgValueLo, ArgValueHi;
  if (nullptr == InGlue) {
  if (ValVT == MVT::v1i1)
  if (ValVT == MVT::v64i1) {
    assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
    MaskLenVT = MVT::i8;
    MaskLenVT = MVT::i16;
    MaskLenVT = MVT::i32;
SDValue X86TargetLowering::LowerCallResult(
  CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
  for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
    } else if (!Subtarget.hasSSE2() &&
               X86::FR64XRegClass.contains(VA.getLocReg()) &&
               CopyVT == MVT::f64) {
    bool RoundAfterCopy = false;
      if (!Subtarget.hasX87())
      RoundAfterCopy = (CopyVT != VA.getLocVT());
             "Currently the only custom case is when we split v64i1 to 2 regs");
template <typename T>
  static_assert(std::is_same_v<T, ISD::OutputArg> ||
                    std::is_same_v<T, ISD::InputArg>,
                "requires ISD::OutputArg or ISD::InputArg");
  if (!Subtarget.is32Bit())
    if (!Flags.isSRet() || Flags.isInReg())
      Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
  bool ExtendedInMem =
  if (Flags.isByVal()) {
    unsigned Bytes = Flags.getByValSize();
    if (Bytes == 0) Bytes = 1;
    EVT ArgVT = Ins[i].ArgVT;
    if (Flags.isCopyElisionCandidate() &&
        !ScalarizedVector) {
      if (Ins[i].PartOffset == 0) {
            ValVT, dl, Chain, PartAddr,
        if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
      ValVT, dl, Chain, FIN,
  return ExtendedInMem
  assert(Subtarget.is64Bit());
    static const MCPhysReg GPR64ArgRegsWin64[] = {
      X86::RCX, X86::RDX, X86::R8, X86::R9
    return ArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
  static const MCPhysReg GPR64ArgRegs64Bit[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
  return ArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
  assert(Subtarget.is64Bit());
  bool isSoftFloat = Subtarget.useSoftFloat();
  if (isSoftFloat || !Subtarget.hasSSE1())
  static const MCPhysReg XMMArgRegs64Bit[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
  return ArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
  return A.getValNo() < B.getValNo();
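// VarArgsLoweringHelper: bundles the state needed to lower a variadic
// function prologue. createVarArgAreaAndStoreRegisters sets up the register
// save area (or, on Win64, fixed objects in the caller's home area indexed by
// NumIntRegs * 8) and spills the still-unallocated GPR and XMM argument
// registers; forwardMustTailParameters additionally forwards the remaining
// argument registers, including AL (the SSE register count), so a musttail
// call can reuse them.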
class VarArgsLoweringHelper {
      : FuncInfo(FuncInfo), DL(Loc), DAG(DAG), Subtarget(Subtarget),
        TheMachineFunction(DAG.getMachineFunction()),
        FrameInfo(TheMachineFunction.getFrameInfo()),
        FrameLowering(*Subtarget.getFrameLowering()),
        TargLowering(DAG.getTargetLoweringInfo()), CallConv(CallConv),
  void lowerVarArgsParameters(SDValue &Chain, unsigned StackSize);
  void createVarArgAreaAndStoreRegisters(SDValue &Chain, unsigned StackSize);
  void forwardMustTailParameters(SDValue &Chain);
  bool is64Bit() const { return Subtarget.is64Bit(); }
  bool isWin64() const { return Subtarget.isCallingConvWin64(CallConv); }
void VarArgsLoweringHelper::createVarArgAreaAndStoreRegisters(
    SDValue &Chain, unsigned StackSize) {
        FrameInfo.CreateFixedObject(1, StackSize, true));
    unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
    assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
           "SSE register cannot be used when SSE is disabled!");
      int HomeOffset = FrameLowering.getOffsetOfLocalArea() + 8;
          FrameInfo.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
      Register GPR = TheMachineFunction.addLiveIn(Reg, &X86::GR64RegClass);
    const auto &AvailableXmms = ArgXMMs.slice(NumXMMRegs);
    if (!AvailableXmms.empty()) {
      Register AL = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
        TheMachineFunction.getRegInfo().addLiveIn(Reg);
    for (SDValue Val : LiveGPRs) {
    if (!LiveXMMRegs.empty()) {
          SaveXMMOps, MVT::i8, StoreMMO));
  if (!MemOps.empty())
void VarArgsLoweringHelper::forwardMustTailParameters(SDValue &Chain) {
  MVT VecVT = MVT::Other;
  if (Subtarget.useAVX512Regs() &&
    VecVT = MVT::v16f32;
  else if (Subtarget.hasAVX())
  else if (Subtarget.hasSSE2())
  if (VecVT != MVT::Other)
  CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
  if (is64Bit() && !isWin64() && !CCInfo.isAllocated(X86::AL)) {
    Register ALVReg = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
    FR.VReg = TheMachineFunction.getRegInfo().createVirtualRegister(
        TargLowering.getRegClassFor(FR.VT));
void VarArgsLoweringHelper::lowerVarArgsParameters(SDValue &Chain,
                                                   unsigned StackSize) {
  if (FrameInfo.hasVAStart())
    createVarArgAreaAndStoreRegisters(Chain, StackSize);
  if (FrameInfo.hasMustTailInVarArgFunc())
    forwardMustTailParameters(Chain);
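// LowerFormalArguments: assigns incoming arguments to locations with CC_X86
// (after reserving the 32-byte Win64 shadow area), copies register arguments
// into virtual registers of the matching register class, loads memory
// arguments from fixed stack objects, records sret and Swift async-context
// slots, and hands off to VarArgsLoweringHelper when the function is
// variadic.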
SDValue X86TargetLowering::LowerFormalArguments(
      F.getName() == "main")
  bool Is64Bit = Subtarget.is64Bit();
         "Var args not supported with calling conv' regcall, fastcc, ghc or hipe");
    CCInfo.AllocateStack(32, Align(8));
  CCInfo.AnalyzeArguments(Ins, CC_X86);
    CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);
         "Argument Location list must be sorted before lowering");
  for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
    assert(InsIndex < Ins.size() && "Invalid Ins index");
             "Currently the only custom case is when we split v64i1 to 2 regs");
      if (RegVT == MVT::i8)
        RC = &X86::GR8RegClass;
      else if (RegVT == MVT::i16)
        RC = &X86::GR16RegClass;
      else if (RegVT == MVT::i32)
        RC = &X86::GR32RegClass;
      else if (Is64Bit && RegVT == MVT::i64)
        RC = &X86::GR64RegClass;
      else if (RegVT == MVT::f16)
        RC = Subtarget.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
      else if (RegVT == MVT::f32)
        RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
      else if (RegVT == MVT::f64)
        RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
      else if (RegVT == MVT::f80)
        RC = &X86::RFP80RegClass;
      else if (RegVT == MVT::f128)
        RC = &X86::VR128RegClass;
        RC = &X86::VR512RegClass;
        RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
        RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
      else if (RegVT == MVT::x86mmx)
        RC = &X86::VR64RegClass;
      else if (RegVT == MVT::v1i1)
        RC = &X86::VK1RegClass;
      else if (RegVT == MVT::v8i1)
        RC = &X86::VK8RegClass;
      else if (RegVT == MVT::v16i1)
        RC = &X86::VK16RegClass;
      else if (RegVT == MVT::v32i1)
        RC = &X86::VK32RegClass;
      else if (RegVT == MVT::v64i1)
        RC = &X86::VK64RegClass;
      LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
        !(Ins[I].Flags.isByVal() && VA.isRegLoc())) {
  for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
    if (Ins[I].Flags.isSwiftAsync()) {
        int PtrSize = Subtarget.is64Bit() ? 8 : 4;
        X86FI->setSwiftAsyncContextFrameIdx(FI);
    if (Ins[I].Flags.isSRet()) {
             "SRet return has already been set");
  unsigned StackSize = CCInfo.getStackSize();
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
    VarArgsLoweringHelper(FuncInfo, dl, DAG, Subtarget, CallConv, CCInfo)
        .lowerVarArgsParameters(Chain, StackSize);
    EHInfo->PSPSymFrameIdx = PSPSymFI;
      F.hasFnAttribute("no_caller_saved_registers")) {
    for (std::pair<MCRegister, Register> Pair : MRI.liveins())
      MRI.disableCalleeSavedRegister(Pair.first);
  for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
    if (Ins[I].Flags.isSwiftSelf() || Ins[I].Flags.isSwiftAsync() ||
        Ins[I].Flags.isSwiftError()) {
             "Swift attributes can't be used with preserve_none");
                                            bool isByVal) const {
      Chain, dl, Arg, PtrOff,
SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
    bool Is64Bit, int FPDiff, const SDLoc &dl) const {
                                        EVT PtrVT, unsigned SlotSize,
                                        int FPDiff, const SDLoc &dl) {
  if (!FPDiff) return Chain;
  int NewReturnAddrFI =
  Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
  Mask.push_back(NumElems);
  for (unsigned i = 1; i != NumElems; ++i)
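// LowerCall: the main outgoing-call path. It analyzes the outgoing arguments
// with CC_X86 (again reserving Win64 shadow space), spills or copies each
// argument to its assigned register or stack slot, duplicates XMM arguments
// into their Win64 shadow GPRs (XMM0->RCX ... XMM3->R9) for varargs callees,
// sets AL to the number of XMM registers used by a SysV varargs call,
// rewrites the return address when a tail call changes the frame size
// (FPDiff), and finally emits the call with the call-preserved register mask
// and the callee-pop byte count.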
  const auto *CB = CLI.CB;
  bool Is64Bit = Subtarget.is64Bit();
  bool IsSibcall = false;
  bool IsCalleePopSRet = !IsGuaranteeTCO && hasCalleePopSRet(Outs, Subtarget);
  bool HasNCSR = (CB && isa<CallInst>(CB) &&
                  CB->hasFnAttr("no_caller_saved_registers"));
  bool HasNoCfCheck = (CB && CB->doesNoCfCheck());
  bool IsIndirectCall = (CB && isa<CallInst>(CB) && CB->isIndirectCall());
  bool IsCFICall = IsIndirectCall && CLI.CFIType;
  Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
    CCInfo.AllocateStack(32, Align(8));
  CCInfo.AnalyzeArguments(Outs, CC_X86);
    CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
  if (Subtarget.isPICStyleGOT() && !IsGuaranteeTCO && !IsMustTail) {
    if (!G || (!G->getGlobal()->hasLocalLinkage() &&
               G->getGlobal()->hasDefaultVisibility()))
  if (isTailCall && !IsMustTail) {
    isTailCall = IsEligibleForTailCallOptimization(CLI, CCInfo, ArgLocs,
    if (!IsGuaranteeTCO && isTailCall)
    if (IsMustTail && !isTailCall)
                         "site marked musttail");
         "Var args not supported with calling convention fastcc, ghc or hipe");
  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
    FPDiff = NumBytesCallerPushed - NumBytes;
    if (FPDiff < X86Info->getTCReturnAddrDelta())
  unsigned NumBytesToPush = NumBytes;
  unsigned NumBytesToPop = NumBytes;
  if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
    if (!ArgLocs.back().isMemLoc())
    if (ArgLocs.back().getLocMemOffset() != 0)
                         "the only memory argument");
                         "cannot use preallocated attribute on a register "
    for (size_t i = 0; i < CLI.OutVals.size(); ++i) {
        PreallocatedOffsets.push_back(ArgLocs[i].getLocMemOffset());
    size_t PreallocatedId = MFI->getPreallocatedIdForCallSite(CLI.CB);
    MFI->setPreallocatedStackSize(PreallocatedId, NumBytes);
    MFI->setPreallocatedArgOffsets(PreallocatedId, PreallocatedOffsets);
  if (!IsSibcall && !IsMustTail)
                                 NumBytes - NumBytesToPush, dl);
  if (isTailCall && FPDiff)
    Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
                                    Is64Bit, FPDiff, dl);
         "Argument Location list must be sorted before lowering");
  for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
    assert(OutIndex < Outs.size() && "Invalid Out index");
    if (Flags.isInAlloca() || Flags.isPreallocated())
    SDValue Arg = OutVals[OutIndex];
    bool isByVal = Flags.isByVal();
        Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
            Flags.getByValSize(),
            std::max(Align(16), Flags.getNonZeroByValAlign()), false);
        int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
            Chain, dl, Arg, SpillSlot,
             "Currently the only custom case is when we split v64i1 to 2 regs");
      if (isVarArg && IsWin64) {
        case X86::XMM0: ShadowReg = X86::RCX; break;
        case X86::XMM1: ShadowReg = X86::RDX; break;
        case X86::XMM2: ShadowReg = X86::R8; break;
        case X86::XMM3: ShadowReg = X86::R9; break;
          RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
    } else if (!IsSibcall && (!isTailCall || isByVal)) {
      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags, isByVal));
  if (!MemOpChains.empty())
    if (G && !G->getGlobal()->hasLocalLinkage() &&
        G->getGlobal()->hasDefaultVisibility())
      Callee = LowerGlobalAddress(Callee, DAG);
    else if (isa<ExternalSymbolSDNode>(Callee))
      Callee = LowerExternalSymbol(Callee, DAG);
  if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail &&
      (Subtarget.hasSSE1() || !M->getModuleFlag("SkipRaxSetup"))) {
        X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
        X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
           && "SSE registers cannot be used when SSE is disabled");
  if (isVarArg && IsMustTail) {
    for (const auto &F : Forwards) {
      RegsToPass.push_back(std::make_pair(F.PReg, Val));
  if (!IsSibcall && isTailCall) {
    for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
             "Expecting custom case only in regcall calling convention");
        SDValue Arg = OutVals[OutsIndex];
        if (Flags.isInAlloca() || Flags.isPreallocated())
        if (Flags.isByVal()) {
              ArgChain, dl, Arg, FIN,
    if (!MemOpChains2.empty())
                                     RegInfo->getSlotSize(), FPDiff, dl);
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InGlue);
    assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
    Callee = LowerGlobalOrExternal(Callee, DAG, true);
             Callee.getValueType() == MVT::i32) {
  if (!IsSibcall && isTailCall && !IsMustTail) {
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
                                  RegsToPass[i].second.getValueType()));
  auto AdaptedCC = CallConv;
    if (CB && CB->hasFnAttr("no_callee_saved_registers"))
    return RegInfo->getCallPreservedMask(MF, AdaptedCC);
  assert(Mask && "Missing call preserved mask for calling convention");
    if (CLI.CB && isa<InvokeInst>(CLI.CB))
    if (CLI.CB && isa<InvokeInst>(CLI.CB))
    if (!Is64Bit && CLI.CB && isa<InvokeInst>(CLI.CB)) {
    memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize);
    if (ShouldDisableArgRegs) {
      for (auto const &RegPair : RegsToPass)
  if (HasNoCfCheck && IsCFProtectionSupported && IsIndirectCall) {
           "tail calls cannot be marked with clang.arc.attachedcall");
    assert(Is64Bit && "clang.arc.attachedcall is only supported in 64bit mode");
  unsigned NumBytesForCalleeToPop = 0;
    NumBytesForCalleeToPop = NumBytes;
    NumBytesForCalleeToPop = 4;
    Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, NumBytesForCalleeToPop,
  for (unsigned I = 0, E = Outs.size(); I != E; ++I) {
    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftAsync() ||
        Outs[I].Flags.isSwiftError()) {
             "Swift attributes can't be used with preserve_none");
  return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, dl, DAG,
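// GetAlignedArgumentStackSize: pads the argument area so that the area plus
// the return-address slot is a multiple of the stack alignment, i.e.
// alignTo(StackSize + SlotSize, StackAlignment) - SlotSize. For example, with
// an 8-byte slot and 16-byte stack alignment, a 16-byte argument area grows
// to 24 bytes (16 + 8 = 24 -> 32 aligned -> 24), while a 40-byte area is
// already in the right residue class and is left unchanged.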
X86TargetLowering::GetAlignedArgumentStackSize(const unsigned StackSize,
  assert(StackSize % SlotSize == 0 &&
         "StackSize must be a multiple of SlotSize");
  return alignTo(StackSize + SlotSize, StackAlignment) - SlotSize;
      cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
  if (!Flags.isByVal()) {
    unsigned Opcode = Def->getOpcode();
    if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
         Opcode == X86::LEA64_32r) &&
        Def->getOperand(1).isFI()) {
      FI = Def->getOperand(1).getIndex();
      Bytes = Flags.getByValSize();
  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())
      Bytes = Flags.getByValSize();
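// IsEligibleForTailCallOptimization: decides whether a call may be emitted as
// a sibcall. It rejects mixed Win64/SysV conventions, varargs calls when
// either side uses Win64, functions requiring stack realignment, result
// locations that differ between caller and callee (checked by running
// RetCC_X86 for both), callee-saved register masks that are not a subset of
// the caller's, 32-bit PIC calls that would exhaust the EAX/ECX/EDX argument
// registers, and callee-pop conventions whose popped byte count does not
// match the caller's stack argument size.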
bool X86TargetLowering::IsEligibleForTailCallOptimization(
  bool CCMatch = CallerCC == CalleeCC;
  if (IsCalleeWin64 != IsCallerWin64)
  if (IsGuaranteeTCO) {
  if (RegInfo->hasStackRealignment(MF))
  } else if (IsCalleePopSRet)
  if (isVarArg && !Outs.empty()) {
    if (IsCalleeWin64 || IsCallerWin64)
    for (const auto &VA : ArgLocs)
  for (const auto &In : Ins) {
    CCState RVCCInfo(CalleeCC, false, MF, RVLocs, C);
    RVCCInfo.AnalyzeCallResult(Ins, RetCC_X86);
    for (const auto &VA : RVLocs) {
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
  if (!Outs.empty()) {
    if (StackArgsSize > 0) {
      for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
                                  !isa<ExternalSymbolSDNode>(Callee)) ||
                                 PositionIndependent)) {
      unsigned NumInRegs = 0;
      unsigned MaxInRegs = PositionIndependent ? 2 : 3;
      for (const auto &VA : ArgLocs) {
        case X86::EAX: case X86::EDX: case X86::ECX:
          if (++NumInRegs == MaxInRegs)
  bool CalleeWillPop =
  if (unsigned BytesToPop =
    bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
    if (!CalleePopMatches)
  } else if (CalleeWillPop && StackArgsSize > 0) {
                                 bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
  switch (CallingConv) {