#define DEBUG_TYPE "x86-isel"
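// handleMaskRegisterForCallingConv: chooses the register type and count used
// to pass a v<N>i1 AVX-512 mask vector under a given calling convention.
// Fragment; "// ..." marks source lines elided from this excerpt.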
static std::pair<MVT, unsigned>
// ...
  return {MVT::v2i64, 1};
// ...
  return {MVT::v4i32, 1};
// ...
  return {MVT::v8i16, 1};
// ...
  return {MVT::v16i8, 1};
// ...
  return {MVT::v32i8, 1};
// ...
    return {MVT::v64i8, 1};
  return {MVT::v32i8, 2};
// ...
  if (!isPowerOf2_32(NumElts) || (NumElts == 64 && !Subtarget.hasBWI()) ||
      /* ... */)
    return {MVT::i8, NumElts};
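// X86TargetLowering::getRegisterTypeForCallingConv and
// getNumRegistersForCallingConv: return the register type and count a value
// of type VT occupies for the given calling convention, deferring to
// handleMaskRegisterForCallingConv for v*i1 masks. Fragments follow.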
  unsigned NumRegisters;
  std::tie(RegisterVT, NumRegisters) =
// ...
  if ((VT == MVT::f64 || VT == MVT::f80) && !Subtarget.is64Bit() &&
// ...
  unsigned NumRegisters;
  std::tie(RegisterVT, NumRegisters) =
// ...
  if (!Subtarget.is64Bit() && !Subtarget.hasX87()) {
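// X86TargetLowering::getVectorTypeBreakdownForCallingConv: describes how a
// vector type is broken into intermediate parts and physical registers for
// the calling convention; the cases below split v64i1 masks when 512-bit
// registers are unavailable.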
    unsigned &NumIntermediates, MVT &RegisterVT) const {
// ...
      Subtarget.hasAVX512() &&
// ...
    RegisterVT = MVT::i8;
    IntermediateVT = MVT::i1;
// ...
    return NumIntermediates;
// ...
  if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
      /* ... */) {
    RegisterVT = MVT::v32i8;
    IntermediateVT = MVT::v32i1;
    NumIntermediates = 2;
// ...
                                       NumIntermediates, RegisterVT);
  if (Subtarget.hasAVX512()) {
// ...
  if (Ty->isIntegerTy(128))
// ...
  if (Subtarget.is32Bit() && Ty->isFP128Ty())
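// getMaxByValAlign: helper for getByValTypeAlignment that walks a type to
// determine the desired alignment for a ByVal aggregate argument; 128-bit
// vectors bump the alignment to 16 bytes.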
    if (VTy->getPrimitiveSizeInBits().getFixedValue() == 128)
      MaxAlign = Align(16);
// ...
    if (EltAlign > MaxAlign)
// ...
    for (auto *EltTy : STy->elements()) {
// ...
      if (EltAlign > MaxAlign)
// ...
  if (Subtarget.is64Bit())
// ...
  if (Subtarget.hasSSE1())
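// X86TargetLowering::getOptimalMemOpType: picks the widest profitable vector
// type for inline memcpy/memset expansion, gated on the subtarget's vector
// width preference; returns EVT::Other to fall back to generic logic.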
    const AttributeList &FuncAttributes) const {
  if (!FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat)) {
    if (Op.size() >= 16 &&
        (!Subtarget.isUnalignedMem16Slow() || Op.isAligned(Align(16)))) {
// ...
      if (Op.size() >= 64 && Subtarget.hasAVX512() &&
          (Subtarget.getPreferVectorWidth() >= 512)) {
        return Subtarget.hasBWI() ? MVT::v64i8 : MVT::v16i32;
// ...
      if (Op.size() >= 32 && Subtarget.hasAVX() &&
          Subtarget.useLight256BitInstructions()) {
// ...
      if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128))
// ...
      if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) &&
          (Subtarget.getPreferVectorWidth() >= 128))
// ...
    } else if (((Op.isMemcpy() && !Op.isMemcpyStrSrc()) || Op.isZeroMemset()) &&
               Op.size() >= 8 && !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
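// isSafeMemOpType / isBitAligned / isMemoryAccessFast: predicates that decide
// whether a type is safe for inline memcpy/memset expansion and whether an
// access of a given alignment is fast on this subtarget.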
  if (Subtarget.is64Bit() && Op.size() >= 8)
// ...
    return Subtarget.hasSSE1();
// ...
    return Subtarget.hasSSE2();
// ...
  return (8 * Alignment.value()) % SizeInBits == 0;
// ...
    return !Subtarget.isUnalignedMem16Slow();
// ...
    return !Subtarget.isUnalignedMem32Slow();
// ...
    unsigned *Fast) const {
// ...
    return (Alignment < 16 || !Subtarget.hasSSE41());
    unsigned AddrSpace, Align Alignment,
    unsigned *Fast) const {
// ...
  if (Subtarget.hasAVX512())
// ...
      !Subtarget.isTargetCOFF())
// ...
  return Subtarget.useSoftFloat();
// ...
  if (Subtarget.is64Bit())
// ...
  unsigned ParamRegs = 0;
// ...
    ParamRegs = M->getNumberRegisterParameters();
// ...
  for (auto &Arg : Args) {
// ...
    if (T->isIntOrPtrTy())
// ...
      unsigned numRegs = 1;
// ...
      if (ParamRegs < numRegs)
// ...
      ParamRegs -= numRegs;
// ...
  if (!Subtarget.is64Bit())
// ...
  if (Subtarget.isPICStyleRIPRel() ||
      (Subtarget.is64Bit() &&
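// X86TargetLowering::findRepresentativeClass: returns the largest legal
// super-register class (and register-pressure cost) for each value type;
// scalar integers map to GR32/GR64 and all vector types share VR128X.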
std::pair<const TargetRegisterClass *, uint8_t>
// ...
  case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
    RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
// ...
    RRC = &X86::VR64RegClass;
// ...
  case MVT::f32: case MVT::f64:
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
  case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
  case MVT::v8f32: case MVT::v4f64:
  case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
  case MVT::v16f32: case MVT::v8f64:
    RRC = &X86::VR128XRegClass;
// ...
  return std::make_pair(RRC, Cost);
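// Stack-protector plumbing: getAddressSpace, getIRStackGuard, and
// insertSSPDeclarations locate the stack cookie (a TLS slot at a fixed
// segment offset on Linux/Android, or a named guard symbol elsewhere), while
// getSafeStackPointerLocation does the same for the SafeStack pointer.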
unsigned X86TargetLowering::getAddressSpace() const {
  if (Subtarget.is64Bit())
// ...
  if (Subtarget.isTargetFuchsia())
// ...
    int Offset = M->getStackProtectorGuardOffset();
// ...
      Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
// ...
    StringRef GuardReg = M->getStackProtectorGuardReg();
    if (GuardReg == "fs")
// ...
    else if (GuardReg == "gs")
// ...
    StringRef GuardSymb = M->getStackProtectorGuardSymbol();
    if (!GuardSymb.empty()) {
// ...
                                        nullptr, GuardSymb, nullptr,
// ...
      if (!Subtarget.isTargetDarwin())
// ...
  RTLIB::LibcallImpl SecurityCheckCookieLibcall =
      Libcalls.getLibcallImpl(RTLIB::SECURITY_CHECK_COOKIE);
// ...
  RTLIB::LibcallImpl SecurityCookieVar =
      Libcalls.getLibcallImpl(RTLIB::STACK_CHECK_GUARD);
  if (SecurityCheckCookieLibcall != RTLIB::Unsupported &&
      SecurityCookieVar != RTLIB::Unsupported) {
// ...
    F->addParamAttr(0, Attribute::AttrKind::InReg);
// ...
  StringRef GuardMode = M.getStackProtectorGuard();
// ...
  if ((GuardMode == "tls" || GuardMode.empty()) &&
// ...
  if (Subtarget.isTargetAndroid()) {
// ...
    int Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
// ...
  if (Subtarget.isTargetFuchsia()) {
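// X86TargetLowering::CanLowerReturn: checks via CCState::CheckReturn whether
// the return values fit the RetCC_X86 calling-convention rules.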
bool X86TargetLowering::CanLowerReturn(
// ...
    const Type *RetTy) const {
// ...
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_X86);
// ...
  static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
// ...
  static const MCPhysReg RCRegs[] = {X86::FPCW, X86::MXCSR};
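// lowerMasksToReg / Passv64i1ArgInRegs: lower v*i1 mask values into the legal
// scalar location registers; on 32-bit AVX512BW targets a v64i1 value is
// split into a Lo/Hi pair of 32-bit registers.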
  if (ValVT == MVT::v1i1)
// ...
  if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
      (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
// ...
    EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
// ...
    if (ValLoc == MVT::i32)
// ...
  if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
      (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
// ...
  assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
  assert(Subtarget.is32Bit() && "Expecting 32 bit target");
// ...
         "The value should reside in two registers");
// ...
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
  RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
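// X86TargetLowering::LowerReturn (fragment): copies return values into the
// locations assigned by RetCC_X86, with special cases for FP returns without
// SSE, MMX values in 64-bit mode, split v64i1 values, x87 stack returns, and
// the sret pointer returned in RAX/EAX.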
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
// ...
  bool ShouldDisableCalleeSavedRegister =
// ...
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
// ...
  for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
// ...
    CCValAssign &VA = RVLocs[I];
// ...
    if (ShouldDisableCalleeSavedRegister)
// ...
    SDValue ValToCopy = OutVals[OutsIndex];
// ...
           "Unexpected FP-extend for return value.");
// ...
    if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
// ...
    } else if (!Subtarget.hasSSE2() &&
               X86::FR64XRegClass.contains(VA.getLocReg()) &&
// ...
    if (Subtarget.is64Bit()) {
      if (ValVT == MVT::x86mmx) {
// ...
        ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
// ...
          if (!Subtarget.hasSSE2())
            ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
// ...
             "Currently the only custom case is when we split v64i1 to 2 regs");
// ...
      if (ShouldDisableCalleeSavedRegister)
// ...
  for (auto &RetVal : RetVals) {
    if (RetVal.first == X86::FP0 || RetVal.first == X86::FP1) {
// ...
    Chain = DAG.getCopyToReg(Chain, dl, RetVal.first, RetVal.second, Glue);
// ...
                  DAG.getRegister(RetVal.first, RetVal.second.getValueType()));
// ...
        = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
// ...
    Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Glue);
// ...
  if (ShouldDisableCalleeSavedRegister &&
// ...
    const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
// ...
  return DAG.getNode(opcode, dl, MVT::Other, RetOps);
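// X86TargetLowering::isUsedByReturnOnly: returns true when a node's value is
// consumed only by the function's return, which lets libcalls be tail-called.
// getTypeForExtReturn (below) picks the type small integer returns are
// extended to: i8 where allowed, otherwise i32.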
bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
  if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
// ...
  SDNode *Copy = *N->user_begin();
// ...
    if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() == MVT::Glue)
// ...
    TCChain = Copy->getOperand(0);
// ...
  for (const SDNode *U : Copy->users()) {
// ...
    if (U->getNumOperands() > 4)
// ...
    if (U->getNumOperands() == 4 &&
        U->getOperand(U->getNumOperands() - 1).getValueType() != MVT::Glue)
// ...
  MVT ReturnMVT = MVT::i32;
// ...
  bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
  if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
// ...
    ReturnMVT = MVT::i8;
// ...
  return VT.bitsLT(MinVT) ? MinVT : VT;
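// getv64i1Argument: reads two 32-bit location registers and rebuilds a 64-bit
// mask value (the inverse of Passv64i1ArgInRegs). lowerRegToMasks converts an
// incoming scalar register (i8/i16/i32/i64) back into the v*i1 mask type.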
  assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
  assert(Subtarget.is32Bit() && "Expecting 32 bit target");
// ...
         "Expecting first location of 64 bit width type");
// ...
         "The locations should have the same type");
// ...
         "The values should reside in two registers");
// ...
  SDValue ArgValueLo, ArgValueHi;
// ...
  if (nullptr == InGlue) {
// ...
  if (ValVT == MVT::v1i1)
// ...
  if (ValVT == MVT::v64i1) {
// ...
    assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
// ...
    MaskLenVT = MVT::i8;
// ...
    MaskLenVT = MVT::i16;
// ...
    MaskLenVT = MVT::i32;
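// X86TargetLowering::LowerCallResult: copies call results out of the physical
// registers assigned by RetCC_X86, rounding x87 values and reassembling v64i1
// masks where needed.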
SDValue X86TargetLowering::LowerCallResult(
// ...
    uint32_t *RegMask) const {
// ...
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
// ...
  CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
// ...
  for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
// ...
    CCValAssign &VA = RVLocs[I];
// ...
    if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
// ...
    } else if (!Subtarget.hasSSE2() &&
               X86::FR64XRegClass.contains(VA.getLocReg()) &&
               CopyVT == MVT::f64) {
// ...
    bool RoundAfterCopy = false;
// ...
      if (!Subtarget.hasX87())
// ...
      RoundAfterCopy = (CopyVT != VA.getLocVT());
// ...
           "Currently the only custom case is when we split v64i1 to 2 regs");
template <typename T>
// ...
  static_assert(std::is_same_v<T, ISD::OutputArg> ||
                    std::is_same_v<T, ISD::InputArg>,
                "requires ISD::OutputArg or ISD::InputArg");
// ...
  if (!TT.isX86_32() || TT.isOSMSVCRT() || TT.isOSIAMCU())
// ...
  bool IsSRetInMem = false;
// ...
    IsSRetInMem = Args.front().Flags.isSRet() && ArgLocs.front().isMemLoc();
// ...
      Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
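// LowerMemArgument (fragment): materializes a formal argument that was
// assigned a stack slot, creating a fixed frame object (immutable unless the
// argument is byval or is a copy-elision candidate loaded in place).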
  ISD::ArgFlagsTy Flags = Ins[i].Flags;
// ...
  bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
// ...
  bool ExtendedInMem =
// ...
  if (Flags.isByVal()) {
    unsigned Bytes = Flags.getByValSize();
    if (Bytes == 0) Bytes = 1;
// ...
    EVT ArgVT = Ins[i].ArgVT;
// ...
    if (Flags.isCopyElisionCandidate() &&
        /* ... */
        !ScalarizedVector) {
// ...
      if (Ins[i].PartOffset == 0) {
// ...
            ValVT, dl, Chain, PartAddr,
// ...
        if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
// ...
          return DAG.getLoad(ValVT, dl, Chain, Addr,
// ...
    MaybeAlign Alignment;
    if (Subtarget.isTargetWindowsMSVC() && !Subtarget.is64Bit() &&
        /* ... */)
      Alignment = MaybeAlign(4);
// ...
        ValVT, dl, Chain, FIN,
// ...
    return ExtendedInMem
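// get64BitArgumentGPRs / get64BitArgumentXMMs: the fixed GPR and XMM register
// sequences used for 64-bit vararg argument passing (Win64 uses the four
// RCX/RDX/R8/R9 home registers; SysV uses six GPRs and eight XMM registers,
// the latter only when SSE is enabled).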
  assert(Subtarget.is64Bit());
// ...
    static const MCPhysReg GPR64ArgRegsWin64[] = {
      X86::RCX, X86::RDX, X86::R8, X86::R9
    };
    return GPR64ArgRegsWin64;
// ...
  static const MCPhysReg GPR64ArgRegs64Bit[] = {
    X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
  };
  return GPR64ArgRegs64Bit;
// ...
  assert(Subtarget.is64Bit());
// ...
  bool isSoftFloat = Subtarget.useSoftFloat();
  if (isSoftFloat || !Subtarget.hasSSE1())
// ...
  static const MCPhysReg XMMArgRegs64Bit[] = {
    X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
    X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
  };
  return XMMArgRegs64Bit;
// ...
  return A.getValNo() < B.getValNo();
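// VarArgsLoweringHelper: helper class bundling the state needed to lower the
// vararg area during LowerFormalArguments (frame info, frame lowering, target
// lowering, the calling convention, and the CCState).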
class VarArgsLoweringHelper {
// ...
  VarArgsLoweringHelper(X86MachineFunctionInfo *FuncInfo, const SDLoc &Loc,
                        SelectionDAG &DAG, const X86Subtarget &Subtarget,
                        CallingConv::ID CallConv, CCState &CCInfo)
      : FuncInfo(FuncInfo), DL(Loc), DAG(DAG), Subtarget(Subtarget),
        TheMachineFunction(DAG.getMachineFunction()),
// ...
        FrameInfo(TheMachineFunction.getFrameInfo()),
        FrameLowering(*Subtarget.getFrameLowering()),
        TargLowering(DAG.getTargetLoweringInfo()), CallConv(CallConv),
// ...
  void lowerVarArgsParameters(SDValue &Chain, unsigned StackSize);
// ...
  void createVarArgAreaAndStoreRegisters(SDValue &Chain, unsigned StackSize);
// ...
  void forwardMustTailParameters(SDValue &Chain);
// ...
  bool is64Bit() const { return Subtarget.is64Bit(); }
  bool isWin64() const { return Subtarget.isCallingConvWin64(CallConv); }
// ...
  X86MachineFunctionInfo *FuncInfo;
// ...
  const X86Subtarget &Subtarget;
  MachineFunction &TheMachineFunction;
// ...
  MachineFrameInfo &FrameInfo;
  const TargetFrameLowering &FrameLowering;
  const TargetLowering &TargLowering;
  CallingConv::ID CallConv;
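// createVarArgAreaAndStoreRegisters: sets up the register-save area for
// va_start, storing the live-in GPRs and (when SSE is available) the XMM
// registers to fixed frame slots; in the SysV ABI the caller passes the XMM
// count in AL.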
void VarArgsLoweringHelper::createVarArgAreaAndStoreRegisters(
    SDValue &Chain, unsigned StackSize) {
// ...
        FrameInfo.CreateFixedObject(1, StackSize, true));
// ...
    unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
// ...
    assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
           "SSE register cannot be used when SSE is disabled!");
// ...
      int HomeOffset = FrameLowering.getOffsetOfLocalArea() + 8;
// ...
          FrameInfo.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
// ...
      Register GPR = TheMachineFunction.addLiveIn(Reg, &X86::GR64RegClass);
// ...
    const auto &AvailableXmms = ArgXMMs.slice(NumXMMRegs);
    if (!AvailableXmms.empty()) {
      Register AL = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
// ...
        TheMachineFunction.getRegInfo().addLiveIn(Reg);
// ...
    for (SDValue Val : LiveGPRs) {
// ...
    if (!LiveXMMRegs.empty()) {
// ...
          SaveXMMOps, MVT::i8, StoreMMO));
// ...
  if (!MemOps.empty())
// ...

void VarArgsLoweringHelper::forwardMustTailParameters(SDValue &Chain) {
// ...
  MVT VecVT = MVT::Other;
// ...
  if (Subtarget.useAVX512Regs() &&
      /* ... */)
    VecVT = MVT::v16f32;
  else if (Subtarget.hasAVX())
// ...
  else if (Subtarget.hasSSE2())
// ...
  if (VecVT != MVT::Other)
// ...
  CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
// ...
  if (is64Bit() && !isWin64() && !CCInfo.isAllocated(X86::AL)) {
    Register ALVReg = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
// ...
    FR.VReg = TheMachineFunction.getRegInfo().createVirtualRegister(
        TargLowering.getRegClassFor(FR.VT));
// ...

void VarArgsLoweringHelper::lowerVarArgsParameters(SDValue &Chain,
                                                   unsigned StackSize) {
// ...
  if (FrameInfo.hasVAStart())
    createVarArgAreaAndStoreRegisters(Chain, StackSize);
// ...
  if (FrameInfo.hasMustTailInVarArgFunc())
    forwardMustTailParameters(Chain);
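// X86TargetLowering::LowerFormalArguments (fragment): assigns incoming
// arguments with CC_X86, copies register arguments into virtual registers of
// the matching register class, lowers stack arguments through
// LowerMemArgument, and finally lowers the vararg area.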
SDValue X86TargetLowering::LowerFormalArguments(
// ...
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
// ...
  if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
      F.getName() == "main")
// ...
  bool Is64Bit = Subtarget.is64Bit();
  bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
// ...
         "Var args not supported with calling conv' regcall, fastcc, ghc or hipe");
// ...
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
// ...
    CCInfo.AllocateStack(32, Align(8));
// ...
  CCInfo.AnalyzeArguments(Ins, CC_X86);
// ...
    CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);
// ...
         "Argument Location list must be sorted before lowering");
// ...
  for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
// ...
    assert(InsIndex < Ins.size() && "Invalid Ins index");
    CCValAssign &VA = ArgLocs[I];
// ...
             "Currently the only custom case is when we split v64i1 to 2 regs");
// ...
      const TargetRegisterClass *RC;
      if (RegVT == MVT::i8)
        RC = &X86::GR8RegClass;
      else if (RegVT == MVT::i16)
        RC = &X86::GR16RegClass;
      else if (RegVT == MVT::i32)
        RC = &X86::GR32RegClass;
      else if (Is64Bit && RegVT == MVT::i64)
        RC = &X86::GR64RegClass;
      else if (RegVT == MVT::f16)
        RC = Subtarget.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
      else if (RegVT == MVT::f32)
        RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
      else if (RegVT == MVT::f64)
        RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
      else if (RegVT == MVT::f80)
        RC = &X86::RFP80RegClass;
      else if (RegVT == MVT::f128)
        RC = &X86::VR128RegClass;
// ...
        RC = &X86::VR512RegClass;
// ...
        RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
// ...
        RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
      else if (RegVT == MVT::x86mmx)
        RC = &X86::VR64RegClass;
      else if (RegVT == MVT::v1i1)
        RC = &X86::VK1RegClass;
      else if (RegVT == MVT::v8i1)
        RC = &X86::VK8RegClass;
      else if (RegVT == MVT::v16i1)
        RC = &X86::VK16RegClass;
      else if (RegVT == MVT::v32i1)
        RC = &X86::VK32RegClass;
      else if (RegVT == MVT::v64i1)
        RC = &X86::VK64RegClass;
// ...
      LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
// ...
        !(Ins[I].Flags.isByVal() && VA.isRegLoc())) {
// ...
          DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
// ...
  for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
    if (Ins[I].Flags.isSwiftAsync()) {
      auto X86FI = MF.getInfo<X86MachineFunctionInfo>();
// ...
      X86FI->setHasSwiftAsyncContext(true);
// ...
        int PtrSize = Subtarget.is64Bit() ? 8 : 4;
// ...
        X86FI->setSwiftAsyncContextFrameIdx(FI);
// ...
    if (Ins[I].Flags.isSRet()) {
// ...
             "SRet return has already been set");
// ...
  unsigned StackSize = CCInfo.getStackSize();
// ...
    StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
// ...
    VarArgsLoweringHelper(FuncInfo, dl, DAG, Subtarget, CallConv, CCInfo)
        .lowerVarArgsParameters(Chain, StackSize);
// ...
    EHInfo->PSPSymFrameIdx = PSPSymFI;
// ...
      F.hasFnAttribute("no_caller_saved_registers")) {
// ...
    for (std::pair<MCRegister, Register> Pair : MRI.liveins())
      MRI.disableCalleeSavedRegister(Pair.first);
// ...
    for (const ISD::InputArg &In : Ins) {
      if (In.Flags.isSwiftSelf() || In.Flags.isSwiftAsync() ||
          In.Flags.isSwiftError()) {
// ...
               "Swift attributes can't be used with preserve_none");
    bool isByVal) const {
// ...
  MaybeAlign Alignment;
  if (Subtarget.isTargetWindowsMSVC() && !Subtarget.is64Bit() &&
      /* ... */)
    Alignment = MaybeAlign(4);
// ...
      Chain, dl, Arg, PtrOff,
// ...

SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
// ...
    bool Is64Bit, int FPDiff, const SDLoc &dl) const {
// ...
  OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
// ...
                                        EVT PtrVT, unsigned SlotSize,
                                        int FPDiff, const SDLoc &dl) {
// ...
  if (!FPDiff) return Chain;
// ...
  int NewReturnAddrFI =
// ...
  Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
// ...
  SmallVector<int, 8> Mask;
  Mask.push_back(NumElems);
  for (unsigned i = 1; i != NumElems; ++i)
X86TargetLowering::ByValCopyKind X86TargetLowering::ByValNeedsCopyForTailCall(
// ...
  if (!SrcFrameIdxNode || !DstFrameIdxNode)
// ...
  int SrcFI = SrcFrameIdxNode->getIndex();
  int DstFI = DstFrameIdxNode->getIndex();
// ...
         "byval passed in non-fixed stack slot");
// ...
  if (!FixedSrc || (FixedSrc && SrcOffset < 0))
// ...
  if (SrcOffset == DstOffset)
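// X86TargetLowering::LowerCall (fragment): the main call lowering. It
// analyzes arguments with CC_X86, decides sibcall/musttail/tail-call
// eligibility, stages byval copies, stores stack arguments, passes register
// arguments, emits the call (with CFI/CFGuard/import-call variants), and
// finishes with CALLSEQ_END plus LowerCallResult.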
  SelectionDAG &DAG = CLI.DAG;
// ...
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
// ...
  const auto *CB = CLI.CB;
// ...
  bool Is64Bit = Subtarget.is64Bit();
  bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
// ...
  X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
// ...
      CB->hasFnAttr("no_caller_saved_registers"));
  bool IsIndirectCall = (CB && isa<CallInst>(CB) && CB->isIndirectCall());
  bool IsCFICall = IsIndirectCall && CLI.CFIType;
// ...
  bool IsNoTrackIndirectCall = IsIndirectCall && CB->doesNoCfCheck() &&
                               M->getModuleFlag("cf-protection-branch");
  if (IsNoTrackIndirectCall)
// ...
  MachineFunction::CallSiteInfo CSInfo;
// ...
    CSInfo = MachineFunction::CallSiteInfo(*CB);
// ...
  if (IsIndirectCall && !IsWin64 &&
      M->getModuleFlag("import-call-optimization"))
// ...
                       "Indirect calls must have a normal calling convention if "
                       "Import Call Optimization is enabled");
// ...
  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
// ...
    CCInfo.AllocateStack(32, Align(8));
// ...
  CCInfo.AnalyzeArguments(Outs, CC_X86);
// ...
    CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
// ...
  bool IsSibcall = false;
  if (isTailCall && ShouldGuaranteeTCO) {
// ...
    isTailCall = (CallConv == CallerCC);
    IsSibcall = IsMustTail;
  } else if (isTailCall) {
// ...
    IsSibcall = isEligibleForSiblingCallOpt(CLI, CCInfo, ArgLocs);
    isTailCall = IsSibcall || IsMustTail;
// ...
  if (IsMustTail && !isTailCall)
// ...
                       "site marked musttail");
// ...
         "Var args not supported with calling convention fastcc, ghc or hipe");
// ...
  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
// ...
    NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
// ...
  if (isTailCall && ShouldGuaranteeTCO && !IsSibcall) {
// ...
    FPDiff = NumBytesCallerPushed - NumBytes;
// ...
    if (FPDiff < X86Info->getTCReturnAddrDelta())
// ...
  unsigned NumBytesToPush = NumBytes;
  unsigned NumBytesToPop = NumBytes;
// ...
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
// ...
  for (const CCValAssign &VA : ArgLocs) {
// ...
    SDValue Src = OutVals[ArgIdx];
    ISD::ArgFlagsTy Flags = Outs[ArgIdx].Flags;
// ...
    if (!Flags.isByVal())
// ...
    ByValCopyKind Copy = ByValNeedsCopyForTailCall(DAG, Src, Dst, Flags);
// ...
    if (Copy == NoCopy) {
// ...
    } else if (Copy == CopyOnce) {
// ...
      ByValTemporaries[ArgIdx] = Src;
// ...
      assert(Copy == CopyViaTemp && "unexpected enum value");
// ...
                       Flags.getNonZeroByValAlign(),
// ...
  if (!ByValCopyChains.empty())
// ...
  if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
// ...
    if (!ArgLocs.back().isMemLoc())
// ...
    if (ArgLocs.back().getLocMemOffset() != 0)
// ...
                         "the only memory argument");
// ...
    assert(ArgLocs.back().isMemLoc() &&
           "cannot use preallocated attribute on a register "
// ...
    for (size_t i = 0; i < CLI.OutVals.size(); ++i) {
// ...
      PreallocatedOffsets.push_back(ArgLocs[i].getLocMemOffset());
// ...
    size_t PreallocatedId = MFI->getPreallocatedIdForCallSite(CLI.CB);
    MFI->setPreallocatedStackSize(PreallocatedId, NumBytes);
    MFI->setPreallocatedArgOffsets(PreallocatedId, PreallocatedOffsets);
// ...
  if (!IsSibcall && !IsMustTail)
// ...
                                 NumBytes - NumBytesToPush, dl);
// ...
  if (isTailCall && FPDiff)
    Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
                                    Is64Bit, FPDiff, dl);
// ...
         "Argument Location list must be sorted before lowering");
// ...
  for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
// ...
    assert(OutIndex < Outs.size() && "Invalid Out index");
// ...
    ISD::ArgFlagsTy Flags = Outs[OutIndex].Flags;
    if (Flags.isInAlloca() || Flags.isPreallocated())
// ...
    CCValAssign &VA = ArgLocs[I];
// ...
    SDValue Arg = OutVals[OutIndex];
    bool isByVal = Flags.isByVal();
// ...
        Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
// ...
            Flags.getByValSize(),
            std::max(Align(16), Flags.getNonZeroByValAlign()), false);
// ...
            Chain, dl, Arg, SpillSlot,
// ...
             "Currently the only custom case is when we split v64i1 to 2 regs");
// ...
    if (isVarArg && IsWin64) {
// ...
        case X86::XMM0: ShadowReg = X86::RCX; break;
        case X86::XMM1: ShadowReg = X86::RDX; break;
        case X86::XMM2: ShadowReg = X86::R8; break;
        case X86::XMM3: ShadowReg = X86::R9; break;
// ...
        RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
// ...
    } else if (!IsSibcall && (!isTailCall || (isByVal && !IsMustTail))) {
// ...
      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags, isByVal));
// ...
  if (!MemOpChains.empty())
// ...
  if (Subtarget.isPICStyleGOT()) {
// ...
    if (G && !G->getGlobal()->hasLocalLinkage() &&
        G->getGlobal()->hasDefaultVisibility())
      Callee = LowerGlobalAddress(Callee, DAG);
// ...
      Callee = LowerExternalSymbol(Callee, DAG);
// ...
  if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail &&
      (Subtarget.hasSSE1() || !M->getModuleFlag("SkipRaxSetup"))) {
// ...
        X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
        X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
// ...
    unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
    assert((Subtarget.hasSSE1() || !NumXMMRegs) &&
           "SSE registers cannot be used when SSE is disabled");
// ...
  if (isVarArg && IsMustTail) {
// ...
    for (const auto &F : Forwards) {
// ...
      RegsToPass.push_back(std::make_pair(F.PReg, Val));
// ...
  if (isTailCall && !IsSibcall) {
// ...
    for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
// ...
      CCValAssign &VA = ArgLocs[I];
// ...
               "Expecting custom case only in regcall calling convention");
// ...
      SDValue Arg = OutVals[OutsIndex];
      ISD::ArgFlagsTy Flags = Outs[OutsIndex].Flags;
// ...
      if (Flags.isInAlloca() || Flags.isPreallocated())
// ...
      uint32_t OpSize = (VA.getLocVT().getSizeInBits() + 7) / 8;
// ...
      if (Flags.isByVal()) {
        if (SDValue ByValSrc = ByValTemporaries[OutsIndex]) {
// ...
              ByValSrc, DstAddr, Chain, Flags, DAG, dl));
// ...
            Chain, dl, Arg, FIN,
// ...
    if (!MemOpChains2.empty())
// ...
  for (const auto &[Reg, N] : RegsToPass) {
// ...
  bool IsImpCall = false;
  bool IsCFGuardCall = false;
// ...
      assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
// ...
    Callee = LowerGlobalOrExternal(Callee, DAG, true, &IsImpCall);
  } else if (Subtarget.isTarget64BitILP32() &&
             Callee.getValueType() == MVT::i32) {
// ...
    IsCFGuardCall = true;
// ...
    GlobalAddressSDNode *GA =
// ...
           "CFG Call should be to a guard function");
    assert(LoadNode->getOffset()->isUndef() &&
           "CFG Function load should not have an offset");
// ...
  if (!IsSibcall && isTailCall && !IsMustTail) {
// ...
  Ops.push_back(Chain);
  Ops.push_back(Callee);
// ...
  for (const auto &[Reg, N] : RegsToPass)
// ...
  const uint32_t *Mask = [&]() {
    auto AdaptedCC = CallConv;
// ...
    if (CB && CB->hasFnAttr("no_callee_saved_registers"))
// ...
  assert(Mask && "Missing call preserved mask for calling convention");
// ...
  uint32_t *RegMask = nullptr;
// ...
    const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
// ...
    memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize);
// ...
    if (ShouldDisableArgRegs) {
      for (auto const &RegPair : RegsToPass)
// ...
    Ops.push_back(InGlue);
// ...
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
// ...
  } else if (IsNoTrackIndirectCall) {
// ...
  } else if (IsCFGuardCall) {
// ...
           "tail calls cannot be marked with clang.arc.attachedcall");
    assert(Is64Bit && "clang.arc.attachedcall is only supported in 64bit mode");
// ...
    Ops.insert(Ops.begin() + 1, GA);
// ...
  if (MDNode *HeapAlloc = CLI.CB->getMetadata("heapallocsite"))
// ...
  unsigned NumBytesForCalleeToPop = 0;
// ...
    NumBytesForCalleeToPop = NumBytes;
// ...
    NumBytesForCalleeToPop = 4;
// ...
    Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, NumBytesForCalleeToPop,
// ...
    for (const ISD::OutputArg &Out : Outs) {
      if (Out.Flags.isSwiftSelf() || Out.Flags.isSwiftAsync() ||
          Out.Flags.isSwiftError()) {
// ...
               "Swift attributes can't be used with preserve_none");
// ...
  return LowerCallResult(Chain, InGlue, CallConv, isVarArg, Ins, dl, DAG,
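// GetAlignedArgumentStackSize: rounds the argument area up so that, after the
// call pushes its return address (one slot), the stack stays aligned to the
// target stack alignment.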
X86TargetLowering::GetAlignedArgumentStackSize(const unsigned StackSize,
// ...
  const Align StackAlignment = Subtarget.getFrameLowering()->getStackAlign();
  const uint64_t SlotSize = Subtarget.getRegisterInfo()->getSlotSize();
  assert(StackSize % SlotSize == 0 &&
         "StackSize must be a multiple of SlotSize");
  return alignTo(StackSize + SlotSize, StackAlignment) - SlotSize;
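// MatchingStackOffset: returns true if an outgoing stack argument is already
// available at the same position in the caller's incoming argument area, so a
// tail call need not copy it.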
  if (!Flags.isByVal()) {
    if (!TII->isLoadFromStackSlot(*Def, FI))
// ...
    unsigned Opcode = Def->getOpcode();
    if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
         Opcode == X86::LEA64_32r) &&
        Def->getOperand(1).isFI()) {
      FI = Def->getOperand(1).getIndex();
      Bytes = Flags.getByValSize();
// ...
    if (Flags.isByVal())
// ...
    SDValue Ptr = Ld->getBasePtr();
// ...
    Bytes = Flags.getByValSize();
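// mayBeSRetTailCallCompatible: checks whether a call with an sret argument can
// still be tail-called, by locating the sret output and comparing it against
// the caller's own incoming sret register.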
  const auto &Outs = CLI.Outs;
  const auto &OutVals = CLI.OutVals;
// ...
  for (unsigned E = Outs.size(); Pos != E; ++Pos)
    if (Outs[Pos].Flags.isSRet())
// ...
  if (Pos == Outs.size())
// ...
  SDValue SRetArgVal = OutVals[Pos];
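// X86TargetLowering::isEligibleForSiblingCallOpt (fragment): decides whether
// a call may be emitted as a register-neutral sibling call, rejecting
// Win64/SysV mismatches, stack realignment, clobbered callee-saved argument
// registers, stack arguments not already in place, and callee-pop mismatches.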
bool X86TargetLowering::isEligibleForSiblingCallOpt(
// ...
  SelectionDAG &DAG = CLI.DAG;
  const SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  const SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  const SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
// ...
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
// ...
  bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
  bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
  if (IsCalleeWin64 != IsCallerWin64)
// ...
  if (Subtarget.isPICStyleGOT()) {
// ...
    if (!G->getGlobal()->hasLocalLinkage() &&
        G->getGlobal()->hasDefaultVisibility())
// ...
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  if (RegInfo->hasStackRealignment(MF))
// ...
  if (isVarArg && !Outs.empty()) {
// ...
    if (IsCalleeWin64 || IsCallerWin64)
// ...
    for (const auto &VA : ArgLocs)
// ...
  for (const auto &In : Ins) {
// ...
  CCState RVCCInfo(CalleeCC, false, MF, RVLocs, C);
  RVCCInfo.AnalyzeCallResult(Ins, RetCC_X86);
  for (const auto &VA : RVLocs) {
// ...
  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (CallerCC != CalleeCC) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
// ...
  if (CallerF.hasFnAttribute("no_caller_saved_registers"))
// ...
  if (!Outs.empty()) {
    if (StackArgsSize > 0) {
// ...
      const X86InstrInfo *TII = Subtarget.getInstrInfo();
      for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
        const CCValAssign &VA = ArgLocs[I];
// ...
        ISD::ArgFlagsTy Flags = Outs[I].Flags;
// ...
                                 PositionIndependent)) {
      unsigned NumInRegs = 0;
// ...
      unsigned MaxInRegs = PositionIndependent ? 2 : 3;
// ...
      for (const auto &VA : ArgLocs) {
// ...
        case X86::EAX: case X86::EDX: case X86::ECX:
          if (++NumInRegs == MaxInRegs)
// ...
  bool CalleeWillPop =
// ...
  bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
  if (!CalleePopMatches)
// ...
  } else if (CalleeWillPop && StackArgsSize > 0) {
// ...
                            bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
unsigned const MachineRegisterInfo * MRI
static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls)
Return true if the calling convention is one that we can guarantee TCO for.
static bool mayTailCallThisCC(CallingConv::ID CC)
Return true if we might ever do TCO for calls with this calling convention.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
const HexagonInstrInfo * TII
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, const SDLoc &dl)
CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
const MCPhysReg ArgGPRs[]
static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt)
Return true if the function is being made into a tailcall target by changing its ABI.
static bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, MachineFrameInfo &MFI, const MachineRegisterInfo *MRI, const M68kInstrInfo *TII, const CCValAssign &VA)
Return true if the given stack call argument is already available in the same position (relatively) o...
Machine Check Debug Module
Register const TargetRegisterInfo * TRI
Promote Memory to Register
This file defines ARC utility functions which are used by various parts of the compiler.
static CodeModel::Model getCodeModel(const PPCSubtarget &S, const TargetMachine &TM, const MachineOperand &MO)
static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign)
getMaxByValAlign - Helper for getByValTypeAlignment to determine the desired ByVal argument alignment...
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static Function * getFunction(FunctionType *Ty, const Twine &Name, Module *M)
static bool is64Bit(const char *name)
static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc, const SDLoc &DL, SelectionDAG &DAG)
Lowers masks values (v*i1) to the local register values.
static void Passv64i1ArgInRegs(const SDLoc &DL, SelectionDAG &DAG, SDValue &Arg, SmallVectorImpl< std::pair< Register, SDValue > > &RegsToPass, CCValAssign &VA, CCValAssign &NextVA, const X86Subtarget &Subtarget)
Breaks v64i1 value into two registers and adds the new node to the DAG.
static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA, SDValue &Root, SelectionDAG &DAG, const SDLoc &DL, const X86Subtarget &Subtarget, SDValue *InGlue=nullptr)
Reads two 32 bit registers and creates a 64 bit mask value.
static ArrayRef< MCPhysReg > get64BitArgumentXMMs(MachineFunction &MF, CallingConv::ID CallConv, const X86Subtarget &Subtarget)
static bool isSortedByValueNo(ArrayRef< CCValAssign > ArgLocs)
static ArrayRef< MCPhysReg > get64BitArgumentGPRs(CallingConv::ID CallConv, const X86Subtarget &Subtarget)
static SDValue getPopFromX87Reg(SelectionDAG &DAG, SDValue Chain, const SDLoc &dl, Register Reg, EVT VT, SDValue Glue)
static bool mayBeSRetTailCallCompatible(const TargetLowering::CallLoweringInfo &CLI, Register CallerSRetReg)
static std::pair< MVT, unsigned > handleMaskRegisterForCallingConv(unsigned NumElts, CallingConv::ID CC, const X86Subtarget &Subtarget)
static bool shouldDisableRetRegFromCSR(CallingConv::ID CC)
Returns true if a CC can dynamically exclude a register from the list of callee-saved-registers (Targ...
static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl, const char *Msg)
Call this when the user attempts to do something unsupported, like returning a double without SSE2 en...
static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue RetAddrFrIdx, EVT PtrVT, unsigned SlotSize, int FPDiff, const SDLoc &dl)
Emit a store of the return address if tail call optimization is performed and it is required (FPDiff!...
static bool shouldDisableArgRegFromCSR(CallingConv::ID CC)
Returns true if a CC can dynamically exclude a register from the list of callee-saved-registers (Targ...
static bool hasStackGuardSlotTLS(const Triple &TargetTriple)
static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT, const EVT &ValLoc, const SDLoc &DL, SelectionDAG &DAG)
The function will lower a register of various sizes (8/16/32/64) to a mask value of the expected size...
static Constant * SegmentOffset(IRBuilderBase &IRB, int Offset, unsigned AddressSpace)
static bool hasCalleePopSRet(const SmallVectorImpl< T > &Args, const SmallVectorImpl< CCValAssign > &ArgLocs, const X86Subtarget &Subtarget)
Determines whether Args, either a set of outgoing arguments to a call, or a set of incoming args of a...
static bool isBitAligned(Align Alignment, uint64_t SizeInBits)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
const Function * getParent() const
Return the enclosing method, or null if none.
CCState - This class holds information needed while lowering arguments and return values.
static LLVM_ABI bool resultsCompatible(CallingConv::ID CalleeCC, CallingConv::ID CallerCC, MachineFunction &MF, LLVMContext &C, const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn CalleeFn, CCAssignFn CallerFn)
Returns true if the results of the two calling conventions are compatible.
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
CCValAssign - Represent assignment of one arg/retval to a location.
void convertToReg(MCRegister Reg)
Register getLocReg() const
LocInfo getLocInfo() const
int64_t getLocMemOffset() const
unsigned getValNo() const
CallingConv::ID getCallingConv() const
LLVM_ABI bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
This class represents a function call, abstracting a target machine's calling convention.
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static ConstantInt * getSigned(IntegerType *Ty, int64_t V, bool ImplicitTrunc=false)
Return a ConstantInt with the specified value for the specified type.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
This is an important base class in LLVM.
A parsed version of the target data layout string in and methods for querying it.
LLVM_ABI TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Diagnostic information for unsupported feature in backend.
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasPersonalityFn() const
Check whether this function has a personality function.
Constant * getPersonalityFn() const
Get the personality function associated with this function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
const GlobalValue * getGlobal() const
Module * getParent()
Get the module that this global value is contained inside of...
void setDSOLocal(bool Local)
@ ExternalLinkage
Externally visible function.
Common base class shared among various IRBuilders.
BasicBlock * GetInsertBlock() const
LLVMContext & getContext() const
PointerType * getPtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
This is an important class for using LLVM in a threaded context.
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Tracks which library functions to use for a particular subtarget.
This class is used to represent ISD::LOAD nodes.
Context object for machine code objects.
Base class for the full range of assembler expressions which are needed for parsing.
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx, SMLoc Loc=SMLoc())
@ INVALID_SIMPLE_VALUE_TYPE
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool is512BitVector() const
Return true if this is a 512-bit vector type.
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
MVT getVectorElementType() const
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setObjectZExt(int ObjectIdx, bool IsZExt)
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setObjectSExt(int ObjectIdx, bool IsSExt)
bool isImmutableObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to an immutable object.
void setHasTailCall(bool V=true)
bool isObjectZExt(int ObjectIdx) const
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool isObjectSExt(int ObjectIdx) const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
int getObjectIndexBegin() const
Return the minimum frame object index.
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
MCSymbol * getPICBaseSymbol() const
getPICBaseSymbol - Return a function-local symbol to represent the PIC base.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
uint32_t * allocateRegMask()
Allocate and initialize a register mask with NumRegister bits.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Representation of each machine instruction.
@ EK_Custom32
EK_Custom32 - Each entry is a 32-bit value that is custom lowered by the TargetLowering::LowerCustomJ...
@ EK_LabelDifference64
EK_LabelDifference64 - Each entry is the address of the block minus the address of the jump table.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOStore
The memory access writes data.
static unsigned getRegMaskSize(unsigned NumRegs)
Returns number of elements needed for a regmask array.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI void disableCalleeSavedRegister(MCRegister Reg)
Disables the register from the list of CSRs.
A Module instance is used to store all the information related to an LLVM module.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
Represents one node in the SelectionDAG.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void setCFIType(uint32_t Type)
iterator_range< user_iterator > users()
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
LLVM_ABI SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
const DataLayout & getDataLayout() const
void addHeapAllocSite(const SDNode *Node, MDNode *MD)
Set HeapAllocSite to be associated with Node.
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
const TargetMachine & getTarget() const
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
MachineFunction & getMachineFunction() const
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
void addCallSiteInfo(const SDNode *Node, CallSiteInfo &&CallInfo)
Set CallSiteInfo to be associated with Node.
LLVMContext * getContext() const
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
constexpr bool empty() const
empty - Check if the string is empty.
Class to represent struct types.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
virtual Value * getIRStackGuard(IRBuilderBase &IRB, const LibcallLoweringInfo &Libcalls) const
If the target has a standard location for the stack protector guard, returns the address of that loca...
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
virtual void insertSSPDeclarations(Module &M, const LibcallLoweringInfo &Libcalls) const
Inserts necessary declarations for SSP (stack protection) purpose.
virtual unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const
Certain targets such as MIPS require that some types such as vectors are always broken down into scal...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
virtual std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const
Return the largest legal super-reg register class of the register class for the specified type and it...
static StringRef getLibcallImplName(RTLIB::LibcallImpl Call)
Get the libcall routine name for the specified libcall implementation.
virtual Value * getSafeStackPointerLocation(IRBuilderBase &IRB, const LibcallLoweringInfo &Libcalls) const
Returns the target-specific address of the unsafe stack pointer.
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
std::vector< ArgListEntry > ArgListTy
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
virtual const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
bool isPositionIndependent() const
virtual ArrayRef< MCPhysReg > getRoundingControlRegisters() const
Returns a 0 terminated array of rounding control registers that can be attached into strict FP call.
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
CodeModel::Model getCodeModel() const
Returns the code model.
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline.
unsigned EmitCallGraphSection
Emit section containing call graph metadata.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Triple - Helper class for working with autoconf configuration names.
bool isAndroid() const
Tests whether the target is Android.
bool isMusl() const
Tests whether the environment is musl-libc.
bool isOSGlibc() const
Tests whether the OS uses glibc.
The instances of the Type class are immutable: once they are created, they are never changed.
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
bool isX86_FP80Ty() const
Return true if this is x86 long double.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Value * getOperand(unsigned i) const
LLVM Value Representation.
void setBytesToPopOnReturn(unsigned bytes)
void setBPClobberedByCall(bool C)
void setFPClobberedByCall(bool C)
unsigned getVarArgsGPOffset() const
int getRegSaveFrameIndex() const
Register getSRetReturnReg() const
void setVarArgsGPOffset(unsigned Offset)
void setRegSaveFrameIndex(int Idx)
void setForceFramePointer(bool forceFP)
void setSRetReturnReg(Register Reg)
unsigned getVarArgsFPOffset() const
void setArgumentStackSize(unsigned size)
SmallVectorImpl< ForwardedRegister > & getForwardedMustTailRegParms()
void setTCReturnAddrDelta(int delta)
void setVarArgsFrameIndex(int Idx)
void setBPClobberedByInvoke(bool C)
void setFPClobberedByInvoke(bool C)
unsigned getBytesToPopOnReturn() const
void setVarArgsFPOffset(unsigned Offset)
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
Register getStackRegister() const
unsigned getSlotSize() const
Register getFramePtr() const
Returns physical register used as frame pointer.
Register getBaseRegister() const
const uint32_t * getNoPreservedMask() const override
const Triple & getTargetTriple() const
bool useAVX512Regs() const
bool isCallingConvWin64(CallingConv::ID CC) const
std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const override
Return the largest legal super-reg register class of the register class for the specified type and it...
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
bool isMemoryAccessFast(EVT VT, Align Alignment) const
Value * getIRStackGuard(IRBuilderBase &IRB, const LibcallLoweringInfo &Libcalls) const override
If the target has a standard location for the stack protector cookie, returns the address of that loc...
bool useSoftFloat() const override
Value * getSafeStackPointerLocation(IRBuilderBase &IRB, const LibcallLoweringInfo &Libcalls) const override
Return true if the target stores SafeStack pointer at a fixed offset in some non-standard address spa...
const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const override
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
bool isSafeMemOpType(MVT VT) const override
Returns true if it's safe to use load / store of the specified type to expand memcpy / memset inline.
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const override
For some targets, an LLVM struct type must be broken down into multiple simple types,...
Align getByValTypeAlignment(Type *Ty, const DataLayout &DL) const override
Return the desired alignment for ByVal aggregate function arguments in the caller parameter area.
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Returns true if the target allows unaligned memory accesses of the specified type.
EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns EVT::Other if the type should be determined using generic target-independent logic.
unsigned getVectorTypeBreakdownForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT, unsigned &NumIntermediates, MVT &RegisterVT) const override
Certain targets such as MIPS require that some types such as vectors are always broken down into scalar types.
void markLibCallAttributes(MachineFunction *MF, unsigned CC, ArgListTy &Args) const override
void insertSSPDeclarations(Module &M, const LibcallLoweringInfo &Libcalls) const override
Inserts necessary declarations for SSP (stack protection) purpose.
bool isScalarFPTypeInSSEReg(EVT VT) const
Return true if the specified scalar FP type is computed in an SSE register, not on the X87 floating point stack.
unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain targets require unusual breakdowns of certain types.
bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
This function returns true if the memory access is aligned or if the target allows this specific unaligned memory access.
SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const
SDValue unwrapAddress(SDValue N) const override
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the value type to use for ISD::SETCC.
EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const override
For types supported by the target, this is an identity function.
const MCExpr * LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB, unsigned uid, MCContext &Ctx) const override
constexpr ScalarTy getFixedValue() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
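A minimal sketch of how llvm_unreachable is typically used; the Kind enum and the lanes helper are hypothetical, for illustration only:

#include "llvm/Support/ErrorHandling.h"

// Hypothetical covered switch: every enumerator returns, so control only
// falls through if Kind gains a new value. Asserts builds abort with the
// message; release builds treat the marker as an optimizer hint.
enum class Kind { Scalar, Vector };

static unsigned lanes(Kind K) {
  switch (K) {
  case Kind::Scalar:
    return 1;
  case Kind::Vector:
    return 4;
  }
  llvm_unreachable("covered switch over Kind");
}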
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
CallingConv Namespace - This namespace contains an enum with a value for the well-known calling conventions.
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
@ X86_64_SysV
The C convention as specified in the x86-64 supplement to the System V ABI, used on most non-Windows systems.
@ HiPE
Used by the High-Performance Erlang Compiler (HiPE).
@ Swift
Calling convention for Swift.
@ PreserveMost
Used for runtime calls that preserve most registers.
@ X86_INTR
x86 hardware interrupt context.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ X86_ThisCall
Similar to X86_StdCall.
@ PreserveAll
Used for runtime calls that preserve (almost) all registers.
@ X86_StdCall
stdcall is mostly used by the Win32 API.
@ Fast
Attempts to make calls as fast as possible (e.g., by passing arguments in registers).
@ X86_VectorCall
MSVC calling convention that passes vectors and vector aggregates in SSE registers.
@ Intel_OCL_BI
Used for Intel OpenCL built-ins.
@ PreserveNone
Used for runtime calls that preserve no general-purpose registers.
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always be performed.
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will be made by making the callee clean up their stack.
@ X86_RegCall
Register calling convention used to optimize parameter transfer.
@ C
The default LLVM calling convention, compatible with C.
@ X86_FastCall
'fast' analog of X86_StdCall.
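The conventions above are plain CallingConv::ID values set on functions and call sites. A minimal sketch (the module and function names here are hypothetical):

#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  // void callee(void), tagged with the MSVC fastcall convention.
  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Ctx), /*isVarArg=*/false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "callee", M);
  F->setCallingConv(CallingConv::X86_FastCall);
  return 0;
}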
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ ADD
Simple integer binary arithmetic operators.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length and element type, this produces a concatenated vector result value, with length equal to the sum of the lengths of the input vectors.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to memory with one type and loaded from the same address with the other type.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the specified vector type.
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defined outside of the scope of this SelectionDAG.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially variable) element number IDX.
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value, and the value to copy into that register.
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the destination VT.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero- or sign-extended from a narrower type.
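ISD opcodes are ordinary enum values, so target code typically classifies generic DAG nodes with a switch. A small sketch (isExtension is a hypothetical helper):

#include "llvm/CodeGen/ISDOpcodes.h"

using namespace llvm;

// Hypothetical classifier: true for the value-extending conversion
// opcodes described above.
static bool isExtension(unsigned Opcode) {
  switch (Opcode) {
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::FP_EXTEND:
    return true;
  default:
    return false;
  }
}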
@ MO_NO_FLAG
MO_NO_FLAG - No flag for the operand.
@ RET_GLUE
Return with a glue operand.
@ IRET
Return from interrupt. Operand 0 is the number of bytes to pop.
@ CALL
These operations represent an abstract X86 call instruction, which includes a bunch of information.
@ GlobalBaseReg
On Darwin, this node represents the result of the popl at function entry, used for PIC code.
@ TC_RETURN
Tail call return.
@ NT_CALL
Same as call except it adds the NoTrack prefix.
@ MOVDQ2Q
Copies a 64-bit value from the low word of an XMM vector to an MMX vector.
@ POP_FROM_X87_REG
The same as ISD::CopyFromReg except that this node makes it explicit that it may lower to an x87 FPU stack pop.
bool isExtendedSwiftAsyncFrameSupported(const X86Subtarget &Subtarget, const MachineFunction &MF)
True if the target supports the extended frame for async Swift functions.
bool isCalleePop(CallingConv::ID CallingConv, bool is64Bit, bool IsVarArg, bool GuaranteeTCO)
Determines whether the callee is required to pop its own arguments.
std::optional< Function * > getAttachedARCFunction(const CallBase *CB)
This function returns operand bundle clang_arc_attachedcall's argument, which is the address of the ARC runtime function.
bool hasAttachedCallOpBundle(const CallBase *CB)
LLVM_ABI bool isCFGuardCall(const CallBase *CB)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI bool isCFGuardFunction(const GlobalValue *GV)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
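A quick usage sketch:

#include "llvm/Support/MathExtras.h"
#include <cassert>

int main() {
  assert(llvm::isPowerOf2_32(64));   // 64 == 1u << 6
  assert(!llvm::isPowerOf2_32(0));   // zero is explicitly excluded
  assert(!llvm::isPowerOf2_32(48));  // more than one bit set
  return 0;
}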
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
bool is_sorted(R &&Range, Compare C)
Wrapper function around std::is_sorted to check if elements in a range R are sorted with respect to a comparator C.
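Both append_range (listed above) and is_sorted are thin range wrappers; a combined sketch:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include <cassert>

int main() {
  llvm::SmallVector<int, 8> Dst = {1, 2};
  int Src[] = {3, 4, 5};
  llvm::append_range(Dst, Src); // Dst is now {1, 2, 3, 4, 5}
  assert(llvm::is_sorted(Dst)); // single-argument overload uses operator<
  return 0;
}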
LLVM_ABI EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
bool isFuncletEHPersonality(EHPersonality Pers)
Returns true if this is a personality function that invokes handler funclets (which must return to it).
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtual registers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
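For example:

#include "llvm/Support/Alignment.h"
#include <cassert>

int main() {
  llvm::Align A(8);
  assert(llvm::alignTo(10, A) == 16); // round 10 up to a multiple of 8
  assert(llvm::alignTo(16, A) == 16); // already-aligned sizes are unchanged
  return 0;
}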
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool CC_X86(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
bool RetCC_X86(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, Type *OrigTy, CCState &State)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
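The three casting helpers listed here (isa, dyn_cast, cast) differ only in how they handle a type mismatch; a minimal sketch over the IR class hierarchy (the visit helper is hypothetical):

#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

void visit(Value *V) {
  if (isa<CallInst>(V)) {
    // cast asserts the type is right and never returns null.
    CallInst *CI = cast<CallInst>(V);
    (void)CI->getCalledOperand();
  }
  // dyn_cast folds the test and the cast: null on mismatch.
  if (auto *RI = dyn_cast<ReturnInst>(V))
    (void)RI->getReturnValue();
}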
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
static constexpr Align Constant()
Allows construction of constexpr Align values.
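A compile-time sketch of the Align API above:

#include "llvm/Support/Alignment.h"

int main() {
  constexpr llvm::Align A = llvm::Align::Constant<16>();
  static_assert(A.value() == 16, "16-byte alignment");
  llvm::Align B(4); // runtime-constructed; must be a power of two
  return B.value() == 4 ? 0 : 1;
}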
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted to an integer type with the same bitwidth.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool bitsLT(EVT VT) const
Return true if this has fewer bits than VT.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
EVT changeVectorElementType(LLVMContext &Context, EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type, which is chosen by the caller.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
bool is128BitVector() const
Return true if this is a 128-bit vector type.
bool is512BitVector() const
Return true if this is a 512-bit vector type.
bool isVector() const
Return true if this is a vector value type.
bool is256BitVector() const
Return true if this is a 256-bit vector type.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
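A sketch exercising the EVT queries above:

#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>

using namespace llvm;

int main() {
  LLVMContext Ctx;
  EVT VT = EVT::getVectorVT(Ctx, MVT::f32, 4); // v4f32
  assert(VT.isVector() && VT.is128BitVector());
  assert(VT.getVectorNumElements() == 4);
  assert(VT.getVectorElementType() == MVT::f32);
  // Same shape with integer elements of equal width: v4i32.
  EVT MaskVT = VT.changeVectorElementTypeToInteger();
  assert(MaskVT.getVectorElementType() == MVT::i32);
  assert(!VT.bitsLT(MaskVT)); // both are 128 bits wide
  return 0;
}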
Describes a register that needs to be forwarded from the prologue to a musttail call.
SmallVector< ArgRegPair, 1 > ArgRegPairs
Vector of call-argument/forwarding-register pairs.
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exempt from aliasing.
static LLVM_ABI MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This represents a list of ValueTypes that has been interned by a SelectionDAG.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
const ConstantInt * CFIType
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.