39#define DEBUG_TYPE "x86-fl"
41STATISTIC(NumFrameLoopProbe,
"Number of loop stack probes used in prologue");
43 "Number of extra stack probes generated in prologue");
51 STI(STI),
TII(*STI.getInstrInfo()),
TRI(STI.getRegisterInfo()) {
75 (
hasFP(MF) && !
TRI->hasStackRealignment(MF)) ||
110 return X86::SUB64ri8;
111 return X86::SUB64ri32;
114 return X86::SUB32ri8;
122 return X86::ADD64ri8;
123 return X86::ADD64ri32;
126 return X86::ADD32ri8;
132 return IsLP64 ? X86::SUB64rr : X86::SUB32rr;
136 return IsLP64 ? X86::ADD64rr : X86::ADD32rr;
142 return X86::AND64ri8;
143 return X86::AND64ri32;
146 return X86::AND32ri8;
151 return IsLP64 ? X86::LEA64r : X86::LEA32r;
157 return X86::MOV32ri64;
159 return X86::MOV64ri32;
167 unsigned Reg = RegMask.PhysReg;
169 if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
170 Reg == X86::AH || Reg == X86::AL)
184 bool BreakNext =
false;
189 if (Reg != X86::EFLAGS)
210 if (Succ->isLiveIn(X86::EFLAGS))
221 int64_t NumBytes,
bool InEpilogue)
const {
222 bool isSub = NumBytes < 0;
236 if (EmitInlineStackProbe && !InEpilogue) {
242 }
else if (
Offset > Chunk) {
253 unsigned AddSubRROpc =
262 MI->getOperand(3).setIsDead();
264 }
else if (
Offset > 8 * Chunk) {
288 MI->getOperand(3).setIsDead();
310 ? (
Is64Bit ? X86::PUSH64r : X86::PUSH32r)
311 : (
Is64Bit ? X86::POP64r : X86::POP32r);
320 BuildStackAdjustment(
MBB,
MBBI,
DL, isSub ? -ThisVal : ThisVal, InEpilogue)
330 assert(
Offset != 0 &&
"zero offset stack adjustment requested");
348 if (UseLEA && !
STI.useLeaForSP())
353 "We shouldn't have allowed this insertion point");
370 MI->getOperand(3).setIsDead();
377 bool doMergeWithPrevious)
const {
396 if (doMergeWithPrevious && PI !=
MBB.
begin() && PI->isCFIInstruction())
399 unsigned Opc = PI->getOpcode();
402 if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
403 Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
404 PI->getOperand(0).getReg() ==
StackPtr){
406 Offset = PI->getOperand(2).getImm();
407 }
else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
408 PI->getOperand(0).getReg() ==
StackPtr &&
409 PI->getOperand(1).getReg() ==
StackPtr &&
410 PI->getOperand(2).getImm() == 1 &&
411 PI->getOperand(3).getReg() == X86::NoRegister &&
412 PI->getOperand(5).getReg() == X86::NoRegister) {
414 Offset = PI->getOperand(4).getImm();
415 }
else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
416 Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
417 PI->getOperand(0).getReg() ==
StackPtr) {
419 Offset = -PI->getOperand(2).getImm();
424 if (PI !=
MBB.
end() && PI->isCFIInstruction()) {
431 if (!doMergeWithPrevious)
464 unsigned DwarfReg =
MRI->getDwarfRegNum(MachineFramePtr,
true);
487 unsigned DwarfReg =
MRI->getDwarfRegNum(Reg,
true);
499void X86FrameLowering::emitZeroCallUsedRegs(
BitVector RegsToZero,
515 if (!X86::RFP80RegClass.
contains(Reg))
518 unsigned NumFPRegs = ST.is64Bit() ? 8 : 7;
519 for (
unsigned i = 0; i != NumFPRegs; ++i)
522 for (
unsigned i = 0; i != NumFPRegs; ++i)
530 if (
TRI->isGeneralPurposeRegister(MF, Reg)) {
532 RegsToZero.
reset(Reg);
542 if (
ST.hasMMX() && X86::VR64RegClass.contains(Reg))
547 if (X86::VR128RegClass.
contains(Reg)) {
552 }
else if (X86::VR256RegClass.
contains(Reg)) {
556 XorOp = X86::VPXORrr;
557 }
else if (X86::VR512RegClass.
contains(Reg)) {
561 XorOp = X86::VPXORYrr;
562 }
else if (X86::VK1RegClass.
contains(Reg) ||
569 XorOp =
ST.hasBWI() ? X86::KXORQrr : X86::KXORWrr;
583 std::optional<MachineFunction::DebugInstrOperandPair> InstrNum)
const {
590 emitStackProbeInline(MF,
MBB,
MBBI,
DL,
false);
593 emitStackProbeCall(MF,
MBB,
MBBI,
DL, InProlog, InstrNum);
604 return MI.getOpcode() == X86::STACKALLOC_W_PROBING;
606 if (Where != PrologMBB.
end()) {
608 emitStackProbeInline(MF, PrologMBB, Where,
DL,
true);
609 Where->eraseFromParent();
617 bool InProlog)
const {
620 emitStackProbeInlineWindowsCoreCLR64(MF,
MBB,
MBBI,
DL, InProlog);
622 emitStackProbeInlineGeneric(MF,
MBB,
MBBI,
DL, InProlog);
625void X86FrameLowering::emitStackProbeInlineGeneric(
634 "different expansion expected for CoreCLR 64 bit");
636 const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
637 uint64_t ProbeChunk = StackProbeSize * 8;
640 TRI->hasStackRealignment(MF) ? calculateMaxStackAlign(MF) : 0;
645 if (
Offset > ProbeChunk) {
647 MaxAlign % StackProbeSize);
650 MaxAlign % StackProbeSize);
654void X86FrameLowering::emitStackProbeInlineGenericBlock(
659 const bool NeedsDwarfCFI = needsDwarfCFI(MF);
663 const unsigned MovMIOpc =
Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
664 const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
668 assert(AlignOffset < StackProbeSize);
671 if (StackProbeSize <
Offset + AlignOffset) {
674 BuildStackAdjustment(
MBB,
MBBI,
DL, -StackAdjustment,
false)
676 if (!HasFP && NeedsDwarfCFI) {
687 NumFrameExtraProbe++;
688 CurrentOffset = StackProbeSize - AlignOffset;
694 while (CurrentOffset + StackProbeSize <
Offset) {
695 BuildStackAdjustment(
MBB,
MBBI,
DL, -StackProbeSize,
false)
698 if (!HasFP && NeedsDwarfCFI) {
708 NumFrameExtraProbe++;
709 CurrentOffset += StackProbeSize;
718 unsigned Opc =
Is64Bit ? X86::PUSH64r : X86::PUSH32r;
723 BuildStackAdjustment(
MBB,
MBBI,
DL, -ChunkSize,
false)
730void X86FrameLowering::emitStackProbeInlineGenericLoop(
738 "Inline stack probe loop will clobber live EFLAGS.");
740 const bool NeedsDwarfCFI = needsDwarfCFI(MF);
744 const unsigned MovMIOpc =
Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
745 const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
748 if (AlignOffset < StackProbeSize) {
750 BuildStackAdjustment(
MBB,
MBBI,
DL, -AlignOffset,
false)
758 NumFrameExtraProbe++;
771 MF.
insert(MBBIter, testMBB);
772 MF.
insert(MBBIter, tailMBB);
793 if (!HasFP && NeedsDwarfCFI) {
796 const Register DwarfFinalStackProbed =
803 nullptr,
TRI->getDwarfRegNum(DwarfFinalStackProbed,
true)));
810 BuildStackAdjustment(*testMBB, testMBB->
end(),
DL, -StackProbeSize,
844 BuildStackAdjustment(*tailMBB, TailMBBIter,
DL, -TailOffset,
850 if (!HasFP && NeedsDwarfCFI) {
860 nullptr,
TRI->getDwarfRegNum(DwarfStackPtr,
true)));
868void X86FrameLowering::emitStackProbeInlineWindowsCoreCLR64(
872 assert(
STI.is64Bit() &&
"different expansion needed for 32 bit");
879 "Inline stack probe loop will clobber live EFLAGS.");
914 MF.
insert(MBBIter, RoundMBB);
915 MF.
insert(MBBIter, LoopMBB);
916 MF.
insert(MBBIter, ContinueMBB);
924 const int64_t ThreadEnvironmentStackLimit = 0x10;
926 const int64_t PageMask = ~(
PageSize - 1);
932 const Register SizeReg = InProlog ? X86::RAX
933 :
MRI.createVirtualRegister(RegClass),
934 ZeroReg = InProlog ? X86::RCX
935 :
MRI.createVirtualRegister(RegClass),
936 CopyReg = InProlog ? X86::RDX
937 :
MRI.createVirtualRegister(RegClass),
938 TestReg = InProlog ? X86::RDX
939 :
MRI.createVirtualRegister(RegClass),
940 FinalReg = InProlog ? X86::RDX
941 :
MRI.createVirtualRegister(RegClass),
942 RoundedReg = InProlog ? X86::RDX
943 :
MRI.createVirtualRegister(RegClass),
944 LimitReg = InProlog ? X86::RCX
945 :
MRI.createVirtualRegister(RegClass),
946 JoinReg = InProlog ? X86::RCX
947 :
MRI.createVirtualRegister(RegClass),
948 ProbeReg = InProlog ? X86::RCX
949 :
MRI.createVirtualRegister(RegClass);
952 int64_t RCXShadowSlot = 0;
953 int64_t RDXShadowSlot = 0;
969 int64_t InitSlot = 8 + CalleeSaveSize + (
HasFP ? 8 : 0);
973 RCXShadowSlot = InitSlot;
975 RDXShadowSlot = InitSlot;
976 if (IsRDXLiveIn && IsRCXLiveIn)
1017 .
addImm(ThreadEnvironmentStackLimit)
1025 BuildMI(RoundMBB,
DL,
TII.get(X86::AND64ri32), RoundedReg)
1066 TII.get(X86::MOV64rm), X86::RCX),
1067 X86::RSP,
false, RCXShadowSlot);
1070 TII.get(X86::MOV64rm), X86::RDX),
1071 X86::RSP,
false, RDXShadowSlot);
1077 BuildMI(*ContinueMBB, ContinueMBBI,
DL,
TII.get(X86::SUB64rr), X86::RSP)
1090 for (++BeforeMBBI; BeforeMBBI !=
MBB.
end(); ++BeforeMBBI) {
1106void X86FrameLowering::emitStackProbeCall(
1109 std::optional<MachineFunction::DebugInstrOperandPair> InstrNum)
const {
1115 "code model and indirect thunks not yet implemented.");
1119 "Stack probe calls will clobber live EFLAGS.");
1123 CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
1125 CallOp = X86::CALLpcrel32;
1186 for (++ExpansionMBBI; ExpansionMBBI !=
MBBI; ++ExpansionMBBI)
1194 const uint64_t Win64MaxSEHOffset = 128;
1195 uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
1197 return SEHFrameOffset & -16;
1210 MaxAlign = (
StackAlign > MaxAlign) ? StackAlign : MaxAlign;
1214 return MaxAlign.
value();
1227 const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
1228 const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);
1233 if (Reg ==
StackPtr && EmitInlineStackProbe && MaxAlign >= StackProbeSize) {
1235 NumFrameLoopProbe++;
1246 MF.
insert(MBBIter, entryMBB);
1247 MF.
insert(MBBIter, headMBB);
1248 MF.
insert(MBBIter, bodyMBB);
1249 MF.
insert(MBBIter, footMBB);
1250 const unsigned MovMIOpc =
Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
1259 BuildMI(entryMBB,
DL,
TII.get(TargetOpcode::COPY), FinalStackProbed)
1263 BuildMI(entryMBB,
DL,
TII.get(AndOp), FinalStackProbed)
1264 .
addReg(FinalStackProbed)
1269 MI->getOperand(3).setIsDead();
1273 .
addReg(FinalStackProbed)
1287 const unsigned SUBOpc =
1297 .
addReg(FinalStackProbed)
1318 const unsigned SUBOpc =
1328 .
addReg(FinalStackProbed)
1344 .
addReg(FinalStackProbed)
1366 MI->getOperand(3).setIsDead();
1374 "MF used frame lowering for wrong subtarget");
1383bool X86FrameLowering::isWin64Prologue(
const MachineFunction &MF)
const {
1387bool X86FrameLowering::needsDwarfCFI(
const MachineFunction &MF)
const {
1479 "MF used frame lowering for wrong subtarget");
1485 uint64_t MaxAlign = calculateMaxStackAlign(MF);
1491 bool FnHasClrFunclet =
1493 bool IsClrFunclet = IsFunclet && FnHasClrFunclet;
1494 bool HasFP =
hasFP(MF);
1495 bool IsWin64Prologue = isWin64Prologue(MF);
1500 bool NeedsWinCFI = NeedsWin64CFI || NeedsWinFPO;
1501 bool NeedsDwarfCFI = needsDwarfCFI(MF);
1507 bool HasWinCFI =
false;
1516 if (TailCallArgReserveSize && IsWin64Prologue)
1519 const bool EmitStackProbeCall =
1536 .
addUse(X86::NoRegister);
1571 !EmitStackProbeCall &&
1578 StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
1585 if (TailCallArgReserveSize != 0) {
1586 BuildStackAdjustment(
MBB,
MBBI,
DL, -(
int)TailCallArgReserveSize,
1609 Register Establisher = X86::NoRegister;
1615 if (IsWin64Prologue && IsFunclet && !IsClrFunclet) {
1635 NumBytes = FrameSize -
1639 if (
TRI->hasStackRealignment(MF) && !IsWin64Prologue)
1640 NumBytes =
alignTo(NumBytes, MaxAlign);
1647 if (NeedsDwarfCFI) {
1656 unsigned DwarfFramePtr =
TRI->getDwarfRegNum(MachineFramePtr,
true);
1677 if (Attrs.hasAttrSomewhere(Attribute::SwiftAsync)) {
1712 if (!IsWin64Prologue && !IsFunclet) {
1721 if (NeedsDwarfCFI) {
1724 unsigned DwarfFramePtr =
TRI->getDwarfRegNum(MachineFramePtr,
true);
1742 assert(!IsFunclet &&
"funclets without FPs not yet implemented");
1743 NumBytes = StackSize -
1750 if (HasFP &&
TRI->hasStackRealignment(MF))
1758 unsigned ParentFrameNumBytes = NumBytes;
1760 NumBytes = getWinEHFuncletFrameSize(MF);
1763 bool PushedRegs =
false;
1768 (
MBBI->getOpcode() == X86::PUSH32r ||
1769 MBBI->getOpcode() == X86::PUSH64r)) {
1774 if (!HasFP && NeedsDwarfCFI) {
1795 if (!IsWin64Prologue && !IsFunclet &&
TRI->hasStackRealignment(MF)) {
1796 assert(HasFP &&
"There should be a frame pointer if stack is realigned.");
1822 uint64_t AlignedNumBytes = NumBytes;
1823 if (IsWin64Prologue && !IsFunclet &&
TRI->hasStackRealignment(MF))
1824 AlignedNumBytes =
alignTo(AlignedNumBytes, MaxAlign);
1825 if (AlignedNumBytes >= StackProbeSize && EmitStackProbeCall) {
1827 "The Red Zone is not accounted for in stack probes");
1849 int64_t
Alloc = isEAXAlive ? NumBytes - 8 : NumBytes;
1857 .
addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
1876 }
else if (NumBytes) {
1880 if (NeedsWinCFI && NumBytes) {
1887 int SEHFrameOffset = 0;
1888 unsigned SPOrEstablisher;
1895 unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
1899 Establisher,
false, PSPSlotOffset)
1906 false, PSPSlotOffset)
1913 SPOrEstablisher = Establisher;
1918 if (IsWin64Prologue && HasFP) {
1925 SPOrEstablisher,
false, SEHFrameOffset);
1928 .
addReg(SPOrEstablisher);
1931 if (NeedsWinCFI && !IsFunclet) {
1932 assert(!NeedsWinFPO &&
"this setframe incompatible with FPO data");
1941 }
else if (IsFunclet &&
STI.is32Bit()) {
1965 if (X86::FR64RegClass.
contains(Reg)) {
1968 if (IsWin64Prologue && IsFunclet)
1976 assert(!NeedsWinFPO &&
"SEH_SaveXMM incompatible with FPO data");
1986 if (NeedsWinCFI && HasWinCFI)
1990 if (FnHasClrFunclet && !IsFunclet) {
1994 unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
2008 if (IsWin64Prologue &&
TRI->hasStackRealignment(MF)) {
2009 assert(HasFP &&
"There should be a frame pointer if stack is realigned.");
2010 BuildStackAlignAND(
MBB,
MBBI,
DL, SPOrEstablisher, MaxAlign);
2014 if (IsFunclet &&
STI.is32Bit())
2047 assert(UsedReg == BasePtr);
2054 if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
2056 if (!HasFP && NumBytes) {
2097 switch (
MI.getOpcode()) {
2099 case X86::CLEANUPRET:
2121X86FrameLowering::getPSPSlotOffsetFromSP(
const MachineFunction &MF)
const {
2128 return static_cast<unsigned>(
Offset);
2132X86FrameLowering::getWinEHFuncletFrameSize(
const MachineFunction &MF)
const {
2138 unsigned XMMSize = WinEHXMMSlotInfo.
size() *
2139 TRI->getSpillSize(X86::VR128RegClass);
2148 UsedSize = getPSPSlotOffsetFromSP(MF) +
SlotSize;
2159 return FrameSizeMinusRBP + XMMSize - CSSize;
2163 return Opc == X86::TCRETURNri || Opc == X86::TCRETURNdi ||
2164 Opc == X86::TCRETURNmi ||
2165 Opc == X86::TCRETURNri64 || Opc == X86::TCRETURNdi64 ||
2166 Opc == X86::TCRETURNmi64;
2177 DL =
MBBI->getDebugLoc();
2185 bool NeedsWin64CFI =
2191 uint64_t MaxAlign = calculateMaxStackAlign(MF);
2194 bool HasFP =
hasFP(MF);
2202 assert(HasFP &&
"EH funclets without FP not yet implemented");
2203 NumBytes = getWinEHFuncletFrameSize(MF);
2207 NumBytes = FrameSize - CSSize - TailCallArgReserveSize;
2211 if (
TRI->hasStackRealignment(MF) && !IsWin64Prologue)
2212 NumBytes =
alignTo(FrameSize, MaxAlign);
2214 NumBytes = StackSize - CSSize - TailCallArgReserveSize;
2216 uint64_t SEHStackAllocAmt = NumBytes;
2241 if (NeedsDwarfCFI) {
2242 unsigned DwarfStackPtr =
2243 TRI->getDwarfRegNum(
Is64Bit ? X86::RSP : X86::ESP,
true);
2248 unsigned DwarfFramePtr =
TRI->getDwarfRegNum(MachineFramePtr,
true);
2263 unsigned Opc = PI->getOpcode();
2265 if (Opc != X86::DBG_VALUE && !PI->isTerminator()) {
2278 if (IsFunclet && Terminator->getOpcode() == X86::CATCHRET)
2279 emitCatchRetReturnValue(
MBB, FirstCSPop, &*Terminator);
2282 DL =
MBBI->getDebugLoc();
2294 if (
TRI->hasStackRealignment(MF))
2298 IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;
2310 if (LEAAmount != 0) {
2321 }
else if (NumBytes) {
2324 if (!HasFP && NeedsDwarfCFI) {
2328 nullptr, CSSize + TailCallArgReserveSize +
SlotSize),
2343 if (!HasFP && NeedsDwarfCFI) {
2350 unsigned Opc = PI->getOpcode();
2352 if (Opc == X86::POP32r || Opc == X86::POP64r) {
2370 assert(
Offset >= 0 &&
"TCDelta should never be positive");
2394 else if (
TRI->hasStackRealignment(MF))
2408 int64_t FPDelta = 0;
2419 if (IsWin64Prologue) {
2427 uint64_t NumBytes = FrameSize - CSSize;
2437 FPDelta = FrameSize - SEHFrameOffset;
2439 "FPDelta isn't aligned per the Win64 ABI!");
2451 if (TailCallReturnAddrDelta < 0)
2452 Offset -= TailCallReturnAddrDelta;
2470 const auto it = WinEHXMMSlotInfo.find(FI);
2472 if (it == WinEHXMMSlotInfo.end())
2483 int Adjustment)
const {
2493 bool IgnoreSPUpdates)
const {
2543 "we don't handle this case!");
2575 std::vector<CalleeSavedInfo> &CSI)
const {
2579 unsigned CalleeSavedFrameSize = 0;
2580 unsigned XMMCalleeSavedFrameSize = 0;
2586 if (TailCallReturnAddrDelta < 0) {
2597 TailCallReturnAddrDelta -
SlotSize,
true);
2601 if (this->TRI->hasBasePointer(MF)) {
2627 for (
unsigned i = 0; i < CSI.size(); ++i) {
2628 if (
TRI->regsOverlap(CSI[i].getReg(),FPReg)) {
2629 CSI.erase(CSI.begin() + i);
2660 if (X86::VK16RegClass.
contains(Reg))
2664 unsigned Size =
TRI->getSpillSize(*RC);
2665 Align Alignment =
TRI->getSpillAlign(*RC);
2667 assert(SpillSlotOffset < 0 &&
"SpillSlotOffset should always < 0 on X86");
2668 SpillSlotOffset = -
alignTo(-SpillSlotOffset, Alignment);
2671 SpillSlotOffset -=
Size;
2677 if (X86::VR128RegClass.
contains(Reg)) {
2678 WinEHXMMSlotInfo[
SlotIndex] = XMMCalleeSavedFrameSize;
2679 XMMCalleeSavedFrameSize +=
Size;
2698 unsigned Opc =
STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
2706 bool isLiveIn =
MRI.isLiveIn(Reg);
2711 bool CanKill = !isLiveIn;
2715 if (
MRI.isLiveIn(*AReg)) {
2740 if (X86::VK16RegClass.
contains(Reg))
2763 "SEH should not use CATCHRET");
2768 if (
STI.is64Bit()) {
2800 if (
MI->getOpcode() == X86::CATCHRET) {
2814 if (X86::GR64RegClass.
contains(Reg) ||
2820 if (X86::VK16RegClass.
contains(Reg))
2829 unsigned Opc =
STI.is64Bit() ? X86::POP64r : X86::POP32r;
2832 if (!X86::GR64RegClass.
contains(Reg) &&
2852 SavedRegs.
set(BasePtr);
2861 if (
I->hasNestAttr() && !
I->use_empty())
2878 return Primary ? X86::R14 : X86::R13;
2880 return Primary ? X86::EBX : X86::EDI;
2885 return Primary ? X86::R11 : X86::R12;
2887 return Primary ? X86::R11D : X86::R12D;
2897 "nested function.");
2898 return Primary ? X86::EAX : X86::ECX;
2901 return Primary ? X86::EDX : X86::EAX;
2902 return Primary ? X86::ECX : X86::EAX;
2913 unsigned TlsReg, TlsOffset;
2918 assert(&(*MF.
begin()) == &PrologueMBB &&
"Shrink-wrapping not supported yet");
2922 "Scratch register is live-in");
2942 bool IsNested =
false;
2951 for (
const auto &LI : PrologueMBB.
liveins()) {
2970 TlsOffset =
IsLP64 ? 0x70 : 0x40;
2973 TlsOffset = 0x60 + 90*8;
2987 if (CompareStackPointer)
2988 ScratchReg =
IsLP64 ? X86::RSP : X86::ESP;
3001 TlsOffset = 0x48 + 90*4;
3014 if (CompareStackPointer)
3015 ScratchReg = X86::ESP;
3027 unsigned ScratchReg2;
3029 if (CompareStackPointer) {
3032 SaveScratch2 =
false;
3044 "Scratch register is live-in and not saved");
3050 BuildMI(checkMBB,
DL,
TII.get(X86::MOV32ri), ScratchReg2)
3059 BuildMI(checkMBB,
DL,
TII.get(X86::POP32r), ScratchReg2);
3073 const unsigned RegAX =
IsLP64 ? X86::RAX : X86::EAX;
3074 const unsigned Reg10 =
IsLP64 ? X86::R10 : X86::R10D;
3075 const unsigned Reg11 =
IsLP64 ? X86::R11 : X86::R11D;
3076 const unsigned MOVrr =
IsLP64 ? X86::MOV64rr : X86::MOV32rr;
3113 "code model and thunks not yet implemented.");
3130 BuildMI(allocMBB,
DL,
TII.get(X86::MORESTACK_RET_RESTORE_R10));
3139#ifdef EXPENSIVE_CHECKS
3150 for (
int i = 0, e = HiPELiteralsMD->
getNumOperands(); i != e; ++i) {
3152 if (
Node->getNumOperands() != 2)
continue;
3153 MDString *NodeName = dyn_cast<MDString>(
Node->getOperand(0));
3155 if (!NodeName || !NodeVal)
continue;
3157 if (ValConst && NodeName->
getString() == LiteralName) {
3163 +
" required but not provided");
3174 return MI.isMetaInstruction();
3200 assert(&(*MF.
begin()) == &PrologueMBB &&
"Shrink-wrapping not supported yet");
3205 if (!HiPELiteralsMD)
3207 "Can't generate HiPE prologue without runtime parameters");
3208 const unsigned HipeLeafWords
3210 Is64Bit ?
"AMD64_LEAF_WORDS" :
"X86_LEAF_WORDS");
3211 const unsigned CCRegisteredArgs =
Is64Bit ? 6 : 5;
3212 const unsigned Guaranteed = HipeLeafWords *
SlotSize;
3218 "HiPE prologue is only supported on Linux operating systems.");
3228 unsigned MoreStackForCalls = 0;
3230 for (
auto &
MBB : MF) {
3231 for (
auto &
MI :
MBB) {
3251 if (
F->getName().contains(
"erlang.") ||
F->getName().contains(
"bif_") ||
3255 unsigned CalleeStkArity =
3256 F->arg_size() > CCRegisteredArgs ?
F->arg_size()-CCRegisteredArgs : 0;
3257 if (HipeLeafWords - 1 > CalleeStkArity)
3258 MoreStackForCalls = std::max(MoreStackForCalls,
3259 (HipeLeafWords - 1 - CalleeStkArity) *
SlotSize);
3262 MaxStack += MoreStackForCalls;
3267 if (MaxStack > Guaranteed) {
3271 for (
const auto &LI : PrologueMBB.
liveins()) {
3279 unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
3280 unsigned LEAop, CMPop, CALLop;
3285 LEAop = X86::LEA64r;
3286 CMPop = X86::CMP64rm;
3287 CALLop = X86::CALL64pcrel32;
3291 LEAop = X86::LEA32r;
3292 CMPop = X86::CMP32rm;
3293 CALLop = X86::CALLpcrel32;
3298 "HiPE prologue scratch register is live-in");
3302 SPReg,
false, -MaxStack);
3305 .
addReg(ScratchReg), PReg,
false, SPLimitOffset);
3310 addExternalSymbol(
"inc_stack_0");
3312 SPReg,
false, -MaxStack);
3314 .
addReg(ScratchReg), PReg,
false, SPLimitOffset);
3322#ifdef EXPENSIVE_CHECKS
3339 if (NumPops != 1 && NumPops != 2)
3347 if (!Prev->isCall() || !Prev->getOperand(1).isRegMask())
3351 unsigned FoundRegs = 0;
3357 Is64Bit ? X86::GR64_NOREX_NOSPRegClass : X86::GR32_NOREX_NOSPRegClass;
3359 for (
auto Candidate : RegClass) {
3367 if (
MRI.isReserved(Candidate))
3372 if (MO.isReg() && MO.isDef() &&
3373 TRI->isSuperOrSubRegisterEq(MO.getReg(), Candidate)) {
3382 Regs[FoundRegs++] = Candidate;
3383 if (FoundRegs == (
unsigned)NumPops)
3391 while (FoundRegs < (
unsigned)NumPops)
3392 Regs[FoundRegs++] = Regs[0];
3394 for (
int i = 0; i < NumPops; ++i)
3396 TII.get(
STI.is64Bit() ? X86::POP64r : X86::POP32r), Regs[i]);
3405 unsigned Opcode =
I->getOpcode();
3406 bool isDestroy = Opcode ==
TII.getCallFrameDestroyOpcode();
3419 if (!reserveCallFrame) {
3440 bool HasDwarfEHHandlers = !WindowsCFI && !MF.
getLandingPads().empty();
3442 if (HasDwarfEHHandlers && !isDestroy &&
3452 Amount -= InternalAmt;
3462 int64_t StackAdjustment = isDestroy ? Amount : -Amount;
3464 if (StackAdjustment) {
3471 if (StackAdjustment) {
3472 if (!(
F.hasMinSize() &&
3473 adjustStackWithPops(
MBB, InsertPos,
DL, StackAdjustment)))
3474 BuildStackAdjustment(
MBB, InsertPos,
DL, StackAdjustment,
3487 int64_t CfaAdjustment = -StackAdjustment;
3490 if (CfaAdjustment) {
3503 while (CI !=
B && !std::prev(CI)->isCall())
3505 BuildStackAdjustment(
MBB, CI,
DL, -InternalAmt,
false);
3521 if (TLI.hasInlineStackProbe(MF) || TLI.hasStackProbeSymbol(MF))
3557 bool CompactUnwind =
3577 "restoring EBP/ESI on non-32-bit target");
3589 int EHRegSize = MFI.getObjectSize(FI);
3594 X86::EBP,
true, -EHRegSize)
3600 int EndOffset = -EHRegOffset - EHRegSize;
3613 "end of registration object above normal EBP position!");
3614 }
else if (UsedReg == BasePtr) {
3624 assert(UsedReg == BasePtr);
3645struct X86FrameSortingObject {
3646 bool IsValid =
false;
3647 unsigned ObjectIndex = 0;
3648 unsigned ObjectSize = 0;
3650 unsigned ObjectNumUses = 0;
3666struct X86FrameSortingComparator {
3667 inline bool operator()(
const X86FrameSortingObject &
A,
3668 const X86FrameSortingObject &
B)
const {
3669 uint64_t DensityAScaled, DensityBScaled;
3689 DensityAScaled =
static_cast<uint64_t>(
A.ObjectNumUses) *
3691 DensityBScaled =
static_cast<uint64_t>(
B.ObjectNumUses) *
3702 if (DensityAScaled == DensityBScaled)
3703 return A.ObjectAlignment <
B.ObjectAlignment;
3705 return DensityAScaled < DensityBScaled;
3719 if (ObjectsToAllocate.
empty())
3731 for (
auto &Obj : ObjectsToAllocate) {
3732 SortingObjects[Obj].IsValid =
true;
3733 SortingObjects[Obj].ObjectIndex = Obj;
3737 if (ObjectSize == 0)
3739 SortingObjects[Obj].ObjectSize = 4;
3741 SortingObjects[Obj].ObjectSize = ObjectSize;
3745 for (
auto &
MBB : MF) {
3746 for (
auto &
MI :
MBB) {
3747 if (
MI.isDebugInstr())
3753 int Index = MO.getIndex();
3757 SortingObjects[
Index].IsValid)
3758 SortingObjects[
Index].ObjectNumUses++;
3773 for (
auto &Obj : SortingObjects) {
3777 ObjectsToAllocate[i++] = Obj.ObjectIndex;
3781 if (!
TRI->hasStackRealignment(MF) &&
hasFP(MF))
3782 std::reverse(ObjectsToAllocate.
begin(), ObjectsToAllocate.
end());
3794 Offset += getWinEHFuncletFrameSize(MF);
3814 adjustFrameForMsvcCxxEh(MF);
3818void X86FrameLowering::adjustFrameForMsvcCxxEh(
MachineFunction &MF)
const {
3826 int64_t MinFixedObjOffset = -
SlotSize;
3832 int FrameIndex =
H.CatchObj.FrameIndex;
3833 if (FrameIndex != INT_MAX) {
3836 MinFixedObjOffset -= std::abs(MinFixedObjOffset) %
Align;
3844 MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8;
3845 int64_t UnwindHelpOffset = MinFixedObjOffset -
SlotSize;
unsigned const MachineRegisterInfo * MRI
static bool isFuncletReturnInstr(const MachineInstr &MI)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static const uint64_t kSplitStackAvailable
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Analysis containing CSE Info
Given that RA is a live value
const HexagonInstrInfo * TII
static cl::opt< int > PageSize("imp-null-check-page-size", cl::desc("The page size of the target in bytes"), cl::init(4096), cl::Hidden)
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
static bool isTailCallOpcode(unsigned Opc)
unsigned const TargetRegisterInfo * TRI
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallSet class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
static bool is64Bit(const char *name)
static unsigned calculateSetFPREG(uint64_t SPAdjust)
static unsigned GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary)
GetScratchRegister - Get a temp register for performing work in the segmented stack and the Erlang/Hi...
static unsigned getMOVriOpcode(bool Use64BitReg, int64_t Imm)
static unsigned getLEArOpcode(bool IsLP64)
static unsigned getADDriOpcode(bool IsLP64, int64_t Imm)
static bool flagsNeedToBePreservedBeforeTheTerminators(const MachineBasicBlock &MBB)
Check if the flags need to be preserved before the terminators.
static unsigned getSUBriOpcode(bool IsLP64, int64_t Imm)
static unsigned getANDriOpcode(bool IsLP64, int64_t Imm)
static bool isEAXLiveIn(MachineBasicBlock &MBB)
static unsigned getADDrrOpcode(bool IsLP64)
static bool HasNestArgument(const MachineFunction *MF)
static unsigned getHiPELiteral(NamedMDNode *HiPELiteralsMD, const StringRef LiteralName)
Lookup an ERTS parameter in the !hipe.literals named metadata node.
static bool blockEndIsUnreachable(const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI)
static unsigned getSUBrrOpcode(bool IsLP64)
static const unsigned FramePtr
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
bool empty() const
empty - Check if the array is empty.
LLVM Basic Block Representation.
iterator_range< const_set_bits_iterator > set_bits() const
static BranchProbability getOne()
static BranchProbability getZero()
The CalleeSavedInfo class tracks the information need to locate where a callee saved register is in t...
This is the shared class of boolean and integer constants.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasPersonalityFn() const
Check whether this function has a personality function.
Constant * getPersonalityFn() const
Get the personality function associated with this function.
AttributeList getAttributes() const
Return the attribute list for this Function.
bool needsUnwindTableEntry() const
True if this function needs an unwind table.
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
bool usesWindowsCFI() const
static MCCFIInstruction cfiDefCfaOffset(MCSymbol *L, int Offset)
.cfi_def_cfa_offset modifies a rule for computing CFA.
static MCCFIInstruction createRestore(MCSymbol *L, unsigned Register)
.cfi_restore says that the rule for Register is now the same as it was at the beginning of the functi...
static MCCFIInstruction createGnuArgsSize(MCSymbol *L, int Size)
A special wrapper for .cfi_escape that indicates GNU_ARGS_SIZE.
static MCCFIInstruction createDefCfaRegister(MCSymbol *L, unsigned Register)
.cfi_def_cfa_register modifies a rule for computing CFA.
OpType getOperation() const
static MCCFIInstruction createAdjustCfaOffset(MCSymbol *L, int Adjustment)
.cfi_adjust_cfa_offset Same as .cfi_def_cfa_offset, but Offset is a relative value that is added/subt...
static MCCFIInstruction cfiDefCfa(MCSymbol *L, unsigned Register, int Offset)
.cfi_def_cfa defines a rule for computing CFA as: take address from Register and add Offset to it.
static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register, int Offset)
.cfi_offset Previous value of Register is saved at offset Offset from CFA.
const MCObjectFileInfo * getObjectFileInfo() const
const MCRegisterInfo * getRegisterInfo() const
MCSection * getCompactUnwindSection() const
MCRegAliasIterator enumerates all registers aliasing Reg.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
StringRef getString() const
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
bool isEHPad() const
Returns true if the block is a landing pad.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator_range< livein_iterator > liveins() const
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
bool isEHFuncletEntry() const
Returns true if this is the entry block of an EH funclet.
LivenessQueryResult computeRegisterLiveness(const TargetRegisterInfo *TRI, MCRegister Reg, const_iterator Before, unsigned Neighborhood=10) const
Return whether (physical) register Reg has been defined and not killed as of just before Before.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
bool isReturnBlock() const
Convenience function that returns true if the block ends in a return instruction.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
DebugLoc findDebugLoc(instr_iterator MBBI)
Find the next valid DebugLoc starting at MBBI, skipping any DBG_VALUE and DBG_LABEL instructions.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
iterator_range< iterator > terminators()
iterator_range< succ_iterator > successors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
@ LQR_Live
Register is known to be (at least partially) live.
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
bool isCleanupFuncletEntry() const
Returns true if this is the entry block of a cleanup funclet.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool needsSplitStackProlog() const
Return true if this function requires a split stack prolog, even if it uses no stack space.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack frame for this function contains any variable sized objects.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool adjustsStack() const
Return true if this function adjusts the stack – e.g., when calling another function.
void ensureMaxAlignment(Align Alignment)
Make sure the function is at least Align bytes aligned.
bool hasCalls() const
Return true if the current function has any function calls.
bool isFrameAddressTaken() const
This method may be called any time after instruction selection is complete to determine if there is a call to @llvm.frameaddress in this function.
Align getMaxAlign() const
Return the alignment in bytes that this function must be aligned to, which is greater than the default stack alignment provided by the target.
void setObjectOffset(int ObjectIdx, int64_t SPOffset)
Set the stack frame offset of the specified object.
bool hasPatchPoint() const
This method may be called any time after instruction selection is complete to determine if there is a call to the llvm.experimental.patchpoint intrinsic.
bool hasOpaqueSPAdjustment() const
Returns true if the function contains opaque dynamic stack adjustments.
void setCVBytesOfCalleeSavedRegisters(unsigned S)
int CreateSpillStackObject(uint64_t Size, Align Alignment)
Create a new statically sized stack object that represents a spill slot, returning a nonnegative identifier to represent it.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
void setOffsetAdjustment(int Adj)
Set the correction for frame offsets.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool hasStackMap() const
This method may be called any time after instruction selection is complete to determine if there is a call to builtin @llvm.experimental.stackmap.
const std::vector< CalleeSavedInfo > & getCalleeSavedInfo() const
Returns a reference to call saved info vector for the current function.
unsigned getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
int getObjectIndexEnd() const
Return one past the maximum frame object index.
bool hasCopyImplyingStackAdjustment() const
Returns true if the function contains operations which will lower down to instructions which manipulate the stack pointer.
bool hasStackObjects() const
Return true if there are any stack objects in this function.
int CreateFixedSpillStackObject(uint64_t Size, int64_t SPOffset, bool IsImmutable=false)
Create a spill slot at a fixed location on the stack.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
void setStackSize(uint64_t Size)
Set the size of the stack.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
int getObjectIndexBegin() const
Return the minimum frame object index.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
const WinEHFuncInfo * getWinEHFuncInfo() const
getWinEHFuncInfo - Return information about how the current function uses Windows exception handling.
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
unsigned addFrameInst(const MCCFIInstruction &Inst)
void setHasWinCFI(bool v)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
const std::vector< MCCFIInstruction > & getFrameInstructions() const
Returns a reference to a list of cfi instructions in the function's prologue.
void makeDebugValueSubstitution(DebugInstrOperandPair, DebugInstrOperandPair, unsigned SubReg=0)
Create a substitution between one <instr,operand> value to a different, new value.
bool needsFrameMoves() const
True if this function needs frame moves for debug or exceptions.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
bool callsUnwindInit() const
void push_front(MachineBasicBlock *MBB)
const char * createExternalSymbolName(StringRef Name)
Allocate a string and populate it with the given external symbol name.
bool callsEHReturn() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const std::vector< LandingPadInfo > & getLandingPads() const
Return a reference to the landing pad info for the current function.
MachineModuleInfo & getMMI() const
bool shouldSplitStack() const
Should we be emitting segmented stack stuff for the function.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do so.
const MachineBasicBlock & front() const
bool verify(Pass *p=nullptr, const char *Banner=nullptr, bool AbortOnError=true) const
Run the current MachineFunction through the machine code verifier, useful for debugger use.
bool hasEHFunclets() const
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addCFIIndex(unsigned CFIIndex) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
unsigned getNumOperands() const
Retuns the total number of operands.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
unsigned getDebugInstrNum()
Fetch the instruction number of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
@ MOVolatile
The memory access is volatile.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
const MCContext & getContext() const
const Module * getModule() const
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
MachineBasicBlock * getMBB() const
void setIsDead(bool Val=true)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
bool isReserved(MCRegister PhysReg) const
isReserved - Returns true when PhysReg is a reserved register.
bool isLiveIn(Register Reg) const
NamedMDNode * getNamedMetadata(const Twine &Name) const
Return the first NamedMDNode in the module with the specified name.
unsigned getCodeViewFlag() const
Returns the CodeView Version by checking module flags.
MutableArrayRef - Represent a mutable reference to an array (0 or more elements consecutively in memo...
MDNode * getOperand(unsigned i) const
unsigned getNumOperands() const
Wrapper class representing virtual and physical registers.
SlotIndex - An opaque wrapper around machine indexes.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
StackOffset holds a fixed and a scalable offset in bytes.