LLVM  mainline
X86FrameLowering.cpp
Go to the documentation of this file.
00001 //===-- X86FrameLowering.cpp - X86 Frame Information ----------------------===//
00002 //
00003 //                     The LLVM Compiler Infrastructure
00004 //
00005 // This file is distributed under the University of Illinois Open Source
00006 // License. See LICENSE.TXT for details.
00007 //
00008 //===----------------------------------------------------------------------===//
00009 //
00010 // This file contains the X86 implementation of TargetFrameLowering class.
00011 //
00012 //===----------------------------------------------------------------------===//
00013 
00014 #include "X86FrameLowering.h"
00015 #include "X86InstrBuilder.h"
00016 #include "X86InstrInfo.h"
00017 #include "X86MachineFunctionInfo.h"
00018 #include "X86Subtarget.h"
00019 #include "X86TargetMachine.h"
00020 #include "llvm/ADT/SmallSet.h"
00021 #include "llvm/CodeGen/MachineFrameInfo.h"
00022 #include "llvm/CodeGen/MachineFunction.h"
00023 #include "llvm/CodeGen/MachineInstrBuilder.h"
00024 #include "llvm/CodeGen/MachineModuleInfo.h"
00025 #include "llvm/CodeGen/MachineRegisterInfo.h"
00026 #include "llvm/IR/DataLayout.h"
00027 #include "llvm/IR/Function.h"
00028 #include "llvm/MC/MCAsmInfo.h"
00029 #include "llvm/MC/MCSymbol.h"
00030 #include "llvm/Support/CommandLine.h"
00031 #include "llvm/Target/TargetOptions.h"
00032 #include "llvm/Support/Debug.h"
00033 #include <cstdlib>
00034 
00035 using namespace llvm;
00036 
00037 // FIXME: completely move here.
00038 extern cl::opt<bool> ForceStackAlign;
00039 
/// Construct the X86 frame lowering for \p STI.
///
/// The local-area offset passed to the base class (-8 for 64-bit, -4
/// otherwise) is the slot-sized offset below the stack pointer on entry
/// (the return-address slot pushed by CALL).
X86FrameLowering::X86FrameLowering(const X86Subtarget &STI,
                                   unsigned StackAlignOverride)
    : TargetFrameLowering(StackGrowsDown, StackAlignOverride,
                          STI.is64Bit() ? -8 : -4),
      STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {
  // Cache a bunch of frame-related predicates for this subtarget.
  SlotSize = TRI->getSlotSize();
  Is64Bit = STI.is64Bit();
  IsLP64 = STI.isTarget64BitLP64();
  // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
  Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
  StackPtr = TRI->getStackRegister();
}
00053 
00054 bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
00055   return !MF.getFrameInfo()->hasVarSizedObjects() &&
00056          !MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
00057 }
00058 
00059 /// canSimplifyCallFramePseudos - If there is a reserved call frame, the
00060 /// call frame pseudos can be simplified.  Having a FP, as in the default
00061 /// implementation, is not sufficient here since we can't always use it.
00062 /// Use a more nuanced condition.
00063 bool
00064 X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
00065   return hasReservedCallFrame(MF) ||
00066          (hasFP(MF) && !TRI->needsStackRealignment(MF)) ||
00067          TRI->hasBasePointer(MF);
00068 }
00069 
00070 // needsFrameIndexResolution - Do we need to perform FI resolution for
00071 // this function. Normally, this is required only when the function
00072 // has any stack objects. However, FI resolution actually has another job,
00073 // not apparent from the title - it resolves callframesetup/destroy 
00074 // that were not simplified earlier.
00075 // So, this is required for x86 functions that have push sequences even
00076 // when there are no stack objects.
00077 bool
00078 X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
00079   return MF.getFrameInfo()->hasStackObjects() ||
00080          MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
00081 }
00082 
00083 /// hasFP - Return true if the specified function should have a dedicated frame
00084 /// pointer register.  This is true if the function has variable sized allocas
00085 /// or if frame pointer elimination is disabled.
00086 bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
00087   const MachineFrameInfo *MFI = MF.getFrameInfo();
00088   const MachineModuleInfo &MMI = MF.getMMI();
00089 
00090   return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
00091           TRI->needsStackRealignment(MF) ||
00092           MFI->hasVarSizedObjects() ||
00093           MFI->isFrameAddressTaken() || MFI->hasInlineAsmWithSPAdjust() ||
00094           MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
00095           MMI.callsUnwindInit() || MMI.callsEHReturn() ||
00096           MFI->hasStackMap() || MFI->hasPatchPoint());
00097 }
00098 
00099 static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm) {
00100   if (IsLP64) {
00101     if (isInt<8>(Imm))
00102       return X86::SUB64ri8;
00103     return X86::SUB64ri32;
00104   } else {
00105     if (isInt<8>(Imm))
00106       return X86::SUB32ri8;
00107     return X86::SUB32ri;
00108   }
00109 }
00110 
00111 static unsigned getADDriOpcode(unsigned IsLP64, int64_t Imm) {
00112   if (IsLP64) {
00113     if (isInt<8>(Imm))
00114       return X86::ADD64ri8;
00115     return X86::ADD64ri32;
00116   } else {
00117     if (isInt<8>(Imm))
00118       return X86::ADD32ri8;
00119     return X86::ADD32ri;
00120   }
00121 }
00122 
00123 static unsigned getSUBrrOpcode(unsigned isLP64) {
00124   return isLP64 ? X86::SUB64rr : X86::SUB32rr;
00125 }
00126 
00127 static unsigned getADDrrOpcode(unsigned isLP64) {
00128   return isLP64 ? X86::ADD64rr : X86::ADD32rr;
00129 }
00130 
00131 static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {
00132   if (IsLP64) {
00133     if (isInt<8>(Imm))
00134       return X86::AND64ri8;
00135     return X86::AND64ri32;
00136   }
00137   if (isInt<8>(Imm))
00138     return X86::AND32ri8;
00139   return X86::AND32ri;
00140 }
00141 
00142 static unsigned getLEArOpcode(unsigned IsLP64) {
00143   return IsLP64 ? X86::LEA64r : X86::LEA32r;
00144 }
00145 
00146 /// findDeadCallerSavedReg - Return a caller-saved register that isn't live
00147 /// when it reaches the "return" instruction. We can then pop a stack object
00148 /// to this register without worry about clobbering it.
00149 static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
00150                                        MachineBasicBlock::iterator &MBBI,
00151                                        const TargetRegisterInfo *TRI,
00152                                        bool Is64Bit) {
00153   const MachineFunction *MF = MBB.getParent();
00154   const Function *F = MF->getFunction();
00155   if (!F || MF->getMMI().callsEHReturn())
00156     return 0;
00157 
00158   static const uint16_t CallerSavedRegs32Bit[] = {
00159     X86::EAX, X86::EDX, X86::ECX, 0
00160   };
00161 
00162   static const uint16_t CallerSavedRegs64Bit[] = {
00163     X86::RAX, X86::RDX, X86::RCX, X86::RSI, X86::RDI,
00164     X86::R8,  X86::R9,  X86::R10, X86::R11, 0
00165   };
00166 
00167   unsigned Opc = MBBI->getOpcode();
00168   switch (Opc) {
00169   default: return 0;
00170   case X86::RETL:
00171   case X86::RETQ:
00172   case X86::RETIL:
00173   case X86::RETIQ:
00174   case X86::TCRETURNdi:
00175   case X86::TCRETURNri:
00176   case X86::TCRETURNmi:
00177   case X86::TCRETURNdi64:
00178   case X86::TCRETURNri64:
00179   case X86::TCRETURNmi64:
00180   case X86::EH_RETURN:
00181   case X86::EH_RETURN64: {
00182     SmallSet<uint16_t, 8> Uses;
00183     for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
00184       MachineOperand &MO = MBBI->getOperand(i);
00185       if (!MO.isReg() || MO.isDef())
00186         continue;
00187       unsigned Reg = MO.getReg();
00188       if (!Reg)
00189         continue;
00190       for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
00191         Uses.insert(*AI);
00192     }
00193 
00194     const uint16_t *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit;
00195     for (; *CS; ++CS)
00196       if (!Uses.count(*CS))
00197         return *CS;
00198   }
00199   }
00200 
00201   return 0;
00202 }
00203 
00204 static bool isEAXLiveIn(MachineFunction &MF) {
00205   for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
00206        EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
00207     unsigned Reg = II->first;
00208 
00209     if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
00210         Reg == X86::AH || Reg == X86::AL)
00211       return true;
00212   }
00213 
00214   return false;
00215 }
00216 
00217 /// Check whether or not the terminators of \p MBB needs to read EFLAGS.
00218 static bool terminatorsNeedFlagsAsInput(const MachineBasicBlock &MBB) {
00219   for (const MachineInstr &MI : MBB.terminators()) {
00220     bool BreakNext = false;
00221     for (const MachineOperand &MO : MI.operands()) {
00222       if (!MO.isReg())
00223         continue;
00224       unsigned Reg = MO.getReg();
00225       if (Reg != X86::EFLAGS)
00226         continue;
00227 
00228       // This terminator needs an eflag that is not defined
00229       // by a previous terminator.
00230       if (!MO.isDef())
00231         return true;
00232       BreakNext = true;
00233     }
00234     if (BreakNext)
00235       break;
00236   }
00237   return false;
00238 }
00239 
/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
///
/// \param MBB       Block to emit into.
/// \param MBBI      Insertion point; also consulted when scavenging a dead
///                  caller-saved register.
/// \param NumBytes  Signed adjustment: negative allocates (SUB), positive
///                  deallocates (ADD).
/// \param InEpilogue Passed through to BuildStackAdjustment, which restricts
///                  which EFLAGS-clobbering forms may be used in an epilogue.
void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator &MBBI,
                                    int64_t NumBytes, bool InEpilogue) const {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;

  // Largest amount handled by a single immediate-form ADD/SUB below.
  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  while (Offset) {
    if (Offset > Chunk) {
      // Rather than emit a long series of instructions for large offsets,
      // load the offset into a register and do one sub/add
      unsigned Reg = 0;

      // When allocating, [ER]AX is usable unless it carries a live-in value;
      // otherwise scavenge a caller-saved register that is dead at the return.
      if (isSub && !isEAXLiveIn(*MBB.getParent()))
        Reg = (unsigned)(Is64Bit ? X86::RAX : X86::EAX);
      else
        Reg = findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);

      if (Reg) {
        unsigned Opc = Is64Bit ? X86::MOV64ri : X86::MOV32ri;
        BuildMI(MBB, MBBI, DL, TII.get(Opc), Reg)
          .addImm(Offset);
        Opc = isSub
          ? getSUBrrOpcode(Is64Bit)
          : getADDrrOpcode(Is64Bit);
        MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
          .addReg(StackPtr)
          .addReg(Reg);
        MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
        Offset = 0;
        continue;
      }
      // No scratch register: fall through and adjust in Chunk-sized pieces.
    }

    uint64_t ThisVal = std::min(Offset, Chunk);
    if (ThisVal == (Is64Bit ? 8 : 4)) {
      // Use push / pop instead.
      // A slot-sized adjustment fits in one PUSH (allocate) or POP
      // (deallocate) when a register is available.
      unsigned Reg = isSub
        ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
        : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
      if (Reg) {
        unsigned Opc = isSub
          ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
          : (Is64Bit ? X86::POP64r  : X86::POP32r);
        // For a PUSH the value stored is irrelevant, so the source register
        // is marked undef; for a POP the register is a (dead) definition.
        MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc))
          .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));
        if (isSub)
          MI->setFlag(MachineInstr::FrameSetup);
        Offset -= ThisVal;
        continue;
      }
    }

    // General case: one ADD/SUB (or LEA) of up to Chunk bytes.
    MachineInstrBuilder MI = BuildStackAdjustment(
        MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue);
    if (isSub)
      MI.setMIFlag(MachineInstr::FrameSetup);

    Offset -= ThisVal;
  }
}
00305 
/// Emit one instruction that adds \p Offset (may be negative) to the stack
/// pointer, using either LEA (preserves EFLAGS) or ADD/SUB-immediate.
///
/// \pre Offset != 0.
/// \return The builder for the emitted instruction so callers can tag it
/// (e.g. with MachineInstr::FrameSetup).
MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc DL,
    int64_t Offset, bool InEpilogue) const {
  assert(Offset != 0 && "zero offset stack adjustment requested");

  // On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
  // is tricky.
  bool UseLEA;
  if (!InEpilogue) {
    UseLEA = STI.useLeaForSP();
  } else {
    // If we can use LEA for SP but we shouldn't, check that none
    // of the terminators uses the eflags. Otherwise we will insert
    // a ADD that will redefine the eflags and break the condition.
    // Alternatively, we could move the ADD, but this may not be possible
    // and is an optimization anyway.
    UseLEA = canUseLEAForSPInEpilogue(*MBB.getParent());
    // The subtarget does not prefer LEA; keep it only when the terminators
    // actually need the incoming EFLAGS preserved.
    if (UseLEA && !STI.useLeaForSP())
      UseLEA = terminatorsNeedFlagsAsInput(MBB);
    // If that assert breaks, that means we do not do the right thing
    // in canUseAsEpilogue.
    assert((UseLEA || !terminatorsNeedFlagsAsInput(MBB)) &&
           "We shouldn't have allowed this insertion point");
  }

  MachineInstrBuilder MI;
  if (UseLEA) {
    // LEA StackPtr, [StackPtr + Offset] — does not touch EFLAGS.
    MI = addRegOffset(BuildMI(MBB, MBBI, DL,
                              TII.get(getLEArOpcode(Uses64BitFramePtr)),
                              StackPtr),
                      StackPtr, false, Offset);
  } else {
    bool IsSub = Offset < 0;
    uint64_t AbsOffset = IsSub ? -Offset : Offset;
    unsigned Opc = IsSub ? getSUBriOpcode(Uses64BitFramePtr, AbsOffset)
                         : getADDriOpcode(Uses64BitFramePtr, AbsOffset);
    MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
             .addReg(StackPtr)
             .addImm(AbsOffset);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }
  return MI;
}
00349 
00350 /// mergeSPUpdatesUp - Merge two stack-manipulating instructions upper iterator.
00351 static
00352 void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
00353                       unsigned StackPtr, uint64_t *NumBytes = nullptr) {
00354   if (MBBI == MBB.begin()) return;
00355 
00356   MachineBasicBlock::iterator PI = std::prev(MBBI);
00357   unsigned Opc = PI->getOpcode();
00358   if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
00359        Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
00360        Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
00361       PI->getOperand(0).getReg() == StackPtr) {
00362     if (NumBytes)
00363       *NumBytes += PI->getOperand(2).getImm();
00364     MBB.erase(PI);
00365   } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
00366               Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
00367              PI->getOperand(0).getReg() == StackPtr) {
00368     if (NumBytes)
00369       *NumBytes -= PI->getOperand(2).getImm();
00370     MBB.erase(PI);
00371   }
00372 }
00373 
/// Fold a stack-pointer ADD/SUB/LEA adjacent to \p MBBI into a returned
/// offset, erasing the folded instruction.
///
/// \param doMergeWithPrevious When true, examine the instruction before
///        \p MBBI; otherwise examine \p MBBI itself and advance it past the
///        erased instruction.
/// \return The signed adjustment removed: positive for ADD/LEA on the stack
///         pointer, negative for SUB, 0 when nothing was merged.
int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MBBI,
                                     bool doMergeWithPrevious) const {
  // No neighbor on the requested side.
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;
  // NI is where MBBI must move after erasing PI in the forward case.
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? nullptr
                                                       : std::next(MBBI);
  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
       Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
      PI->getOperand(0).getReg() == StackPtr){
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}
00404 
00405 void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
00406                                 MachineBasicBlock::iterator MBBI, DebugLoc DL,
00407                                 MCCFIInstruction CFIInst) const {
00408   MachineFunction &MF = *MBB.getParent();
00409   unsigned CFIIndex = MF.getMMI().addFrameInst(CFIInst);
00410   BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
00411       .addCFIIndex(CFIIndex);
00412 }
00413 
00414 void
00415 X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
00416                                             MachineBasicBlock::iterator MBBI,
00417                                             DebugLoc DL) const {
00418   MachineFunction &MF = *MBB.getParent();
00419   MachineFrameInfo *MFI = MF.getFrameInfo();
00420   MachineModuleInfo &MMI = MF.getMMI();
00421   const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
00422 
00423   // Add callee saved registers to move list.
00424   const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
00425   if (CSI.empty()) return;
00426 
00427   // Calculate offsets.
00428   for (std::vector<CalleeSavedInfo>::const_iterator
00429          I = CSI.begin(), E = CSI.end(); I != E; ++I) {
00430     int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
00431     unsigned Reg = I->getReg();
00432 
00433     unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
00434     BuildCFI(MBB, MBBI, DL,
00435              MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
00436   }
00437 }
00438 
00439 /// usesTheStack - This function checks if any of the users of EFLAGS
00440 /// copies the EFLAGS. We know that the code that lowers COPY of EFLAGS has
00441 /// to use the stack, and if we don't adjust the stack we clobber the first
00442 /// frame index.
00443 /// See X86InstrInfo::copyPhysReg.
00444 static bool usesTheStack(const MachineFunction &MF) {
00445   const MachineRegisterInfo &MRI = MF.getRegInfo();
00446 
00447   for (MachineRegisterInfo::reg_instr_iterator
00448        ri = MRI.reg_instr_begin(X86::EFLAGS), re = MRI.reg_instr_end();
00449        ri != re; ++ri)
00450     if (ri->isCopy())
00451       return true;
00452 
00453   return false;
00454 }
00455 
00456 void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
00457                                           MachineBasicBlock &MBB,
00458                                           MachineBasicBlock::iterator MBBI,
00459                                           DebugLoc DL) const {
00460   bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;
00461 
00462   unsigned CallOp;
00463   if (Is64Bit)
00464     CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
00465   else
00466     CallOp = X86::CALLpcrel32;
00467 
00468   const char *Symbol;
00469   if (Is64Bit) {
00470     if (STI.isTargetCygMing()) {
00471       Symbol = "___chkstk_ms";
00472     } else {
00473       Symbol = "__chkstk";
00474     }
00475   } else if (STI.isTargetCygMing())
00476     Symbol = "_alloca";
00477   else
00478     Symbol = "_chkstk";
00479 
00480   MachineInstrBuilder CI;
00481 
00482   // All current stack probes take AX and SP as input, clobber flags, and
00483   // preserve all registers. x86_64 probes leave RSP unmodified.
00484   if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
00485     // For the large code model, we have to call through a register. Use R11,
00486     // as it is scratch in all supported calling conventions.
00487     BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
00488         .addExternalSymbol(Symbol);
00489     CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
00490   } else {
00491     CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addExternalSymbol(Symbol);
00492   }
00493 
00494   unsigned AX = Is64Bit ? X86::RAX : X86::EAX;
00495   unsigned SP = Is64Bit ? X86::RSP : X86::ESP;
00496   CI.addReg(AX, RegState::Implicit)
00497       .addReg(SP, RegState::Implicit)
00498       .addReg(AX, RegState::Define | RegState::Implicit)
00499       .addReg(SP, RegState::Define | RegState::Implicit)
00500       .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
00501 
00502   if (Is64Bit) {
00503     // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
00504     // themselves. It also does not clobber %rax so we can reuse it when
00505     // adjusting %rsp.
00506     BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
00507         .addReg(X86::RSP)
00508         .addReg(X86::RAX);
00509   }
00510 }
00511 
00512 static unsigned calculateSetFPREG(uint64_t SPAdjust) {
00513   // Win64 ABI has a less restrictive limitation of 240; 128 works equally well
00514   // and might require smaller successive adjustments.
00515   const uint64_t Win64MaxSEHOffset = 128;
00516   uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
00517   // Win64 ABI requires 16-byte alignment for the UWOP_SET_FPREG opcode.
00518   return SEHFrameOffset & -16;
00519 }
00520 
00521 // If we're forcing a stack realignment we can't rely on just the frame
00522 // info, we need to know the ABI stack alignment as well in case we
00523 // have a call out.  Otherwise just make sure we have some alignment - we'll
00524 // go with the minimum SlotSize.
00525 uint64_t X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
00526   const MachineFrameInfo *MFI = MF.getFrameInfo();
00527   uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
00528   unsigned StackAlign = getStackAlignment();
00529   if (ForceStackAlign) {
00530     if (MFI->hasCalls())
00531       MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
00532     else if (MaxAlign < SlotSize)
00533       MaxAlign = SlotSize;
00534   }
00535   return MaxAlign;
00536 }
00537 
00538 void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
00539                                           MachineBasicBlock::iterator MBBI,
00540                                           DebugLoc DL,
00541                                           uint64_t MaxAlign) const {
00542   uint64_t Val = -MaxAlign;
00543   MachineInstr *MI =
00544       BuildMI(MBB, MBBI, DL, TII.get(getANDriOpcode(Uses64BitFramePtr, Val)),
00545               StackPtr)
00546           .addReg(StackPtr)
00547           .addImm(Val)
00548           .setMIFlag(MachineInstr::FrameSetup);
00549 
00550   // The EFLAGS implicit def is dead.
00551   MI->getOperand(3).setIsDead();
00552 }
00553 
00554 /// emitPrologue - Push callee-saved registers onto the stack, which
00555 /// automatically adjust the stack pointer. Adjust the stack pointer to allocate
00556 /// space for local variables. Also emit labels used by the exception handler to
00557 /// generate the exception handling frames.
00558 
00559 /*
00560   Here's a gist of what gets emitted:
00561 
00562   ; Establish frame pointer, if needed
00563   [if needs FP]
00564       push  %rbp
00565       .cfi_def_cfa_offset 16
00566       .cfi_offset %rbp, -16
      .seh_pushreg %rbp
00568       mov  %rsp, %rbp
00569       .cfi_def_cfa_register %rbp
00570 
00571   ; Spill general-purpose registers
00572   [for all callee-saved GPRs]
00573       pushq %<reg>
00574       [if not needs FP]
00575          .cfi_def_cfa_offset (offset from RETADDR)
00576       .seh_pushreg %<reg>
00577 
00578   ; If the required stack alignment > default stack alignment
00579   ; rsp needs to be re-aligned.  This creates a "re-alignment gap"
00580   ; of unknown size in the stack frame.
00581   [if stack needs re-alignment]
00582       and  $MASK, %rsp
00583 
00584   ; Allocate space for locals
00585   [if target is Windows and allocated space > 4096 bytes]
00586       ; Windows needs special care for allocations larger
00587       ; than one page.
00588       mov $NNN, %rax
00589       call ___chkstk_ms/___chkstk
00590       sub  %rax, %rsp
00591   [else]
00592       sub  $NNN, %rsp
00593 
00594   [if needs FP]
00595       .seh_stackalloc (size of XMM spill slots)
00596       .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots
00597   [else]
00598       .seh_stackalloc NNN
00599 
00600   ; Spill XMMs
00601   ; Note, that while only Windows 64 ABI specifies XMMs as callee-preserved,
00602   ; they may get spilled on any platform, if the current function
00603   ; calls @llvm.eh.unwind.init
00604   [if needs FP]
00605       [for all callee-saved XMM registers]
00606           movaps  %<xmm reg>, -MMM(%rbp)
00607       [for all callee-saved XMM registers]
00608           .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)
00609               ; i.e. the offset relative to (%rbp - SEHFrameOffset)
00610   [else]
00611       [for all callee-saved XMM registers]
00612           movaps  %<xmm reg>, KKK(%rsp)
00613       [for all callee-saved XMM registers]
00614           .seh_savexmm %<xmm reg>, KKK
00615 
00616   .seh_endprologue
00617 
00618   [if needs base pointer]
00619       mov  %rsp, %rbx
00620       [if needs to restore base pointer]
00621           mov %rsp, -MMM(%rbp)
00622 
00623   ; Emit CFI info
00624   [if needs FP]
00625       [for all callee-saved registers]
00626           .cfi_offset %<reg>, (offset from %rbp)
00627   [else]
00628        .cfi_def_cfa_offset (offset from RETADDR)
00629       [for all callee-saved registers]
00630           .cfi_offset %<reg>, (offset from %rsp)
00631 
00632   Notes:
00633   - .seh directives are emitted only for Windows 64 ABI
00634   - .cfi directives are emitted for all other ABIs
00635   - for 32-bit code, substitute %e?? registers for %r??
00636 */
00637 
00638 void X86FrameLowering::emitPrologue(MachineFunction &MF,
00639                                     MachineBasicBlock &MBB) const {
00640   assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
00641          "MF used frame lowering for wrong subtarget");
00642   MachineBasicBlock::iterator MBBI = MBB.begin();
00643   MachineFrameInfo *MFI = MF.getFrameInfo();
00644   const Function *Fn = MF.getFunction();
00645   MachineModuleInfo &MMI = MF.getMMI();
00646   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
00647   uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
00648   uint64_t StackSize = MFI->getStackSize();    // Number of bytes to allocate.
00649   bool HasFP = hasFP(MF);
00650   bool IsWin64CC = STI.isCallingConvWin64(Fn->getCallingConv());
00651   bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
00652   bool NeedsWinCFI = IsWin64Prologue && Fn->needsUnwindTableEntry();
00653   bool NeedsDwarfCFI =
00654       !IsWin64Prologue && (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
00655   unsigned FramePtr = TRI->getFrameRegister(MF);
00656   const unsigned MachineFramePtr =
00657       STI.isTarget64BitILP32()
00658           ? getX86SubSuperRegister(FramePtr, MVT::i64, false)
00659           : FramePtr;
00660   unsigned BasePtr = TRI->getBaseRegister();
00661   DebugLoc DL;
00662 
00663   // Add RETADDR move area to callee saved frame size.
00664   int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
00665   if (TailCallReturnAddrDelta && IsWin64Prologue)
00666     report_fatal_error("Can't handle guaranteed tail call under win64 yet");
00667 
00668   if (TailCallReturnAddrDelta < 0)
00669     X86FI->setCalleeSavedFrameSize(
00670       X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);
00671 
00672   bool UseStackProbe = (STI.isOSWindows() && !STI.isTargetMachO());
00673 
00674   // The default stack probe size is 4096 if the function has no stackprobesize
00675   // attribute.
00676   unsigned StackProbeSize = 4096;
00677   if (Fn->hasFnAttribute("stack-probe-size"))
00678     Fn->getFnAttribute("stack-probe-size")
00679         .getValueAsString()
00680         .getAsInteger(0, StackProbeSize);
00681 
00682   // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
00683   // function, and use up to 128 bytes of stack space, don't have a frame
00684   // pointer, calls, or dynamic alloca then we do not need to adjust the
00685   // stack pointer (we fit in the Red Zone). We also check that we don't
00686   // push and pop from the stack.
00687   if (Is64Bit && !Fn->hasFnAttribute(Attribute::NoRedZone) &&
00688       !TRI->needsStackRealignment(MF) &&
00689       !MFI->hasVarSizedObjects() && // No dynamic alloca.
00690       !MFI->adjustsStack() &&       // No calls.
00691       !IsWin64CC &&                 // Win64 has no Red Zone
00692       !usesTheStack(MF) &&          // Don't push and pop.
00693       !MF.shouldSplitStack()) {     // Regular stack
00694     uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
00695     if (HasFP) MinSize += SlotSize;
00696     StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
00697     MFI->setStackSize(StackSize);
00698   }
00699 
00700   // Insert stack pointer adjustment for later moving of return addr.  Only
00701   // applies to tail call optimized functions where the callee argument stack
00702   // size is bigger than the callers.
00703   if (TailCallReturnAddrDelta < 0) {
00704     BuildStackAdjustment(MBB, MBBI, DL, TailCallReturnAddrDelta,
00705                          /*InEpilogue=*/false)
00706         .setMIFlag(MachineInstr::FrameSetup);
00707   }
00708 
00709   // Mapping for machine moves:
00710   //
00711   //   DST: VirtualFP AND
00712   //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
00713   //        ELSE                        => DW_CFA_def_cfa
00714   //
00715   //   SRC: VirtualFP AND
00716   //        DST: Register               => DW_CFA_def_cfa_register
00717   //
00718   //   ELSE
00719   //        OFFSET < 0                  => DW_CFA_offset_extended_sf
00720   //        REG < 64                    => DW_CFA_offset + Reg
00721   //        ELSE                        => DW_CFA_offset_extended
00722 
00723   uint64_t NumBytes = 0;
00724   int stackGrowth = -SlotSize;
00725 
00726   if (HasFP) {
00727     // Calculate required stack adjustment.
00728     uint64_t FrameSize = StackSize - SlotSize;
00729     // If required, include space for extra hidden slot for stashing base pointer.
00730     if (X86FI->getRestoreBasePointer())
00731       FrameSize += SlotSize;
00732 
00733     NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
00734 
00735     // Callee-saved registers are pushed on stack before the stack is realigned.
00736     if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
00737       NumBytes = RoundUpToAlignment(NumBytes, MaxAlign);
00738 
00739     // Get the offset of the stack slot for the EBP register, which is
00740     // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
00741     // Update the frame offset adjustment.
00742     MFI->setOffsetAdjustment(-NumBytes);
00743 
00744     // Save EBP/RBP into the appropriate stack slot.
00745     BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
00746       .addReg(MachineFramePtr, RegState::Kill)
00747       .setMIFlag(MachineInstr::FrameSetup);
00748 
00749     if (NeedsDwarfCFI) {
00750       // Mark the place where EBP/RBP was saved.
00751       // Define the current CFA rule to use the provided offset.
00752       assert(StackSize);
00753       BuildCFI(MBB, MBBI, DL,
00754                MCCFIInstruction::createDefCfaOffset(nullptr, 2 * stackGrowth));
00755 
00756       // Change the rule for the FramePtr to be an "offset" rule.
00757       unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
00758       BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createOffset(
00759                                   nullptr, DwarfFramePtr, 2 * stackGrowth));
00760     }
00761 
00762     if (NeedsWinCFI) {
00763       BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
00764           .addImm(FramePtr)
00765           .setMIFlag(MachineInstr::FrameSetup);
00766     }
00767 
00768     if (!IsWin64Prologue) {
00769       // Update EBP with the new base value.
00770       BuildMI(MBB, MBBI, DL,
00771               TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
00772               FramePtr)
00773           .addReg(StackPtr)
00774           .setMIFlag(MachineInstr::FrameSetup);
00775     }
00776 
00777     if (NeedsDwarfCFI) {
00778       // Mark effective beginning of when frame pointer becomes valid.
00779       // Define the current CFA to use the EBP/RBP register.
00780       unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
00781       BuildCFI(MBB, MBBI, DL,
00782                MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr));
00783     }
00784 
00785     // Mark the FramePtr as live-in in every block.
00786     for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
00787       I->addLiveIn(MachineFramePtr);
00788   } else {
00789     NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
00790   }
00791 
00792   // Skip the callee-saved push instructions.
00793   bool PushedRegs = false;
00794   int StackOffset = 2 * stackGrowth;
00795 
00796   while (MBBI != MBB.end() &&
00797          (MBBI->getOpcode() == X86::PUSH32r ||
00798           MBBI->getOpcode() == X86::PUSH64r)) {
00799     PushedRegs = true;
00800     unsigned Reg = MBBI->getOperand(0).getReg();
00801     ++MBBI;
00802 
00803     if (!HasFP && NeedsDwarfCFI) {
00804       // Mark callee-saved push instruction.
00805       // Define the current CFA rule to use the provided offset.
00806       assert(StackSize);
00807       BuildCFI(MBB, MBBI, DL,
00808                MCCFIInstruction::createDefCfaOffset(nullptr, StackOffset));
00809       StackOffset += stackGrowth;
00810     }
00811 
00812     if (NeedsWinCFI) {
00813       BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg)).addImm(Reg).setMIFlag(
00814           MachineInstr::FrameSetup);
00815     }
00816   }
00817 
00818   // Realign stack after we pushed callee-saved registers (so that we'll be
00819   // able to calculate their offsets from the frame pointer).
00820   // Don't do this for Win64, it needs to realign the stack after the prologue.
00821   if (!IsWin64Prologue && TRI->needsStackRealignment(MF)) {
00822     assert(HasFP && "There should be a frame pointer if stack is realigned.");
00823     BuildStackAlignAND(MBB, MBBI, DL, MaxAlign);
00824   }
00825 
00826   // If there is an SUB32ri of ESP immediately before this instruction, merge
00827   // the two. This can be the case when tail call elimination is enabled and
00828   // the callee has more arguments then the caller.
00829   NumBytes -= mergeSPUpdates(MBB, MBBI, true);
00830 
00831   // Adjust stack pointer: ESP -= numbytes.
00832 
00833   // Windows and cygwin/mingw require a prologue helper routine when allocating
00834   // more than 4K bytes on the stack.  Windows uses __chkstk and cygwin/mingw
00835   // uses __alloca.  __alloca and the 32-bit version of __chkstk will probe the
00836   // stack and adjust the stack pointer in one go.  The 64-bit version of
00837   // __chkstk is only responsible for probing the stack.  The 64-bit prologue is
00838   // responsible for adjusting the stack pointer.  Touching the stack at 4K
00839   // increments is necessary to ensure that the guard pages used by the OS
00840   // virtual memory manager are allocated in correct sequence.
00841   uint64_t AlignedNumBytes = NumBytes;
00842   if (IsWin64Prologue && TRI->needsStackRealignment(MF))
00843     AlignedNumBytes = RoundUpToAlignment(AlignedNumBytes, MaxAlign);
00844   if (AlignedNumBytes >= StackProbeSize && UseStackProbe) {
00845     // Check whether EAX is livein for this function.
00846     bool isEAXAlive = isEAXLiveIn(MF);
00847 
00848     if (isEAXAlive) {
00849       // Sanity check that EAX is not livein for this function.
00850       // It should not be, so throw an assert.
00851       assert(!Is64Bit && "EAX is livein in x64 case!");
00852 
00853       // Save EAX
00854       BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
00855         .addReg(X86::EAX, RegState::Kill)
00856         .setMIFlag(MachineInstr::FrameSetup);
00857     }
00858 
00859     if (Is64Bit) {
00860       // Handle the 64-bit Windows ABI case where we need to call __chkstk.
00861       // Function prologue is responsible for adjusting the stack pointer.
00862       if (isUInt<32>(NumBytes)) {
00863         BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
00864             .addImm(NumBytes)
00865             .setMIFlag(MachineInstr::FrameSetup);
00866       } else if (isInt<32>(NumBytes)) {
00867         BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri32), X86::RAX)
00868             .addImm(NumBytes)
00869             .setMIFlag(MachineInstr::FrameSetup);
00870       } else {
00871         BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
00872             .addImm(NumBytes)
00873             .setMIFlag(MachineInstr::FrameSetup);
00874       }
00875     } else {
00876       // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
00877       // We'll also use 4 already allocated bytes for EAX.
00878       BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
00879         .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
00880         .setMIFlag(MachineInstr::FrameSetup);
00881     }
00882 
00883     // Save a pointer to the MI where we set AX.
00884     MachineBasicBlock::iterator SetRAX = MBBI;
00885     --SetRAX;
00886 
00887     // Call __chkstk, __chkstk_ms, or __alloca.
00888     emitStackProbeCall(MF, MBB, MBBI, DL);
00889 
00890     // Apply the frame setup flag to all inserted instrs.
00891     for (; SetRAX != MBBI; ++SetRAX)
00892       SetRAX->setFlag(MachineInstr::FrameSetup);
00893 
00894     if (isEAXAlive) {
00895       // Restore EAX
00896       MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
00897                                               X86::EAX),
00898                                       StackPtr, false, NumBytes - 4);
00899       MI->setFlag(MachineInstr::FrameSetup);
00900       MBB.insert(MBBI, MI);
00901     }
00902   } else if (NumBytes) {
00903     emitSPUpdate(MBB, MBBI, -(int64_t)NumBytes, /*InEpilogue=*/false);
00904   }
00905 
00906   if (NeedsWinCFI && NumBytes)
00907     BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
00908         .addImm(NumBytes)
00909         .setMIFlag(MachineInstr::FrameSetup);
00910 
00911   int SEHFrameOffset = 0;
00912   if (IsWin64Prologue && HasFP) {
00913     SEHFrameOffset = calculateSetFPREG(NumBytes);
00914     if (SEHFrameOffset)
00915       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr),
00916                    StackPtr, false, SEHFrameOffset);
00917     else
00918       BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rr), FramePtr).addReg(StackPtr);
00919 
00920     if (NeedsWinCFI)
00921       BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
00922           .addImm(FramePtr)
00923           .addImm(SEHFrameOffset)
00924           .setMIFlag(MachineInstr::FrameSetup);
00925   }
00926 
00927   while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) {
00928     const MachineInstr *FrameInstr = &*MBBI;
00929     ++MBBI;
00930 
00931     if (NeedsWinCFI) {
00932       int FI;
00933       if (unsigned Reg = TII.isStoreToStackSlot(FrameInstr, FI)) {
00934         if (X86::FR64RegClass.contains(Reg)) {
00935           int Offset = getFrameIndexOffset(MF, FI);
00936           Offset += SEHFrameOffset;
00937 
00938           BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
00939               .addImm(Reg)
00940               .addImm(Offset)
00941               .setMIFlag(MachineInstr::FrameSetup);
00942         }
00943       }
00944     }
00945   }
00946 
00947   if (NeedsWinCFI)
00948     BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
00949         .setMIFlag(MachineInstr::FrameSetup);
00950 
00951   // Realign stack after we spilled callee-saved registers (so that we'll be
00952   // able to calculate their offsets from the frame pointer).
00953   // Win64 requires aligning the stack after the prologue.
00954   if (IsWin64Prologue && TRI->needsStackRealignment(MF)) {
00955     assert(HasFP && "There should be a frame pointer if stack is realigned.");
00956     BuildStackAlignAND(MBB, MBBI, DL, MaxAlign);
00957   }
00958 
00959   // If we need a base pointer, set it up here. It's whatever the value
00960   // of the stack pointer is at this point. Any variable size objects
00961   // will be allocated after this, so we can still use the base pointer
00962   // to reference locals.
00963   if (TRI->hasBasePointer(MF)) {
00964     // Update the base pointer with the current stack pointer.
00965     unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr;
00966     BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
00967       .addReg(StackPtr)
00968       .setMIFlag(MachineInstr::FrameSetup);
00969     if (X86FI->getRestoreBasePointer()) {
00970       // Stash value of base pointer.  Saving RSP instead of EBP shortens dependence chain.
00971       unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
00972       addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)),
00973                    FramePtr, true, X86FI->getRestoreBasePointerOffset())
00974         .addReg(StackPtr)
00975         .setMIFlag(MachineInstr::FrameSetup);
00976     }
00977   }
00978 
00979   if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
00980     // Mark end of stack pointer adjustment.
00981     if (!HasFP && NumBytes) {
00982       // Define the current CFA rule to use the provided offset.
00983       assert(StackSize);
00984       BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfaOffset(
00985                                   nullptr, -StackSize + stackGrowth));
00986     }
00987 
00988     // Emit DWARF info specifying the offsets of the callee-saved registers.
00989     if (PushedRegs)
00990       emitCalleeSavedFrameMoves(MBB, MBBI, DL);
00991   }
00992 }
00993 
00994 bool X86FrameLowering::canUseLEAForSPInEpilogue(
00995     const MachineFunction &MF) const {
00996   // We can't use LEA instructions for adjusting the stack pointer if this is a
00997   // leaf function in the Win64 ABI.  Only ADD instructions may be used to
00998   // deallocate the stack.
00999   // This means that we can use LEA for SP in two situations:
01000   // 1. We *aren't* using the Win64 ABI which means we are free to use LEA.
01001   // 2. We *have* a frame pointer which means we are permitted to use LEA.
01002   return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF);
01003 }
01004 
/// emitEpilogue - Undo the frame set up by emitPrologue: restore the frame
/// pointer (if any), rewind past the callee-saved pops, release the local
/// stack area, and emit the Win64 SEH epilogue marker when required.
void X86FrameLowering::emitEpilogue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  // Insert before the first terminator (the return or tail call).
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
  const bool Is64BitILP32 = STI.isTarget64BitILP32();
  unsigned FramePtr = TRI->getFrameRegister(MF);
  // Under x32 the frame register is 32-bit, but POP operates on the full
  // 64-bit super-register.
  unsigned MachineFramePtr =
      Is64BitILP32 ? getX86SubSuperRegister(FramePtr, MVT::i64, false)
                   : FramePtr;

  bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
  bool NeedsWinCFI =
      IsWin64Prologue && MF.getFunction()->needsUnwindTableEntry();

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign = calculateMaxStackAlign(MF);
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  if (hasFP(MF)) {
    // Calculate required stack adjustment; mirrors the prologue's math
    // (frame size excludes the slot consumed by the saved frame pointer).
    uint64_t FrameSize = StackSize - SlotSize;
    NumBytes = FrameSize - CSSize;

    // Callee-saved registers were pushed on stack before the stack was
    // realigned.
    if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
      NumBytes = RoundUpToAlignment(FrameSize, MaxAlign);

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), MachineFramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }
  // Remember the allocation size before any SP-update merging below; the
  // Win64 SEH frame-offset math needs the original amount.
  uint64_t SEHStackAllocAmt = NumBytes;

  // Skip the callee-saved pop instructions, rewinding MBBI to just before
  // them so SP adjustments are inserted ahead of the pops.
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = std::prev(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::DBG_VALUE &&
        !PI->isTerminator())
      break;

    --MBBI;
  }
  // First callee-saved pop (or the terminator if there were none).
  MachineBasicBlock::iterator FirstCSPop = MBBI;

  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, then reset esp to point to the last callee-saved
  // slot before popping them off! Same applies for the case, when stack was
  // realigned.
  if (TRI->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) {
    if (TRI->needsStackRealignment(MF))
      MBBI = FirstCSPop;
    unsigned SEHFrameOffset = calculateSetFPREG(SEHStackAllocAmt);
    uint64_t LEAAmount =
        IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;

    // There are only two legal forms of epilogue:
    // - add SEHAllocationSize, %rsp
    // - lea SEHAllocationSize(%FramePtr), %rsp
    //
    // 'mov %FramePtr, %rsp' will not be recognized as an epilogue sequence.
    // However, we may use this sequence if we have a frame pointer because the
    // effects of the prologue can safely be undone.
    if (LEAAmount != 0) {
      unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
                   FramePtr, false, LEAAmount);
      // Step back so the SP-restoring instruction itself is not skipped by
      // later insertions.
      --MBBI;
    } else {
      unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(FramePtr);
      --MBBI;
    }
  } else if (NumBytes) {
    // Adjust stack pointer back: ESP += numbytes.
    emitSPUpdate(MBB, MBBI, NumBytes, /*InEpilogue=*/true);
    --MBBI;
  }

  // Windows unwinder will not invoke function's exception handler if IP is
  // either in prologue or in epilogue.  This behavior causes a problem when a
  // call immediately precedes an epilogue, because the return address points
  // into the epilogue.  To cope with that, we insert an epilogue marker here,
  // then replace it with a 'nop' if it ends up immediately after a CALL in the
  // final emitted code.
  if (NeedsWinCFI)
    BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));

  // Add the return addr area delta back since we are not tail calling.
  int Offset = -1 * X86FI->getTCReturnAddrDelta();
  assert(Offset >= 0 && "TCDelta should never be positive");
  if (Offset) {
    MBBI = MBB.getFirstTerminator();

    // Check for possible merge with preceding ADD instruction.
    Offset += mergeSPUpdates(MBB, MBBI, true);
    emitSPUpdate(MBB, MBBI, Offset, /*InEpilogue=*/true);
  }
}
01123 
/// getFrameIndexOffset - Compute the displacement from the frame anchor
/// register (FP, BP, or SP — selected separately by getFrameIndexReference)
/// to the object at frame index \p FI.
int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
                                          int FI) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  // Offset will hold the offset from the stack pointer at function entry to the
  // object.
  // We need to factor in additional offsets applied during the prologue to the
  // frame, base, and stack pointer depending on which is used.
  int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
  const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t StackSize = MFI->getStackSize();
  bool HasFP = hasFP(MF);
  bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
  int64_t FPDelta = 0;

  if (IsWin64Prologue) {
    // The Win64 prologue establishes FP at SP + calculateSetFPREG(NumBytes)
    // rather than at the traditional saved-FP slot, so FP-relative offsets
    // need a correction term (FPDelta), computed below.
    assert(!MFI->hasCalls() || (StackSize % 16) == 8);

    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    // If required, include space for extra hidden slot for stashing base pointer.
    if (X86FI->getRestoreBasePointer())
      FrameSize += SlotSize;
    uint64_t NumBytes = FrameSize - CSSize;

    uint64_t SEHFrameOffset = calculateSetFPREG(NumBytes);
    // The frame-address index refers to the established FP location itself.
    if (FI && FI == X86FI->getFAIndex())
      return -SEHFrameOffset;

    // FPDelta is the offset from the "traditional" FP location of the old base
    // pointer followed by return address and the location required by the
    // restricted Win64 prologue.
    // Add FPDelta to all offsets below that go through the frame pointer.
    FPDelta = FrameSize - SEHFrameOffset;
    assert((!MFI->hasCalls() || (FPDelta % 16) == 0) &&
           "FPDelta isn't aligned per the Win64 ABI!");
  }


  if (TRI->hasBasePointer(MF)) {
    assert(HasFP && "VLAs and dynamic stack realign, but no FP?!");
    if (FI < 0) {
      // Skip the saved EBP.
      return Offset + SlotSize + FPDelta;
    } else {
      assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
      return Offset + StackSize;
    }
  } else if (TRI->needsStackRealignment(MF)) {
    // Same arithmetic as the base-pointer case; kept separate because
    // the anchor register chosen by getFrameIndexReference differs.
    if (FI < 0) {
      // Skip the saved EBP.
      return Offset + SlotSize + FPDelta;
    } else {
      assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
      return Offset + StackSize;
    }
    // FIXME: Support tail calls
  } else {
    // FP-relative (or SP-relative when frameless) addressing.
    if (!HasFP)
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += SlotSize;

    // Skip the RETADDR move area
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return Offset + FPDelta;
}
01196 
01197 int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
01198                                              unsigned &FrameReg) const {
01199   // We can't calculate offset from frame pointer if the stack is realigned,
01200   // so enforce usage of stack/base pointer.  The base pointer is used when we
01201   // have dynamic allocas in addition to dynamic realignment.
01202   if (TRI->hasBasePointer(MF))
01203     FrameReg = TRI->getBaseRegister();
01204   else if (TRI->needsStackRealignment(MF))
01205     FrameReg = TRI->getStackRegister();
01206   else
01207     FrameReg = TRI->getFrameRegister(MF);
01208   return getFrameIndexOffset(MF, FI);
01209 }
01210 
01211 // Simplified from getFrameIndexOffset keeping only StackPointer cases
01212 int X86FrameLowering::getFrameIndexOffsetFromSP(const MachineFunction &MF, int FI) const {
01213   const MachineFrameInfo *MFI = MF.getFrameInfo();
01214   // Does not include any dynamic realign.
01215   const uint64_t StackSize = MFI->getStackSize();
01216   {
01217 #ifndef NDEBUG
01218     // Note: LLVM arranges the stack as:
01219     // Args > Saved RetPC (<--FP) > CSRs > dynamic alignment (<--BP)
01220     //      > "Stack Slots" (<--SP)
01221     // We can always address StackSlots from RSP.  We can usually (unless
01222     // needsStackRealignment) address CSRs from RSP, but sometimes need to
01223     // address them from RBP.  FixedObjects can be placed anywhere in the stack
01224     // frame depending on their specific requirements (i.e. we can actually
01225     // refer to arguments to the function which are stored in the *callers*
01226     // frame).  As a result, THE RESULT OF THIS CALL IS MEANINGLESS FOR CSRs
01227     // AND FixedObjects IFF needsStackRealignment or hasVarSizedObject.
01228 
01229     assert(!TRI->hasBasePointer(MF) && "we don't handle this case");
01230 
01231     // We don't handle tail calls, and shouldn't be seeing them
01232     // either.
01233     int TailCallReturnAddrDelta =
01234         MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta();
01235     assert(!(TailCallReturnAddrDelta < 0) && "we don't handle this case!");
01236 #endif
01237   }
01238 
01239   // This is how the math works out:
01240   //
01241   //  %rsp grows (i.e. gets lower) left to right. Each box below is
01242   //  one word (eight bytes).  Obj0 is the stack slot we're trying to
01243   //  get to.
01244   //
01245   //    ----------------------------------
01246   //    | BP | Obj0 | Obj1 | ... | ObjN |
01247   //    ----------------------------------
01248   //    ^    ^      ^                   ^
01249   //    A    B      C                   E
01250   //
01251   // A is the incoming stack pointer.
01252   // (B - A) is the local area offset (-8 for x86-64) [1]
01253   // (C - A) is the Offset returned by MFI->getObjectOffset for Obj0 [2]
01254   //
01255   // |(E - B)| is the StackSize (absolute value, positive).  For a
01256   // stack that grown down, this works out to be (B - E). [3]
01257   //
01258   // E is also the value of %rsp after stack has been set up, and we
01259   // want (C - E) -- the value we can add to %rsp to get to Obj0.  Now
01260   // (C - E) == (C - A) - (B - A) + (B - E)
01261   //            { Using [1], [2] and [3] above }
01262   //         == getObjectOffset - LocalAreaOffset + StackSize
01263   //
01264 
01265   // Get the Offset from the StackPointer
01266   int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
01267 
01268   return Offset + StackSize;
01269 }
01270 // Simplified from getFrameIndexReference keeping only StackPointer cases
01271 int X86FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
01272                                                    int FI,
01273                                                    unsigned &FrameReg) const {
01274   assert(!TRI->hasBasePointer(MF) && "we don't handle this case");
01275 
01276   FrameReg = TRI->getStackRegister();
01277   return getFrameIndexOffsetFromSP(MF, FI);
01278 }
01279 
01280 bool X86FrameLowering::assignCalleeSavedSpillSlots(
01281     MachineFunction &MF, const TargetRegisterInfo *TRI,
01282     std::vector<CalleeSavedInfo> &CSI) const {
01283   MachineFrameInfo *MFI = MF.getFrameInfo();
01284   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
01285 
01286   unsigned CalleeSavedFrameSize = 0;
01287   int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();
01288 
01289   if (hasFP(MF)) {
01290     // emitPrologue always spills frame register the first thing.
01291     SpillSlotOffset -= SlotSize;
01292     MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
01293 
01294     // Since emitPrologue and emitEpilogue will handle spilling and restoring of
01295     // the frame register, we can delete it from CSI list and not have to worry
01296     // about avoiding it later.
01297     unsigned FPReg = TRI->getFrameRegister(MF);
01298     for (unsigned i = 0; i < CSI.size(); ++i) {
01299       if (TRI->regsOverlap(CSI[i].getReg(),FPReg)) {
01300         CSI.erase(CSI.begin() + i);
01301         break;
01302       }
01303     }
01304   }
01305 
01306   // Assign slots for GPRs. It increases frame size.
01307   for (unsigned i = CSI.size(); i != 0; --i) {
01308     unsigned Reg = CSI[i - 1].getReg();
01309 
01310     if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
01311       continue;
01312 
01313     SpillSlotOffset -= SlotSize;
01314     CalleeSavedFrameSize += SlotSize;
01315 
01316     int SlotIndex = MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
01317     CSI[i - 1].setFrameIdx(SlotIndex);
01318   }
01319 
01320   X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);
01321 
01322   // Assign slots for XMMs.
01323   for (unsigned i = CSI.size(); i != 0; --i) {
01324     unsigned Reg = CSI[i - 1].getReg();
01325     if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
01326       continue;
01327 
01328     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
01329     // ensure alignment
01330     SpillSlotOffset -= std::abs(SpillSlotOffset) % RC->getAlignment();
01331     // spill into slot
01332     SpillSlotOffset -= RC->getSize();
01333     int SlotIndex =
01334         MFI->CreateFixedSpillStackObject(RC->getSize(), SpillSlotOffset);
01335     CSI[i - 1].setFrameIdx(SlotIndex);
01336     MFI->ensureMaxAlignment(RC->getAlignment());
01337   }
01338 
01339   return true;
01340 }
01341 
01342 bool X86FrameLowering::spillCalleeSavedRegisters(
01343     MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
01344     const std::vector<CalleeSavedInfo> &CSI,
01345     const TargetRegisterInfo *TRI) const {
01346   DebugLoc DL = MBB.findDebugLoc(MI);
01347 
01348   // Push GPRs. It increases frame size.
01349   unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
01350   for (unsigned i = CSI.size(); i != 0; --i) {
01351     unsigned Reg = CSI[i - 1].getReg();
01352 
01353     if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
01354       continue;
01355     // Add the callee-saved register as live-in. It's killed at the spill.
01356     MBB.addLiveIn(Reg);
01357 
01358     BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
01359       .setMIFlag(MachineInstr::FrameSetup);
01360   }
01361 
01362   // Make XMM regs spilled. X86 does not have ability of push/pop XMM.
01363   // It can be done by spilling XMMs to stack frame.
01364   for (unsigned i = CSI.size(); i != 0; --i) {
01365     unsigned Reg = CSI[i-1].getReg();
01366     if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
01367       continue;
01368     // Add the callee-saved register as live-in. It's killed at the spill.
01369     MBB.addLiveIn(Reg);
01370     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
01371 
01372     TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i - 1].getFrameIdx(), RC,
01373                             TRI);
01374     --MI;
01375     MI->setFlag(MachineInstr::FrameSetup);
01376     ++MI;
01377   }
01378 
01379   return true;
01380 }
01381 
01382 bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
01383                                                MachineBasicBlock::iterator MI,
01384                                         const std::vector<CalleeSavedInfo> &CSI,
01385                                           const TargetRegisterInfo *TRI) const {
01386   if (CSI.empty())
01387     return false;
01388 
01389   DebugLoc DL = MBB.findDebugLoc(MI);
01390 
01391   // Reload XMMs from stack frame.
01392   for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
01393     unsigned Reg = CSI[i].getReg();
01394     if (X86::GR64RegClass.contains(Reg) ||
01395         X86::GR32RegClass.contains(Reg))
01396       continue;
01397 
01398     const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
01399     TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RC, TRI);
01400   }
01401 
01402   // POP GPRs.
01403   unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
01404   for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
01405     unsigned Reg = CSI[i].getReg();
01406     if (!X86::GR64RegClass.contains(Reg) &&
01407         !X86::GR32RegClass.contains(Reg))
01408       continue;
01409 
01410     BuildMI(MBB, MI, DL, TII.get(Opc), Reg);
01411   }
01412   return true;
01413 }
01414 
01415 void
01416 X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
01417                                                        RegScavenger *RS) const {
01418   MachineFrameInfo *MFI = MF.getFrameInfo();
01419 
01420   X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
01421   int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
01422 
01423   if (TailCallReturnAddrDelta < 0) {
01424     // create RETURNADDR area
01425     //   arg
01426     //   arg
01427     //   RETADDR
01428     //   { ...
01429     //     RETADDR area
01430     //     ...
01431     //   }
01432     //   [EBP]
01433     MFI->CreateFixedObject(-TailCallReturnAddrDelta,
01434                            TailCallReturnAddrDelta - SlotSize, true);
01435   }
01436 
01437   // Spill the BasePtr if it's used.
01438   if (TRI->hasBasePointer(MF))
01439     MF.getRegInfo().setPhysRegUsed(TRI->getBaseRegister());
01440 }
01441 
01442 static bool
01443 HasNestArgument(const MachineFunction *MF) {
01444   const Function *F = MF->getFunction();
01445   for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
01446        I != E; I++) {
01447     if (I->hasNestAttr())
01448       return true;
01449   }
01450   return false;
01451 }
01452 
01453 /// GetScratchRegister - Get a temp register for performing work in the
01454 /// segmented stack and the Erlang/HiPE stack prologue. Depending on platform
01455 /// and the properties of the function either one or two registers will be
01456 /// needed. Set primary to true for the first register, false for the second.
01457 static unsigned
01458 GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary) {
01459   CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();
01460 
01461   // Erlang stuff.
01462   if (CallingConvention == CallingConv::HiPE) {
01463     if (Is64Bit)
01464       return Primary ? X86::R14 : X86::R13;
01465     else
01466       return Primary ? X86::EBX : X86::EDI;
01467   }
01468 
01469   if (Is64Bit) {
01470     if (IsLP64)
01471       return Primary ? X86::R11 : X86::R12;
01472     else
01473       return Primary ? X86::R11D : X86::R12D;
01474   }
01475 
01476   bool IsNested = HasNestArgument(&MF);
01477 
01478   if (CallingConvention == CallingConv::X86_FastCall ||
01479       CallingConvention == CallingConv::Fast) {
01480     if (IsNested)
01481       report_fatal_error("Segmented stacks does not support fastcall with "
01482                          "nested function.");
01483     return Primary ? X86::EAX : X86::ECX;
01484   }
01485   if (IsNested)
01486     return Primary ? X86::EDX : X86::EAX;
01487   return Primary ? X86::ECX : X86::EAX;
01488 }
01489 
// The stack limit in the TCB is set to this many bytes above the actual stack
// limit, providing a safety margin for the segmented-stack check.
// NOTE(review): presumably frames within this margin can skip a precise
// limit comparison — confirm against the adjustForSegmentedStacks logic.
static const uint64_t kSplitStackAvailable = 256;
01493 
/// Emit a split-stack ("segmented stacks") check ahead of the normal prologue:
/// compare the stack pointer (adjusted down by the frame size) against the
/// current stacklet's limit, which lives at a platform-specific TLS offset,
/// and call __morestack to obtain a new stacklet when the frame would not fit.
///
/// Two blocks are prepended to the function:
///   checkMBB - performs the limit comparison and jumps straight to the
///              original prologue when there is enough room;
///   allocMBB - passes the frame size and argument-area size to __morestack
///              and returns via a MORESTACK_RET* pseudo, after which
///              execution resumes at the original prologue.
void X86FrameLowering::adjustForSegmentedStacks(
    MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  uint64_t StackSize;
  unsigned TlsReg, TlsOffset;
  DebugLoc DL;

  unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
  assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
         "Scratch register is live-in");

  if (MF.getFunction()->isVarArg())
    report_fatal_error("Segmented stacks do not support vararg functions.");
  if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
      !STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
      !STI.isTargetDragonFly())
    report_fatal_error("Segmented stacks not supported on this platform.");

  // Eventually StackSize will be calculated by a link-time pass; which will
  // also decide whether checking code needs to be injected into this particular
  // prologue.
  StackSize = MFI->getStackSize();

  // Do not generate a prologue for functions with a stack of size zero
  if (StackSize == 0)
    return;

  MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool IsNested = false;

  // We need to know if the function has a nest argument only in 64 bit mode.
  if (Is64Bit)
    IsNested = HasNestArgument(&MF);

  // The MOV R10, RAX needs to be in a different block, since the RET we emit in
  // allocMBB needs to be last (terminating) instruction.

  // Both new blocks execute before the original entry, so they inherit its
  // live-in set.
  for (MachineBasicBlock::livein_iterator i = PrologueMBB.livein_begin(),
                                          e = PrologueMBB.livein_end();
       i != e; i++) {
    allocMBB->addLiveIn(*i);
    checkMBB->addLiveIn(*i);
  }

  // The static chain is carried in R10 (R10D under ILP32) and must survive
  // into allocMBB, where it is moved out of the way before __morestack.
  if (IsNested)
    allocMBB->addLiveIn(IsLP64 ? X86::R10 : X86::R10D);

  // Resulting block order: checkMBB -> allocMBB -> original prologue.
  MF.push_front(allocMBB);
  MF.push_front(checkMBB);

  // When the frame size is less than 256 we just compare the stack
  // boundary directly to the value of the stack pointer, per gcc.
  bool CompareStackPointer = StackSize < kSplitStackAvailable;

  // Read the limit off the current stacklet off the stack_guard location.
  if (Is64Bit) {
    // Select the per-OS TLS segment register and offset that holds the
    // stacklet limit.
    if (STI.isTargetLinux()) {
      TlsReg = X86::FS;
      TlsOffset = IsLP64 ? 0x70 : 0x40;
    } else if (STI.isTargetDarwin()) {
      TlsReg = X86::GS;
      TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
    } else if (STI.isTargetWin64()) {
      TlsReg = X86::GS;
      TlsOffset = 0x28; // pvArbitrary, reserved for application use
    } else if (STI.isTargetFreeBSD()) {
      TlsReg = X86::FS;
      TlsOffset = 0x18;
    } else if (STI.isTargetDragonFly()) {
      TlsReg = X86::FS;
      TlsOffset = 0x20; // use tls_tcb.tcb_segstack
    } else {
      report_fatal_error("Segmented stacks not supported on this platform.");
    }

    // Either compare SP itself (small frames) or materialize SP - StackSize
    // into the scratch register with an LEA (leaves EFLAGS intact).
    if (CompareStackPointer)
      ScratchReg = IsLP64 ? X86::RSP : X86::ESP;
    else
      BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r), ScratchReg).addReg(X86::RSP)
        .addImm(1).addReg(0).addImm(-StackSize).addReg(0);

    // cmp ScratchReg, <TlsReg>:[TlsOffset]
    BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm)).addReg(ScratchReg)
      .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
  } else {
    if (STI.isTargetLinux()) {
      TlsReg = X86::GS;
      TlsOffset = 0x30;
    } else if (STI.isTargetDarwin()) {
      TlsReg = X86::GS;
      TlsOffset = 0x48 + 90*4;
    } else if (STI.isTargetWin32()) {
      TlsReg = X86::FS;
      TlsOffset = 0x14; // pvArbitrary, reserved for application use
    } else if (STI.isTargetDragonFly()) {
      TlsReg = X86::FS;
      TlsOffset = 0x10; // use tls_tcb.tcb_segstack
    } else if (STI.isTargetFreeBSD()) {
      report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
    } else {
      report_fatal_error("Segmented stacks not supported on this platform.");
    }

    if (CompareStackPointer)
      ScratchReg = X86::ESP;
    else
      BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
        .addImm(1).addReg(0).addImm(-StackSize).addReg(0);

    if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() ||
        STI.isTargetDragonFly()) {
      BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
        .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
    } else if (STI.isTargetDarwin()) {

      // TlsOffset doesn't fit into a mod r/m byte so we need an extra register.
      unsigned ScratchReg2;
      bool SaveScratch2;
      if (CompareStackPointer) {
        // The primary scratch register is available for holding the TLS offset.
        ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, true);
        SaveScratch2 = false;
      } else {
        // Need to use a second register to hold the TLS offset
        ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, false);

        // Unfortunately, with fastcc the second scratch register may hold an
        // argument.
        SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
      }

      // If Scratch2 is live-in then it needs to be saved.
      assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
             "Scratch register is live-in and not saved");

      // Preserve the live-in value around its use as the TLS-offset holder.
      if (SaveScratch2)
        BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
          .addReg(ScratchReg2, RegState::Kill);

      // cmp ScratchReg, <TlsReg>:[ScratchReg2]
      BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
        .addImm(TlsOffset);
      BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
        .addReg(ScratchReg)
        .addReg(ScratchReg2).addImm(1).addReg(0)
        .addImm(0)
        .addReg(TlsReg);

      if (SaveScratch2)
        BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
    }
  }

  // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
  // It jumps to normal execution of the function body.
  BuildMI(checkMBB, DL, TII.get(X86::JA_1)).addMBB(&PrologueMBB);

  // On 32 bit we first push the arguments size and then the frame size. On 64
  // bit, we pass the stack frame size in r10 and the argument size in r11.
  if (Is64Bit) {
    // Functions with nested arguments use R10, so it needs to be saved across
    // the call to _morestack

    const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;
    const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;
    const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;
    const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;
    const unsigned MOVri = IsLP64 ? X86::MOV64ri : X86::MOV32ri;

    // Park the static chain in RAX so R10 can carry the frame size.
    if (IsNested)
      BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10);

    BuildMI(allocMBB, DL, TII.get(MOVri), Reg10)
      .addImm(StackSize);
    BuildMI(allocMBB, DL, TII.get(MOVri), Reg11)
      .addImm(X86FI->getArgumentStackSize());
    MF.getRegInfo().setPhysRegUsed(Reg10);
    MF.getRegInfo().setPhysRegUsed(Reg11);
  } else {
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(X86FI->getArgumentStackSize());
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(StackSize);
  }

  // __morestack is in libgcc
  if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
    // Under the large code model, we cannot assume that __morestack lives
    // within 2^31 bytes of the call site, so we cannot use pc-relative
    // addressing. We cannot perform the call via a temporary register,
    // as the rax register may be used to store the static chain, and all
    // other suitable registers may be either callee-save or used for
    // parameter passing. We cannot use the stack at this point either
    // because __morestack manipulates the stack directly.
    //
    // To avoid these issues, perform an indirect call via a read-only memory
    // location containing the address.
    //
    // This solution is not perfect, as it assumes that the .rodata section
    // is laid out within 2^31 bytes of each function body, but this seems
    // to be sufficient for JIT.
    BuildMI(allocMBB, DL, TII.get(X86::CALL64m))
        .addReg(X86::RIP)
        .addImm(0)
        .addReg(0)
        .addExternalSymbol("__morestack_addr")
        .addReg(0);
    MF.getMMI().setUsesMorestackAddr(true);
  } else {
    if (Is64Bit)
      BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
        .addExternalSymbol("__morestack");
    else
      BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol("__morestack");
  }

  // Terminate allocMBB with a RET pseudo; the RESTORE_R10 variant also moves
  // the saved static chain back from RAX into R10.
  if (IsNested)
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
  else
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));

  allocMBB->addSuccessor(&PrologueMBB);

  checkMBB->addSuccessor(allocMBB);
  checkMBB->addSuccessor(&PrologueMBB);

#ifdef XDEBUG
  MF.verify();
#endif
}
01725 
/// Erlang programs may need a special prologue to handle the stack size they
/// might need at runtime. That is because Erlang/OTP does not implement a C
/// stack but uses a custom implementation of hybrid stack/heap architecture.
/// (for more information see Eric Stenman's Ph.D. thesis:
/// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
///
/// CheckStack:
///       temp0 = sp - MaxStack
///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
/// OldStart:
///       ...
/// IncStack:
///       call inc_stack   # doubles the stack space
///       temp0 = sp - MaxStack
///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
void X86FrameLowering::adjustForHiPEPrologue(
    MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  DebugLoc DL;
  // HiPE-specific values
  const unsigned HipeLeafWords = 24;     // stack words guaranteed to any leaf
  const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5; // args passed in registers
  const unsigned Guaranteed = HipeLeafWords * SlotSize;
  // Arity of arguments this function's callers spill to the stack (those
  // beyond the register-passed ones).
  unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
                            MF.getFunction()->arg_size() - CCRegisteredArgs : 0;
  unsigned MaxStack = MFI->getStackSize() + CallerStkArity*SlotSize + SlotSize;

  assert(STI.isTargetLinux() &&
         "HiPE prologue is only supported on Linux operating systems.");

  // Compute the largest caller's frame that is needed to fit the callees'
  // frames. This 'MaxStack' is computed from:
  //
  // a) the fixed frame size, which is the space needed for all spilled temps,
  // b) outgoing on-stack parameter areas, and
  // c) the minimum stack space this function needs to make available for the
  //    functions it calls (a tunable ABI property).
  if (MFI->hasCalls()) {
    unsigned MoreStackForCalls = 0;

    // Scan every call in the function and grow MaxStack so each non-builtin
    // callee is guaranteed its HipeLeafWords minus what it gets via its own
    // on-stack arguments.
    for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();
         MBBI != MBBE; ++MBBI)
      for (MachineBasicBlock::iterator MI = MBBI->begin(), ME = MBBI->end();
           MI != ME; ++MI) {
        if (!MI->isCall())
          continue;

        // Get callee operand.
        const MachineOperand &MO = MI->getOperand(0);

        // Only take account of global function calls (no closures etc.).
        if (!MO.isGlobal())
          continue;

        const Function *F = dyn_cast<Function>(MO.getGlobal());
        if (!F)
          continue;

        // Do not update 'MaxStack' for primitive and built-in functions
        // (encoded with names either starting with "erlang."/"bif_" or not
        // having a ".", such as a simple <Module>.<Function>.<Arity>, or an
        // "_", such as the BIF "suspend_0") as they are executed on another
        // stack.
        if (F->getName().find("erlang.") != StringRef::npos ||
            F->getName().find("bif_") != StringRef::npos ||
            F->getName().find_first_of("._") == StringRef::npos)
          continue;

        unsigned CalleeStkArity =
          F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0;
        if (HipeLeafWords - 1 > CalleeStkArity)
          MoreStackForCalls = std::max(MoreStackForCalls,
                               (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
      }
    MaxStack += MoreStackForCalls;
  }

  // If the stack frame needed is larger than the guaranteed then runtime checks
  // and calls to "inc_stack_0" BIF should be inserted in the assembly prologue.
  if (MaxStack > Guaranteed) {
    MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
    MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();

    // Both new blocks run before the original entry, so propagate its
    // live-ins.
    for (MachineBasicBlock::livein_iterator I = PrologueMBB.livein_begin(),
                                            E = PrologueMBB.livein_end();
         I != E; I++) {
      stackCheckMBB->addLiveIn(*I);
      incStackMBB->addLiveIn(*I);
    }

    // Resulting block order: stackCheckMBB -> incStackMBB -> original entry.
    MF.push_front(incStackMBB);
    MF.push_front(stackCheckMBB);

    unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
    unsigned LEAop, CMPop, CALLop;
    if (Is64Bit) {
      SPReg = X86::RSP;
      PReg  = X86::RBP;
      LEAop = X86::LEA64r;
      CMPop = X86::CMP64rm;
      CALLop = X86::CALL64pcrel32;
      SPLimitOffset = 0x90;
    } else {
      SPReg = X86::ESP;
      PReg  = X86::EBP;
      LEAop = X86::LEA32r;
      CMPop = X86::CMP32rm;
      CALLop = X86::CALLpcrel32;
      SPLimitOffset = 0x4c;
    }

    ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
    assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
           "HiPE prologue scratch register is live-in");

    // Create new MBB for StackCheck:
    // ScratchReg = SP - MaxStack (LEA keeps EFLAGS intact).
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    // SPLimitOffset is in a fixed heap location (pointed by BP).
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))
                 .addReg(ScratchReg), PReg, false, SPLimitOffset);
    // Enough room: skip the inc_stack_0 call.
    BuildMI(stackCheckMBB, DL, TII.get(X86::JAE_1)).addMBB(&PrologueMBB);

    // Create new MBB for IncStack:
    // Grow the stack, re-check, and loop back to IncStack until it fits.
    BuildMI(incStackMBB, DL, TII.get(CALLop)).
      addExternalSymbol("inc_stack_0");
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))
                 .addReg(ScratchReg), PReg, false, SPLimitOffset);
    BuildMI(incStackMBB, DL, TII.get(X86::JLE_1)).addMBB(incStackMBB);

    // Overflow is expected to be rare: weight 99:1 toward the fall-through.
    stackCheckMBB->addSuccessor(&PrologueMBB, 99);
    stackCheckMBB->addSuccessor(incStackMBB, 1);
    incStackMBB->addSuccessor(&PrologueMBB, 99);
    incStackMBB->addSuccessor(incStackMBB, 1);
  }
#ifdef XDEBUG
  MF.verify();
#endif
}
01867 
/// Lower the ADJCALLSTACKDOWN/ADJCALLSTACKUP pseudo instructions: erase the
/// pseudo and, when the call frame is not reserved in the prologue, replace it
/// with an explicit SP adjustment. Also re-adjusts SP after calls whose callee
/// pops its own arguments.
void X86FrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  bool reserveCallFrame = hasReservedCallFrame(MF);
  unsigned Opcode = I->getOpcode();
  bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
  DebugLoc DL = I->getDebugLoc();
  // Operand 0: byte size of the call frame; operand 1: bytes handled inside
  // the call sequence itself (argument pushes / callee-popped bytes).
  // Read both BEFORE erasing the pseudo instruction below.
  uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
  uint64_t InternalAmt = (isDestroy || Amount) ? I->getOperand(1).getImm() : 0;
  I = MBB.erase(I);

  if (!reserveCallFrame) {
    // If the stack pointer can be changed after prologue, turn the
    // adjcallstackup instruction into a 'sub ESP, <amt>' and the
    // adjcallstackdown instruction into 'add ESP, <amt>'
    if (Amount == 0)
      return;

    // We need to keep the stack aligned properly.  To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
    unsigned StackAlign = getStackAlignment();
    Amount = RoundUpToAlignment(Amount, StackAlign);

    // Factor out the amount that gets handled inside the sequence
    // (Pushes of argument for frame setup, callee pops for frame destroy)
    Amount -= InternalAmt;

    if (Amount) {
      // Add Amount to SP to destroy a frame, and subtract to setup.
      int Offset = isDestroy ? Amount : -Amount;
      BuildStackAdjustment(MBB, I, DL, Offset, /*InEpilogue=*/false);
    }
    return;
  }

  if (isDestroy && InternalAmt) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back.  We do this until we have
    // more advanced stack pointer tracking ability.
    // We are not tracking the stack pointer adjustment by the callee, so make
    // sure we restore the stack pointer immediately after the call, there may
    // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
    MachineBasicBlock::iterator B = MBB.begin();
    while (I != B && !std::prev(I)->isCall())
      --I;
    BuildStackAdjustment(MBB, I, DL, -InternalAmt, /*InEpilogue=*/false);
  }
}
01917 
01918 bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
01919   assert(MBB.getParent() && "Block is not attached to a function!");
01920 
01921   if (canUseLEAForSPInEpilogue(*MBB.getParent()))
01922     return true;
01923 
01924   // If we cannot use LEA to adjust SP, we may need to use ADD, which
01925   // clobbers the EFLAGS. Check that none of the terminators reads the
01926   // EFLAGS, and if one uses it, conservatively assume this is not
01927   // safe to insert the epilogue here.
01928   return !terminatorsNeedFlagsAsInput(MBB);
01929 }