LLVM  mainline
FunctionLoweringInfo.cpp
Go to the documentation of this file.
00001 //===-- FunctionLoweringInfo.cpp ------------------------------------------===//
00002 //
00003 //                     The LLVM Compiler Infrastructure
00004 //
00005 // This file is distributed under the University of Illinois Open Source
00006 // License. See LICENSE.TXT for details.
00007 //
00008 //===----------------------------------------------------------------------===//
00009 //
00010 // This implements routines for translating functions from LLVM IR into
00011 // Machine IR.
00012 //
00013 //===----------------------------------------------------------------------===//
00014 
00015 #include "llvm/CodeGen/FunctionLoweringInfo.h"
00016 #include "llvm/ADT/PostOrderIterator.h"
00017 #include "llvm/CodeGen/Analysis.h"
00018 #include "llvm/CodeGen/MachineFrameInfo.h"
00019 #include "llvm/CodeGen/MachineFunction.h"
00020 #include "llvm/CodeGen/MachineInstrBuilder.h"
00021 #include "llvm/CodeGen/MachineModuleInfo.h"
00022 #include "llvm/CodeGen/MachineRegisterInfo.h"
00023 #include "llvm/CodeGen/WinEHFuncInfo.h"
00024 #include "llvm/IR/DataLayout.h"
00025 #include "llvm/IR/DebugInfo.h"
00026 #include "llvm/IR/DerivedTypes.h"
00027 #include "llvm/IR/Function.h"
00028 #include "llvm/IR/Instructions.h"
00029 #include "llvm/IR/IntrinsicInst.h"
00030 #include "llvm/IR/LLVMContext.h"
00031 #include "llvm/IR/Module.h"
00032 #include "llvm/Support/Debug.h"
00033 #include "llvm/Support/ErrorHandling.h"
00034 #include "llvm/Support/MathExtras.h"
00035 #include "llvm/Support/raw_ostream.h"
00036 #include "llvm/Target/TargetFrameLowering.h"
00037 #include "llvm/Target/TargetInstrInfo.h"
00038 #include "llvm/Target/TargetLowering.h"
00039 #include "llvm/Target/TargetOptions.h"
00040 #include "llvm/Target/TargetRegisterInfo.h"
00041 #include "llvm/Target/TargetSubtargetInfo.h"
00042 #include <algorithm>
00043 using namespace llvm;
00044 
00045 #define DEBUG_TYPE "function-lowering-info"
00046 
00047 /// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
00048 /// PHI nodes or outside of the basic block that defines it, or used by a
00049 /// switch or atomic instruction, which may expand to multiple basic blocks.
00050 static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
00051   if (I->use_empty()) return false;
00052   if (isa<PHINode>(I)) return true;
00053   const BasicBlock *BB = I->getParent();
00054   for (const User *U : I->users())
00055     if (cast<Instruction>(U)->getParent() != BB || isa<PHINode>(U))
00056       return true;
00057 
00058   return false;
00059 }
00060 
00061 static ISD::NodeType getPreferredExtendForValue(const Value *V) {
00062   // For the users of the source value being used for compare instruction, if
00063   // the number of signed predicate is greater than unsigned predicate, we
00064   // prefer to use SIGN_EXTEND.
00065   //
00066   // With this optimization, we would be able to reduce some redundant sign or
00067   // zero extension instruction, and eventually more machine CSE opportunities
00068   // can be exposed.
00069   ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
00070   unsigned NumOfSigned = 0, NumOfUnsigned = 0;
00071   for (const User *U : V->users()) {
00072     if (const auto *CI = dyn_cast<CmpInst>(U)) {
00073       NumOfSigned += CI->isSigned();
00074       NumOfUnsigned += CI->isUnsigned();
00075     }
00076   }
00077   if (NumOfSigned > NumOfUnsigned)
00078     ExtendKind = ISD::SIGN_EXTEND;
00079 
00080   return ExtendKind;
00081 }
00082 
00083 void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
00084                                SelectionDAG *DAG) {
00085   Fn = &fn;
00086   MF = &mf;
00087   TLI = MF->getSubtarget().getTargetLowering();
00088   RegInfo = &MF->getRegInfo();
00089   MachineModuleInfo &MMI = MF->getMMI();
00090   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
00091 
00092   // Check whether the function can return without sret-demotion.
00093   SmallVector<ISD::OutputArg, 4> Outs;
00094   GetReturnInfo(Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI,
00095                 mf.getDataLayout());
00096   CanLowerReturn = TLI->CanLowerReturn(Fn->getCallingConv(), *MF,
00097                                        Fn->isVarArg(), Outs, Fn->getContext());
00098 
00099   // Initialize the mapping of values to registers.  This is only set up for
00100   // instruction values that are used outside of the block that defines
00101   // them.
00102   Function::const_iterator BB = Fn->begin(), EB = Fn->end();
00103   for (; BB != EB; ++BB)
00104     for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
00105          I != E; ++I) {
00106       if (const AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
00107         Type *Ty = AI->getAllocatedType();
00108         unsigned Align =
00109           std::max((unsigned)MF->getDataLayout().getPrefTypeAlignment(Ty),
00110                    AI->getAlignment());
00111         unsigned StackAlign = TFI->getStackAlignment();
00112 
00113         // Static allocas can be folded into the initial stack frame
00114         // adjustment. For targets that don't realign the stack, don't
00115         // do this if there is an extra alignment requirement.
00116         if (AI->isStaticAlloca() && 
00117             (TFI->isStackRealignable() || (Align <= StackAlign))) {
00118           const ConstantInt *CUI = cast<ConstantInt>(AI->getArraySize());
00119           uint64_t TySize = MF->getDataLayout().getTypeAllocSize(Ty);
00120 
00121           TySize *= CUI->getZExtValue();   // Get total allocated size.
00122           if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
00123 
00124           StaticAllocaMap[AI] =
00125             MF->getFrameInfo()->CreateStackObject(TySize, Align, false, AI);
00126         } else {
00127           // FIXME: Overaligned static allocas should be grouped into
00128           // a single dynamic allocation instead of using a separate
00129           // stack allocation for each one.
00130           if (Align <= StackAlign)
00131             Align = 0;
00132           // Inform the Frame Information that we have variable-sized objects.
00133           MF->getFrameInfo()->CreateVariableSizedObject(Align ? Align : 1, AI);
00134         }
00135       }
00136 
00137       // Look for inline asm that clobbers the SP register.
00138       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
00139         ImmutableCallSite CS(&*I);
00140         if (isa<InlineAsm>(CS.getCalledValue())) {
00141           unsigned SP = TLI->getStackPointerRegisterToSaveRestore();
00142           const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
00143           std::vector<TargetLowering::AsmOperandInfo> Ops =
00144               TLI->ParseConstraints(Fn->getParent()->getDataLayout(), TRI, CS);
00145           for (size_t I = 0, E = Ops.size(); I != E; ++I) {
00146             TargetLowering::AsmOperandInfo &Op = Ops[I];
00147             if (Op.Type == InlineAsm::isClobber) {
00148               // Clobbers don't have SDValue operands, hence SDValue().
00149               TLI->ComputeConstraintToUse(Op, SDValue(), DAG);
00150               std::pair<unsigned, const TargetRegisterClass *> PhysReg =
00151                   TLI->getRegForInlineAsmConstraint(TRI, Op.ConstraintCode,
00152                                                     Op.ConstraintVT);
00153               if (PhysReg.first == SP)
00154                 MF->getFrameInfo()->setHasOpaqueSPAdjustment(true);
00155             }
00156           }
00157         }
00158       }
00159 
00160       // Look for calls to the @llvm.va_start intrinsic. We can omit some
00161       // prologue boilerplate for variadic functions that don't examine their
00162       // arguments.
00163       if (const auto *II = dyn_cast<IntrinsicInst>(I)) {
00164         if (II->getIntrinsicID() == Intrinsic::vastart)
00165           MF->getFrameInfo()->setHasVAStart(true);
00166       }
00167 
00168       // If we have a musttail call in a variadic function, we need to ensure we
00169       // forward implicit register parameters.
00170       if (const auto *CI = dyn_cast<CallInst>(I)) {
00171         if (CI->isMustTailCall() && Fn->isVarArg())
00172           MF->getFrameInfo()->setHasMustTailInVarArgFunc(true);
00173       }
00174 
00175       // Mark values used outside their block as exported, by allocating
00176       // a virtual register for them.
00177       if (isUsedOutsideOfDefiningBlock(&*I))
00178         if (!isa<AllocaInst>(I) || !StaticAllocaMap.count(cast<AllocaInst>(I)))
00179           InitializeRegForValue(&*I);
00180 
00181       // Collect llvm.dbg.declare information. This is done now instead of
00182       // during the initial isel pass through the IR so that it is done
00183       // in a predictable order.
00184       if (const DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(I)) {
00185         assert(DI->getVariable() && "Missing variable");
00186         assert(DI->getDebugLoc() && "Missing location");
00187         if (MMI.hasDebugInfo()) {
00188           // Don't handle byval struct arguments or VLAs, for example.
00189           // Non-byval arguments are handled here (they refer to the stack
00190           // temporary alloca at this point).
00191           const Value *Address = DI->getAddress();
00192           if (Address) {
00193             if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
00194               Address = BCI->getOperand(0);
00195             if (const AllocaInst *AI = dyn_cast<AllocaInst>(Address)) {
00196               DenseMap<const AllocaInst *, int>::iterator SI =
00197                 StaticAllocaMap.find(AI);
00198               if (SI != StaticAllocaMap.end()) { // Check for VLAs.
00199                 int FI = SI->second;
00200                 MMI.setVariableDbgInfo(DI->getVariable(), DI->getExpression(),
00201                                        FI, DI->getDebugLoc());
00202               }
00203             }
00204           }
00205         }
00206       }
00207 
00208       // Decide the preferred extend type for a value.
00209       PreferredExtendType[&*I] = getPreferredExtendForValue(&*I);
00210     }
00211 
00212   // Create an initial MachineBasicBlock for each LLVM BasicBlock in F.  This
00213   // also creates the initial PHI MachineInstrs, though none of the input
00214   // operands are populated.
00215   for (BB = Fn->begin(); BB != EB; ++BB) {
00216     // Don't create MachineBasicBlocks for imaginary EH pad blocks. These blocks
00217     // are really data, and no instructions can live here.
00218     if (BB->isEHPad()) {
00219       const Instruction *I = BB->getFirstNonPHI();
00220       // If this is a non-landingpad EH pad, mark this function as using
00221       // funclets.
00222       // FIXME: SEH catchpads do not create funclets, so we could avoid setting
00223       // this in such cases in order to improve frame layout.
00224       if (!isa<LandingPadInst>(I)) {
00225         MMI.setHasEHFunclets(true);
00226         MF->getFrameInfo()->setHasOpaqueSPAdjustment(true);
00227       }
00228       if (isa<CatchSwitchInst>(I)) {
00229         assert(&*BB->begin() == I &&
00230                "WinEHPrepare failed to remove PHIs from imaginary BBs");
00231         continue;
00232       }
00233       if (isa<FuncletPadInst>(I))
00234         assert(&*BB->begin() == I && "WinEHPrepare failed to demote PHIs");
00235     }
00236 
00237     MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(&*BB);
00238     MBBMap[&*BB] = MBB;
00239     MF->push_back(MBB);
00240 
00241     // Transfer the address-taken flag. This is necessary because there could
00242     // be multiple MachineBasicBlocks corresponding to one BasicBlock, and only
00243     // the first one should be marked.
00244     if (BB->hasAddressTaken())
00245       MBB->setHasAddressTaken();
00246 
00247     // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
00248     // appropriate.
00249     for (BasicBlock::const_iterator I = BB->begin();
00250          const PHINode *PN = dyn_cast<PHINode>(I); ++I) {
00251       if (PN->use_empty()) continue;
00252 
00253       // Skip empty types
00254       if (PN->getType()->isEmptyTy())
00255         continue;
00256 
00257       DebugLoc DL = PN->getDebugLoc();
00258       unsigned PHIReg = ValueMap[PN];
00259       assert(PHIReg && "PHI node does not have an assigned virtual register!");
00260 
00261       SmallVector<EVT, 4> ValueVTs;
00262       ComputeValueVTs(*TLI, MF->getDataLayout(), PN->getType(), ValueVTs);
00263       for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
00264         EVT VT = ValueVTs[vti];
00265         unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
00266         const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
00267         for (unsigned i = 0; i != NumRegisters; ++i)
00268           BuildMI(MBB, DL, TII->get(TargetOpcode::PHI), PHIReg + i);
00269         PHIReg += NumRegisters;
00270       }
00271     }
00272   }
00273 
00274   // Mark landing pad blocks.
00275   SmallVector<const LandingPadInst *, 4> LPads;
00276   for (BB = Fn->begin(); BB != EB; ++BB) {
00277     const Instruction *FNP = BB->getFirstNonPHI();
00278     if (BB->isEHPad() && MBBMap.count(&*BB))
00279       MBBMap[&*BB]->setIsEHPad();
00280     if (const auto *LPI = dyn_cast<LandingPadInst>(FNP))
00281       LPads.push_back(LPI);
00282   }
00283 
00284   // If this personality uses funclets, we need to do a bit more work.
00285   if (!Fn->hasPersonalityFn())
00286     return;
00287   EHPersonality Personality = classifyEHPersonality(Fn->getPersonalityFn());
00288   if (!isFuncletEHPersonality(Personality))
00289     return;
00290 
00291   // Calculate state numbers if we haven't already.
00292   WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();
00293   if (Personality == EHPersonality::MSVC_CXX)
00294     calculateWinCXXEHStateNumbers(&fn, EHInfo);
00295   else if (isAsynchronousEHPersonality(Personality))
00296     calculateSEHStateNumbers(&fn, EHInfo);
00297   else if (Personality == EHPersonality::CoreCLR)
00298     calculateClrEHStateNumbers(&fn, EHInfo);
00299 
00300   // Map all BB references in the WinEH data to MBBs.
00301   for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
00302     for (WinEHHandlerType &H : TBME.HandlerArray) {
00303       if (H.CatchObj.Alloca) {
00304         assert(StaticAllocaMap.count(H.CatchObj.Alloca));
00305         H.CatchObj.FrameIndex = StaticAllocaMap[H.CatchObj.Alloca];
00306       } else {
00307         H.CatchObj.FrameIndex = INT_MAX;
00308       }
00309       if (H.Handler)
00310         H.Handler = MBBMap[H.Handler.get<const BasicBlock *>()];
00311     }
00312   }
00313   for (CxxUnwindMapEntry &UME : EHInfo.CxxUnwindMap)
00314     if (UME.Cleanup)
00315       UME.Cleanup = MBBMap[UME.Cleanup.get<const BasicBlock *>()];
00316   for (SEHUnwindMapEntry &UME : EHInfo.SEHUnwindMap) {
00317     const BasicBlock *BB = UME.Handler.get<const BasicBlock *>();
00318     UME.Handler = MBBMap[BB];
00319   }
00320   for (ClrEHUnwindMapEntry &CME : EHInfo.ClrEHUnwindMap) {
00321     const BasicBlock *BB = CME.Handler.get<const BasicBlock *>();
00322     CME.Handler = MBBMap[BB];
00323   }
00324 }
00325 
00326 /// clear - Clear out all the function-specific state. This returns this
00327 /// FunctionLoweringInfo to an empty state, ready to be used for a
00328 /// different function.
00329 void FunctionLoweringInfo::clear() {
00330   MBBMap.clear();
00331   ValueMap.clear();
00332   StaticAllocaMap.clear();
00333   LiveOutRegInfo.clear();
00334   VisitedBBs.clear();
00335   ArgDbgValues.clear();
00336   ByValArgFrameIndexMap.clear();
00337   RegFixups.clear();
00338   StatepointStackSlots.clear();
00339   StatepointRelocatedValues.clear();
00340   PreferredExtendType.clear();
00341 }
00342 
00343 /// CreateReg - Allocate a single virtual register for the given type.
00344 unsigned FunctionLoweringInfo::CreateReg(MVT VT) {
00345   return RegInfo->createVirtualRegister(
00346       MF->getSubtarget().getTargetLowering()->getRegClassFor(VT));
00347 }
00348 
00349 /// CreateRegs - Allocate the appropriate number of virtual registers of
00350 /// the correctly promoted or expanded types.  Assign these registers
00351 /// consecutive vreg numbers and return the first assigned number.
00352 ///
00353 /// In the case that the given value has struct or array type, this function
00354 /// will assign registers for each member or element.
00355 ///
00356 unsigned FunctionLoweringInfo::CreateRegs(Type *Ty) {
00357   const TargetLowering *TLI = MF->getSubtarget().getTargetLowering();
00358 
00359   SmallVector<EVT, 4> ValueVTs;
00360   ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);
00361 
00362   unsigned FirstReg = 0;
00363   for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
00364     EVT ValueVT = ValueVTs[Value];
00365     MVT RegisterVT = TLI->getRegisterType(Ty->getContext(), ValueVT);
00366 
00367     unsigned NumRegs = TLI->getNumRegisters(Ty->getContext(), ValueVT);
00368     for (unsigned i = 0; i != NumRegs; ++i) {
00369       unsigned R = CreateReg(RegisterVT);
00370       if (!FirstReg) FirstReg = R;
00371     }
00372   }
00373   return FirstReg;
00374 }
00375 
00376 /// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
00377 /// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
00378 /// the register's LiveOutInfo is for a smaller bit width, it is extended to
00379 /// the larger bit width by zero extension. The bit width must be no smaller
00380 /// than the LiveOutInfo's existing bit width.
00381 const FunctionLoweringInfo::LiveOutInfo *
00382 FunctionLoweringInfo::GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth) {
00383   if (!LiveOutRegInfo.inBounds(Reg))
00384     return nullptr;
00385 
00386   LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
00387   if (!LOI->IsValid)
00388     return nullptr;
00389 
00390   if (BitWidth > LOI->KnownZero.getBitWidth()) {
00391     LOI->NumSignBits = 1;
00392     LOI->KnownZero = LOI->KnownZero.zextOrTrunc(BitWidth);
00393     LOI->KnownOne = LOI->KnownOne.zextOrTrunc(BitWidth);
00394   }
00395 
00396   return LOI;
00397 }
00398 
/// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
/// register based on the LiveOutInfo of its operands.
///
/// The result (NumSignBits / KnownZero / KnownOne) is the meet over all
/// incoming values: constants contribute their exact bits, other values
/// contribute the cached LiveOutInfo of their source vreg. If any incoming
/// value cannot be analyzed, the destination info is marked invalid.
void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
  // Only scalar integer PHIs are tracked.
  Type *Ty = PN->getType();
  if (!Ty->isIntegerTy() || Ty->isVectorTy())
    return;

  SmallVector<EVT, 1> ValueVTs;
  ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);
  assert(ValueVTs.size() == 1 &&
         "PHIs with non-vector integer types should have a single VT.");
  EVT IntVT = ValueVTs[0];

  // Give up on types that legalize into more than one register.
  if (TLI->getNumRegisters(PN->getContext(), IntVT) != 1)
    return;
  IntVT = TLI->getTypeToTransformTo(PN->getContext(), IntVT);
  unsigned BitWidth = IntVT.getSizeInBits();

  unsigned DestReg = ValueMap[PN];
  if (!TargetRegisterInfo::isVirtualRegister(DestReg))
    return;
  LiveOutRegInfo.grow(DestReg);
  LiveOutInfo &DestLOI = LiveOutRegInfo[DestReg];

  // Seed DestLOI from the first incoming value; the loop below intersects
  // the remaining ones into it.
  Value *V = PN->getIncomingValue(0);
  if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
    // Nothing is known about undef/constexpr inputs.
    DestLOI.NumSignBits = 1;
    APInt Zero(BitWidth, 0);
    DestLOI.KnownZero = Zero;
    DestLOI.KnownOne = Zero;
    return;
  }

  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // A constant's bits are fully known.
    APInt Val = CI->getValue().zextOrTrunc(BitWidth);
    DestLOI.NumSignBits = Val.getNumSignBits();
    DestLOI.KnownZero = ~Val;
    DestLOI.KnownOne = Val;
  } else {
    assert(ValueMap.count(V) && "V should have been placed in ValueMap when its"
                                "CopyToReg node was created.");
    unsigned SrcReg = ValueMap[V];
    if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      DestLOI.IsValid = false;
      return;
    }
    const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
    if (!SrcLOI) {
      DestLOI.IsValid = false;
      return;
    }
    DestLOI = *SrcLOI;
  }

  assert(DestLOI.KnownZero.getBitWidth() == BitWidth &&
         DestLOI.KnownOne.getBitWidth() == BitWidth &&
         "Masks should have the same bit width as the type.");

  // Intersect the remaining incoming values into DestLOI.
  for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
      // One unknown input invalidates all bit knowledge.
      DestLOI.NumSignBits = 1;
      APInt Zero(BitWidth, 0);
      DestLOI.KnownZero = Zero;
      DestLOI.KnownOne = Zero;
      return;
    }

    if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      APInt Val = CI->getValue().zextOrTrunc(BitWidth);
      DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, Val.getNumSignBits());
      DestLOI.KnownZero &= ~Val;
      DestLOI.KnownOne &= Val;
      continue;
    }

    assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
                                "its CopyToReg node was created.");
    unsigned SrcReg = ValueMap[V];
    if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      DestLOI.IsValid = false;
      return;
    }
    const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
    if (!SrcLOI) {
      DestLOI.IsValid = false;
      return;
    }
    // Keep only the bits known in both the accumulated and source info.
    DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, SrcLOI->NumSignBits);
    DestLOI.KnownZero &= SrcLOI->KnownZero;
    DestLOI.KnownOne &= SrcLOI->KnownOne;
  }
}
00492 
/// setArgumentFrameIndex - Record frame index for the byval
/// argument. This overrides previous frame index entry for this argument,
/// if any.
///
/// \param A  the IR argument to associate with a stack slot.
/// \param FI the frame index of that slot.
void FunctionLoweringInfo::setArgumentFrameIndex(const Argument *A,
                                                 int FI) {
  ByValArgFrameIndexMap[A] = FI;
}
00500 
00501 /// getArgumentFrameIndex - Get frame index for the byval argument.
00502 /// If the argument does not have any assigned frame index then 0 is
00503 /// returned.
00504 int FunctionLoweringInfo::getArgumentFrameIndex(const Argument *A) {
00505   DenseMap<const Argument *, int>::iterator I =
00506     ByValArgFrameIndexMap.find(A);
00507   if (I != ByValArgFrameIndexMap.end())
00508     return I->second;
00509   DEBUG(dbgs() << "Argument does not have assigned frame index!\n");
00510   return 0;
00511 }
00512 
00513 unsigned FunctionLoweringInfo::getCatchPadExceptionPointerVReg(
00514     const Value *CPI, const TargetRegisterClass *RC) {
00515   MachineRegisterInfo &MRI = MF->getRegInfo();
00516   auto I = CatchPadExceptionPointers.insert({CPI, 0});
00517   unsigned &VReg = I.first->second;
00518   if (I.second)
00519     VReg = MRI.createVirtualRegister(RC);
00520   assert(VReg && "null vreg in exception pointer table!");
00521   return VReg;
00522 }
00523 
00524 /// ComputeUsesVAFloatArgument - Determine if any floating-point values are
00525 /// being passed to this variadic function, and set the MachineModuleInfo's
00526 /// usesVAFloatArgument flag if so. This flag is used to emit an undefined
00527 /// reference to _fltused on Windows, which will link in MSVCRT's
00528 /// floating-point support.
00529 void llvm::ComputeUsesVAFloatArgument(const CallInst &I,
00530                                       MachineModuleInfo *MMI)
00531 {
00532   FunctionType *FT = cast<FunctionType>(
00533     I.getCalledValue()->getType()->getContainedType(0));
00534   if (FT->isVarArg() && !MMI->usesVAFloatArgument()) {
00535     for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
00536       Type* T = I.getArgOperand(i)->getType();
00537       for (auto i : post_order(T)) {
00538         if (i->isFloatingPointTy()) {
00539           MMI->setUsesVAFloatArgument(true);
00540           return;
00541         }
00542       }
00543     }
00544   }
00545 }
00546 
00547 /// AddLandingPadInfo - Extract the exception handling information from the
00548 /// landingpad instruction and add them to the specified machine module info.
00549 void llvm::AddLandingPadInfo(const LandingPadInst &I, MachineModuleInfo &MMI,
00550                              MachineBasicBlock *MBB) {
00551   if (const auto *PF = dyn_cast<Function>(
00552       I.getParent()->getParent()->getPersonalityFn()->stripPointerCasts()))
00553     MMI.addPersonality(PF);
00554 
00555   if (I.isCleanup())
00556     MMI.addCleanup(MBB);
00557 
00558   // FIXME: New EH - Add the clauses in reverse order. This isn't 100% correct,
00559   //        but we need to do it this way because of how the DWARF EH emitter
00560   //        processes the clauses.
00561   for (unsigned i = I.getNumClauses(); i != 0; --i) {
00562     Value *Val = I.getClause(i - 1);
00563     if (I.isCatch(i - 1)) {
00564       MMI.addCatchTypeInfo(MBB,
00565                            dyn_cast<GlobalValue>(Val->stripPointerCasts()));
00566     } else {
00567       // Add filters in a list.
00568       Constant *CVal = cast<Constant>(Val);
00569       SmallVector<const GlobalValue*, 4> FilterList;
00570       for (User::op_iterator
00571              II = CVal->op_begin(), IE = CVal->op_end(); II != IE; ++II)
00572         FilterList.push_back(cast<GlobalValue>((*II)->stripPointerCasts()));
00573 
00574       MMI.addFilterTypeInfo(MBB, FilterList);
00575     }
00576   }
00577 }