LCOV - code coverage report
Current view: top level - lib/CodeGen/SelectionDAG - FunctionLoweringInfo.cpp (source / functions)
Test: llvm-toolchain.info
Date: 2018-06-17 00:07:59

                  Hit    Total    Coverage
Lines:            242      246      98.4 %
Functions:         16       16     100.0 %

          Line data    Source code
       1             : //===-- FunctionLoweringInfo.cpp ------------------------------------------===//
       2             : //
       3             : //                     The LLVM Compiler Infrastructure
       4             : //
       5             : // This file is distributed under the University of Illinois Open Source
       6             : // License. See LICENSE.TXT for details.
       7             : //
       8             : //===----------------------------------------------------------------------===//
       9             : //
      10             : // This implements routines for translating functions from LLVM IR into
      11             : // Machine IR.
      12             : //
      13             : //===----------------------------------------------------------------------===//
      14             : 
      15             : #include "llvm/CodeGen/FunctionLoweringInfo.h"
      16             : #include "llvm/CodeGen/Analysis.h"
      17             : #include "llvm/CodeGen/MachineFrameInfo.h"
      18             : #include "llvm/CodeGen/MachineFunction.h"
      19             : #include "llvm/CodeGen/MachineInstrBuilder.h"
      20             : #include "llvm/CodeGen/MachineRegisterInfo.h"
      21             : #include "llvm/CodeGen/TargetFrameLowering.h"
      22             : #include "llvm/CodeGen/TargetInstrInfo.h"
      23             : #include "llvm/CodeGen/TargetLowering.h"
      24             : #include "llvm/CodeGen/TargetRegisterInfo.h"
      25             : #include "llvm/CodeGen/TargetSubtargetInfo.h"
      26             : #include "llvm/CodeGen/WinEHFuncInfo.h"
      27             : #include "llvm/IR/DataLayout.h"
      28             : #include "llvm/IR/DerivedTypes.h"
      29             : #include "llvm/IR/Function.h"
      30             : #include "llvm/IR/Instructions.h"
      31             : #include "llvm/IR/IntrinsicInst.h"
      32             : #include "llvm/IR/LLVMContext.h"
      33             : #include "llvm/IR/Module.h"
      34             : #include "llvm/Support/Debug.h"
      35             : #include "llvm/Support/ErrorHandling.h"
      36             : #include "llvm/Support/MathExtras.h"
      37             : #include "llvm/Support/raw_ostream.h"
      38             : #include "llvm/Target/TargetOptions.h"
      39             : #include <algorithm>
      40             : using namespace llvm;
      41             : 
      42             : #define DEBUG_TYPE "function-lowering-info"
      43             : 
      44             : /// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
      45             : /// PHI nodes or outside of the basic block that defines it, or used by a
      46             : /// switch or atomic instruction, which may expand to multiple basic blocks.
      47     2952116 : static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
      48     2952116 :   if (I->use_empty()) return false;
      49     1767635 :   if (isa<PHINode>(I)) return true;
      50     1724075 :   const BasicBlock *BB = I->getParent();
      51     3516575 :   for (const User *U : I->users())
      52     1930359 :     if (cast<Instruction>(U)->getParent() != BB || isa<PHINode>(U))
      53             :       return true;
      54             : 
      55             :   return false;
      56             : }
      57             : 
      58     2952116 : static ISD::NodeType getPreferredExtendForValue(const Value *V) {
      59             :   // For the users of the source value being used for compare instruction, if
      60             :   // the number of signed predicate is greater than unsigned predicate, we
      61             :   // prefer to use SIGN_EXTEND.
      62             :   //
      63             :   // With this optimization, we would be able to reduce some redundant sign or
      64             :   // zero extension instruction, and eventually more machine CSE opportunities
      65             :   // can be exposed.
      66             :   ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
      67             :   unsigned NumOfSigned = 0, NumOfUnsigned = 0;
      68     5173385 :   for (const User *U : V->users()) {
      69             :     if (const auto *CI = dyn_cast<CmpInst>(U)) {
      70       74357 :       NumOfSigned += CI->isSigned();
      71       74357 :       NumOfUnsigned += CI->isUnsigned();
      72             :     }
      73             :   }
      74     2952116 :   if (NumOfSigned > NumOfUnsigned)
      75             :     ExtendKind = ISD::SIGN_EXTEND;
      76             : 
      77     2952116 :   return ExtendKind;
      78             : }
      79             : 
      80      226497 : void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
      81             :                                SelectionDAG *DAG) {
      82      226497 :   Fn = &fn;
      83      226497 :   MF = &mf;
      84      226497 :   TLI = MF->getSubtarget().getTargetLowering();
      85      226497 :   RegInfo = &MF->getRegInfo();
      86      226497 :   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
      87      226497 :   unsigned StackAlign = TFI->getStackAlignment();
      88             : 
      89             :   // Check whether the function can return without sret-demotion.
      90             :   SmallVector<ISD::OutputArg, 4> Outs;
      91      452994 :   GetReturnInfo(Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI,
      92             :                 mf.getDataLayout());
      93      905988 :   CanLowerReturn = TLI->CanLowerReturn(Fn->getCallingConv(), *MF,
      94      452994 :                                        Fn->isVarArg(), Outs, Fn->getContext());
      95             : 
      96             :   // If this personality uses funclets, we need to do a bit more work.
      97             :   DenseMap<const AllocaInst *, TinyPtrVector<int *>> CatchObjects;
      98      226495 :   EHPersonality Personality = classifyEHPersonality(
      99      452990 :       Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr);
     100             :   if (isFuncletEHPersonality(Personality)) {
     101             :     // Calculate state numbers if we haven't already.
     102          89 :     WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();
     103          89 :     if (Personality == EHPersonality::MSVC_CXX)
     104          58 :       calculateWinCXXEHStateNumbers(&fn, EHInfo);
     105             :     else if (isAsynchronousEHPersonality(Personality))
     106          24 :       calculateSEHStateNumbers(&fn, EHInfo);
     107           7 :     else if (Personality == EHPersonality::CoreCLR)
     108           7 :       calculateClrEHStateNumbers(&fn, EHInfo);
     109             : 
     110             :     // Map all BB references in the WinEH data to MBBs.
     111         207 :     for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
     112         191 :       for (WinEHHandlerType &H : TBME.HandlerArray) {
     113          66 :         if (const AllocaInst *AI = H.CatchObj.Alloca)
     114          24 :           CatchObjects.insert({AI, {}}).first->second.push_back(
     115             :               &H.CatchObj.FrameIndex);
     116             :         else
     117          58 :           H.CatchObj.FrameIndex = INT_MAX;
     118             :       }
     119             :     }
     120             :   }
     121             : 
     122             :   // Initialize the mapping of values to registers.  This is only set up for
     123             :   // instruction values that are used outside of the block that defines
     124             :   // them.
     125      879624 :   for (const BasicBlock &BB : *Fn) {
     126     3378750 :     for (const Instruction &I : BB) {
     127     2952116 :       if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
     128      114985 :         Type *Ty = AI->getAllocatedType();
     129             :         unsigned Align =
     130      229970 :           std::max((unsigned)MF->getDataLayout().getPrefTypeAlignment(Ty),
     131      344955 :                    AI->getAlignment());
     132             : 
     133             :         // Static allocas can be folded into the initial stack frame
     134             :         // adjustment. For targets that don't realign the stack, don't
     135             :         // do this if there is an extra alignment requirement.
     136      229503 :         if (AI->isStaticAlloca() &&
     137      114800 :             (TFI->isStackRealignable() || (Align <= StackAlign))) {
     138      114511 :           const ConstantInt *CUI = cast<ConstantInt>(AI->getArraySize());
     139      114511 :           uint64_t TySize = MF->getDataLayout().getTypeAllocSize(Ty);
     140             : 
     141      114511 :           TySize *= CUI->getZExtValue();   // Get total allocated size.
     142      114511 :           if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
     143             :           int FrameIndex = INT_MAX;
     144      114511 :           auto Iter = CatchObjects.find(AI);
     145      114511 :           if (Iter != CatchObjects.end() && TLI->needsFixedCatchObjects()) {
     146           5 :             FrameIndex = MF->getFrameInfo().CreateFixedObject(
     147             :                 TySize, 0, /*Immutable=*/false, /*isAliased=*/true);
     148           5 :             MF->getFrameInfo().setObjectAlignment(FrameIndex, Align);
     149             :           } else {
     150      114506 :             FrameIndex =
     151      114506 :                 MF->getFrameInfo().CreateStackObject(TySize, Align, false, AI);
     152             :           }
     153             : 
     154      229022 :           StaticAllocaMap[AI] = FrameIndex;
     155             :           // Update the catch handler information.
     156      114511 :           if (Iter != CatchObjects.end()) {
     157          23 :             for (int *CatchObjPtr : Iter->second)
     158           8 :               *CatchObjPtr = FrameIndex;
     159             :           }
     160             :         } else {
     161             :           // FIXME: Overaligned static allocas should be grouped into
     162             :           // a single dynamic allocation instead of using a separate
     163             :           // stack allocation for each one.
     164         474 :           if (Align <= StackAlign)
     165             :             Align = 0;
     166             :           // Inform the Frame Information that we have variable-sized objects.
     167         474 :           MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, AI);
     168             :         }
     169             :       }
     170             : 
     171             :       // Look for inline asm that clobbers the SP register.
     172     2952116 :       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
     173             :         ImmutableCallSite CS(&I);
     174      498747 :         if (isa<InlineAsm>(CS.getCalledValue())) {
     175       16477 :           unsigned SP = TLI->getStackPointerRegisterToSaveRestore();
     176       16477 :           const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
     177             :           std::vector<TargetLowering::AsmOperandInfo> Ops =
     178       32954 :               TLI->ParseConstraints(Fn->getParent()->getDataLayout(), TRI, CS);
     179       83258 :           for (TargetLowering::AsmOperandInfo &Op : Ops) {
     180       66781 :             if (Op.Type == InlineAsm::isClobber) {
     181             :               // Clobbers don't have SDValue operands, hence SDValue().
     182       51652 :               TLI->ComputeConstraintToUse(Op, SDValue(), DAG);
     183             :               std::pair<unsigned, const TargetRegisterClass *> PhysReg =
     184       51652 :                   TLI->getRegForInlineAsmConstraint(TRI, Op.ConstraintCode,
     185      103304 :                                                     Op.ConstraintVT);
     186       51652 :               if (PhysReg.first == SP)
     187          67 :                 MF->getFrameInfo().setHasOpaqueSPAdjustment(true);
     188             :             }
     189             :           }
     190             :         }
     191             :       }
     192             : 
     193             :       // Look for calls to the @llvm.va_start intrinsic. We can omit some
     194             :       // prologue boilerplate for variadic functions that don't examine their
     195             :       // arguments.
     196             :       if (const auto *II = dyn_cast<IntrinsicInst>(&I)) {
     197      248745 :         if (II->getIntrinsicID() == Intrinsic::vastart)
     198         247 :           MF->getFrameInfo().setHasVAStart(true);
     199             :       }
     200             : 
     201             :       // If we have a musttail call in a variadic function, we need to ensure we
     202             :       // forward implicit register parameters.
     203             :       if (const auto *CI = dyn_cast<CallInst>(&I)) {
     204      456075 :         if (CI->isMustTailCall() && Fn->isVarArg())
     205          33 :           MF->getFrameInfo().setHasMustTailInVarArgFunc(true);
     206             :       }
     207             : 
     208             :       // Mark values used outside their block as exported, by allocating
     209             :       // a virtual register for them.
     210     2952116 :       if (isUsedOutsideOfDefiningBlock(&I))
     211      181419 :         if (!isa<AllocaInst>(I) || !StaticAllocaMap.count(cast<AllocaInst>(&I)))
     212      139159 :           InitializeRegForValue(&I);
     213             : 
     214             :       // Decide the preferred extend type for a value.
     215     5904232 :       PreferredExtendType[&I] = getPreferredExtendForValue(&I);
     216             :     }
     217             :   }
     218             : 
     219             :   // Create an initial MachineBasicBlock for each LLVM BasicBlock in F.  This
     220             :   // also creates the initial PHI MachineInstrs, though none of the input
     221             :   // operands are populated.
     222      879624 :   for (const BasicBlock &BB : *Fn) {
     223             :     // Don't create MachineBasicBlocks for imaginary EH pad blocks. These blocks
     224             :     // are really data, and no instructions can live here.
     225      426634 :     if (BB.isEHPad()) {
     226       28666 :       const Instruction *PadInst = BB.getFirstNonPHI();
     227             :       // If this is a non-landingpad EH pad, mark this function as using
     228             :       // funclets.
     229             :       // FIXME: SEH catchpads do not create EH scope/funclets, so we could avoid
     230             :       // setting this in such cases in order to improve frame layout.
     231       28666 :       if (!isa<LandingPadInst>(PadInst)) {
     232         245 :         MF->setHasEHScopes(true);
     233         245 :         MF->setHasEHFunclets(true);
     234         245 :         MF->getFrameInfo().setHasOpaqueSPAdjustment(true);
     235             :       }
     236       28666 :       if (isa<CatchSwitchInst>(PadInst)) {
     237             :         assert(&*BB.begin() == PadInst &&
     238             :                "WinEHPrepare failed to remove PHIs from imaginary BBs");
     239             :         continue;
     240             :       }
     241             :       if (isa<FuncletPadInst>(PadInst))
     242             :         assert(&*BB.begin() == PadInst && "WinEHPrepare failed to demote PHIs");
     243             :     }
     244             : 
     245      426536 :     MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(&BB);
     246      853072 :     MBBMap[&BB] = MBB;
     247      426536 :     MF->push_back(MBB);
     248             : 
     249             :     // Transfer the address-taken flag. This is necessary because there could
     250             :     // be multiple MachineBasicBlocks corresponding to one BasicBlock, and only
     251             :     // the first one should be marked.
     252      426536 :     if (BB.hasAddressTaken())
     253             :       MBB->setHasAddressTaken();
     254             : 
     255             :     // Mark landing pad blocks.
     256      426536 :     if (BB.isEHPad())
     257             :       MBB->setIsEHPad();
     258             : 
     259             :     // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
     260             :     // appropriate.
     261      471562 :     for (const PHINode &PN : BB.phis()) {
     262       45026 :       if (PN.use_empty())
     263        2934 :         continue;
     264             : 
     265             :       // Skip empty types
     266       43560 :       if (PN.getType()->isEmptyTy())
     267           2 :         continue;
     268             : 
     269             :       DebugLoc DL = PN.getDebugLoc();
     270       87116 :       unsigned PHIReg = ValueMap[&PN];
     271             :       assert(PHIReg && "PHI node does not have an assigned virtual register!");
     272             : 
     273             :       SmallVector<EVT, 4> ValueVTs;
     274       43558 :       ComputeValueVTs(*TLI, MF->getDataLayout(), PN.getType(), ValueVTs);
     275      131888 :       for (EVT VT : ValueVTs) {
     276       44165 :         unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
     277       44165 :         const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
     278      134247 :         for (unsigned i = 0; i != NumRegisters; ++i)
     279       45041 :           BuildMI(MBB, DL, TII->get(TargetOpcode::PHI), PHIReg + i);
     280       44165 :         PHIReg += NumRegisters;
     281             :       }
     282             :     }
     283             :   }
     284             : 
     285             :   if (!isFuncletEHPersonality(Personality))
     286             :     return;
     287             : 
     288          89 :   WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();
     289             : 
     290             :   // Map all BB references in the WinEH data to MBBs.
     291         207 :   for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
     292         191 :     for (WinEHHandlerType &H : TBME.HandlerArray) {
     293          66 :       if (H.Handler)
     294         198 :         H.Handler = MBBMap[H.Handler.get<const BasicBlock *>()];
     295             :     }
     296             :   }
     297         373 :   for (CxxUnwindMapEntry &UME : EHInfo.CxxUnwindMap)
     298         142 :     if (UME.Cleanup)
     299          72 :       UME.Cleanup = MBBMap[UME.Cleanup.get<const BasicBlock *>()];
     300         161 :   for (SEHUnwindMapEntry &UME : EHInfo.SEHUnwindMap) {
     301          36 :     const BasicBlock *BB = UME.Handler.get<const BasicBlock *>();
     302          72 :     UME.Handler = MBBMap[BB];
     303             :   }
     304         127 :   for (ClrEHUnwindMapEntry &CME : EHInfo.ClrEHUnwindMap) {
     305          19 :     const BasicBlock *BB = CME.Handler.get<const BasicBlock *>();
     306          38 :     CME.Handler = MBBMap[BB];
     307             :   }
     308             : }
     309             : 
     310             : /// clear - Clear out all the function-specific state. This returns this
     311             : /// FunctionLoweringInfo to an empty state, ready to be used for a
     312             : /// different function.
     313      226419 : void FunctionLoweringInfo::clear() {
     314      226419 :   MBBMap.clear();
     315      226419 :   ValueMap.clear();
     316      226419 :   VirtReg2Value.clear();
     317      226419 :   StaticAllocaMap.clear();
     318             :   LiveOutRegInfo.clear();
     319      226419 :   VisitedBBs.clear();
     320             :   ArgDbgValues.clear();
     321      226419 :   ByValArgFrameIndexMap.clear();
     322      226419 :   RegFixups.clear();
     323             :   RegsWithFixups.clear();
     324             :   StatepointStackSlots.clear();
     325      226419 :   StatepointSpillMaps.clear();
     326      226419 :   PreferredExtendType.clear();
     327      226419 : }
     328             : 
     329             : /// CreateReg - Allocate a single virtual register for the given type.
     330      394458 : unsigned FunctionLoweringInfo::CreateReg(MVT VT) {
     331     1577832 :   return RegInfo->createVirtualRegister(
     332     1577832 :       MF->getSubtarget().getTargetLowering()->getRegClassFor(VT));
     333             : }
     334             : 
     335             : /// CreateRegs - Allocate the appropriate number of virtual registers of
     336             : /// the correctly promoted or expanded types.  Assign these registers
     337             : /// consecutive vreg numbers and return the first assigned number.
     338             : ///
     339             : /// In the case that the given value has struct or array type, this function
     340             : /// will assign registers for each member or element.
     341             : ///
     342      405788 : unsigned FunctionLoweringInfo::CreateRegs(Type *Ty) {
     343      405788 :   const TargetLowering *TLI = MF->getSubtarget().getTargetLowering();
     344             : 
     345             :   SmallVector<EVT, 4> ValueVTs;
     346      405788 :   ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);
     347             : 
     348             :   unsigned FirstReg = 0;
     349      793944 :   for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
     350      776312 :     EVT ValueVT = ValueVTs[Value];
     351      388156 :     MVT RegisterVT = TLI->getRegisterType(Ty->getContext(), ValueVT);
     352             : 
     353      388156 :     unsigned NumRegs = TLI->getNumRegisters(Ty->getContext(), ValueVT);
     354     1176508 :     for (unsigned i = 0; i != NumRegs; ++i) {
     355      394176 :       unsigned R = CreateReg(RegisterVT);
     356      394176 :       if (!FirstReg) FirstReg = R;
     357             :     }
     358             :   }
     359      405788 :   return FirstReg;
     360             : }
     361             : 
     362             : /// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
     363             : /// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
     364             : /// the register's LiveOutInfo is for a smaller bit width, it is extended to
     365             : /// the larger bit width by zero extension. The bit width must be no smaller
     366             : /// than the LiveOutInfo's existing bit width.
     367             : const FunctionLoweringInfo::LiveOutInfo *
     368       27587 : FunctionLoweringInfo::GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth) {
     369       27587 :   if (!LiveOutRegInfo.inBounds(Reg))
     370             :     return nullptr;
     371             : 
     372             :   LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
     373       27310 :   if (!LOI->IsValid)
     374             :     return nullptr;
     375             : 
     376       26438 :   if (BitWidth > LOI->Known.getBitWidth()) {
     377       17028 :     LOI->NumSignBits = 1;
     378       17028 :     LOI->Known = LOI->Known.zextOrTrunc(BitWidth);
     379             :   }
     380             : 
     381             :   return LOI;
     382             : }
     383             : 
     384             : /// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
     385             : /// register based on the LiveOutInfo of its operands.
     386       34254 : void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
     387       34254 :   Type *Ty = PN->getType();
     388       34254 :   if (!Ty->isIntegerTy() || Ty->isVectorTy())
     389       20734 :     return;
     390             : 
     391             :   SmallVector<EVT, 1> ValueVTs;
     392       16388 :   ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);
     393             :   assert(ValueVTs.size() == 1 &&
     394             :          "PHIs with non-vector integer types should have a single VT.");
     395       16388 :   EVT IntVT = ValueVTs[0];
     396             : 
     397       16388 :   if (TLI->getNumRegisters(PN->getContext(), IntVT) != 1)
     398             :     return;
     399       32582 :   IntVT = TLI->getTypeToTransformTo(PN->getContext(), IntVT);
     400       16291 :   unsigned BitWidth = IntVT.getSizeInBits();
     401             : 
     402       32582 :   unsigned DestReg = ValueMap[PN];
     403       16291 :   if (!TargetRegisterInfo::isVirtualRegister(DestReg))
     404             :     return;
     405       14860 :   LiveOutRegInfo.grow(DestReg);
     406             :   LiveOutInfo &DestLOI = LiveOutRegInfo[DestReg];
     407             : 
     408             :   Value *V = PN->getIncomingValue(0);
     409       14860 :   if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
     410          64 :     DestLOI.NumSignBits = 1;
     411          64 :     DestLOI.Known = KnownBits(BitWidth);
     412          64 :     return;
     413             :   }
     414             : 
     415             :   if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
     416        1964 :     APInt Val = CI->getValue().zextOrTrunc(BitWidth);
     417        1964 :     DestLOI.NumSignBits = Val.getNumSignBits();
     418        1964 :     DestLOI.Known.Zero = ~Val;
     419        1964 :     DestLOI.Known.One = Val;
     420             :   } else {
     421             :     assert(ValueMap.count(V) && "V should have been placed in ValueMap when its"
     422             :                                 "CopyToReg node was created.");
     423       25664 :     unsigned SrcReg = ValueMap[V];
     424       12832 :     if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
     425           0 :       DestLOI.IsValid = false;
     426           0 :       return;
     427             :     }
     428       12832 :     const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
     429       12832 :     if (!SrcLOI) {
     430         594 :       DestLOI.IsValid = false;
     431         594 :       return;
     432             :     }
     433       12238 :     DestLOI = *SrcLOI;
     434             :   }
     435             : 
     436             :   assert(DestLOI.Known.Zero.getBitWidth() == BitWidth &&
     437             :          DestLOI.Known.One.getBitWidth() == BitWidth &&
     438             :          "Masks should have the same bit width as the type.");
     439             : 
     440       46400 :   for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
     441             :     Value *V = PN->getIncomingValue(i);
     442       16781 :     if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
     443         127 :       DestLOI.NumSignBits = 1;
     444         127 :       DestLOI.Known = KnownBits(BitWidth);
     445         127 :       return;
     446             :     }
     447             : 
     448             :     if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
     449        1899 :       APInt Val = CI->getValue().zextOrTrunc(BitWidth);
     450        3798 :       DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, Val.getNumSignBits());
     451        5697 :       DestLOI.Known.Zero &= ~Val;
     452        1899 :       DestLOI.Known.One &= Val;
     453             :       continue;
     454             :     }
     455             : 
     456             :     assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
     457             :                                 "its CopyToReg node was created.");
     458       29510 :     unsigned SrcReg = ValueMap[V];
     459       14755 :     if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
     460           0 :       DestLOI.IsValid = false;
     461           0 :       return;
     462             :     }
     463       14755 :     const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
     464       14755 :     if (!SrcLOI) {
     465         555 :       DestLOI.IsValid = false;
     466         555 :       return;
     467             :     }
     468       28400 :     DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, SrcLOI->NumSignBits);
     469       14200 :     DestLOI.Known.Zero &= SrcLOI->Known.Zero;
     470       14200 :     DestLOI.Known.One &= SrcLOI->Known.One;
     471             :   }
     472             : }
     473             : 
     474             : /// setArgumentFrameIndex - Record frame index for the byval
     475             : /// argument. This overrides previous frame index entry for this argument,
     476             : /// if any.
     477        1683 : void FunctionLoweringInfo::setArgumentFrameIndex(const Argument *A,
     478             :                                                  int FI) {
     479        3366 :   ByValArgFrameIndexMap[A] = FI;
     480        1683 : }
     481             : 
     482             : /// getArgumentFrameIndex - Get frame index for the byval argument.
     483             : /// If the argument does not have any assigned frame index then 0 is
     484             : /// returned.
     485        5149 : int FunctionLoweringInfo::getArgumentFrameIndex(const Argument *A) {
     486        5149 :   auto I = ByValArgFrameIndexMap.find(A);
     487        5149 :   if (I != ByValArgFrameIndexMap.end())
     488         113 :     return I->second;
     489             :   LLVM_DEBUG(dbgs() << "Argument does not have assigned frame index!\n");
     490             :   return INT_MAX;
     491             : }
     492             : 
     493          12 : unsigned FunctionLoweringInfo::getCatchPadExceptionPointerVReg(
     494             :     const Value *CPI, const TargetRegisterClass *RC) {
     495          12 :   MachineRegisterInfo &MRI = MF->getRegInfo();
     496          24 :   auto I = CatchPadExceptionPointers.insert({CPI, 0});
     497          12 :   unsigned &VReg = I.first->second;
     498          12 :   if (I.second)
     499           6 :     VReg = MRI.createVirtualRegister(RC);
     500             :   assert(VReg && "null vreg in exception pointer table!");
     501          12 :   return VReg;
     502             : }
     503             : 
     504             : unsigned
     505         507 : FunctionLoweringInfo::getOrCreateSwiftErrorVReg(const MachineBasicBlock *MBB,
     506             :                                                 const Value *Val) {
     507         507 :   auto Key = std::make_pair(MBB, Val);
     508         507 :   auto It = SwiftErrorVRegDefMap.find(Key);
     509             :   // If this is the first use of this swifterror value in this basic block,
     510             :   // create a new virtual register.
     511             :   // After we processed all basic blocks we will satisfy this "upwards exposed
     512             :   // use" by inserting a copy or phi at the beginning of this block.
     513         507 :   if (It == SwiftErrorVRegDefMap.end()) {
     514          45 :     auto &DL = MF->getDataLayout();
     515          90 :     const TargetRegisterClass *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
     516          90 :     auto VReg = MF->getRegInfo().createVirtualRegister(RC);
     517          45 :     SwiftErrorVRegDefMap[Key] = VReg;
     518          90 :     SwiftErrorVRegUpwardsUse[Key] = VReg;
     519          45 :     return VReg;
     520         462 :   } else return It->second;
     521             : }
     522             : 
     523         504 : void FunctionLoweringInfo::setCurrentSwiftErrorVReg(
     524             :     const MachineBasicBlock *MBB, const Value *Val, unsigned VReg) {
     525        1008 :   SwiftErrorVRegDefMap[std::make_pair(MBB, Val)] = VReg;
     526         504 : }
     527             : 
     528             : std::pair<unsigned, bool>
     529         300 : FunctionLoweringInfo::getOrCreateSwiftErrorVRegDefAt(const Instruction *I) {
     530             :   auto Key = PointerIntPair<const Instruction *, 1, bool>(I, true);
     531         300 :   auto It = SwiftErrorVRegDefUses.find(Key);
     532         300 :   if (It == SwiftErrorVRegDefUses.end()) {
     533         218 :     auto &DL = MF->getDataLayout();
     534         436 :     const TargetRegisterClass *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
     535         436 :     unsigned VReg =  MF->getRegInfo().createVirtualRegister(RC);
     536         218 :     SwiftErrorVRegDefUses[Key] = VReg;
     537             :     return std::make_pair(VReg, true);
     538             :   }
     539          82 :   return std::make_pair(It->second, false);
     540             : }
     541             : 
     542             : std::pair<unsigned, bool>
     543         381 : FunctionLoweringInfo::getOrCreateSwiftErrorVRegUseAt(const Instruction *I, const MachineBasicBlock *MBB, const Value *Val) {
     544             :   auto Key = PointerIntPair<const Instruction *, 1, bool>(I, false);
     545         381 :   auto It = SwiftErrorVRegDefUses.find(Key);
     546         381 :   if (It == SwiftErrorVRegDefUses.end()) {
     547         275 :     unsigned VReg = getOrCreateSwiftErrorVReg(MBB, Val);
     548         275 :     SwiftErrorVRegDefUses[Key] = VReg;
     549             :     return std::make_pair(VReg, true);
     550             :   }
     551         106 :   return std::make_pair(It->second, false);
     552             : }
     553             : 
     554             : const Value *
     555      280847 : FunctionLoweringInfo::getValueFromVirtualReg(unsigned Vreg) {
     556      280847 :   if (VirtReg2Value.empty()) {
     557       37116 :     for (auto &P : ValueMap) {
     558       12408 :       VirtReg2Value[P.second] = P.first;
     559             :     }
     560             :   }
     561      561694 :   return VirtReg2Value[Vreg];
     562             : }

Generated by: LCOV version 1.13