LCOV - code coverage report
Current view: top level - lib/CodeGen/SelectionDAG - FunctionLoweringInfo.cpp (source / functions)
Test:         llvm-toolchain.info
Date:         2018-07-13 00:08:38

                  Hit    Total    Coverage
Lines:            259      263      98.5 %
Functions:         16       16     100.0 %

          Line data    Source code
       1             : //===-- FunctionLoweringInfo.cpp ------------------------------------------===//
       2             : //
       3             : //                     The LLVM Compiler Infrastructure
       4             : //
       5             : // This file is distributed under the University of Illinois Open Source
       6             : // License. See LICENSE.TXT for details.
       7             : //
       8             : //===----------------------------------------------------------------------===//
       9             : //
      10             : // This implements routines for translating functions from LLVM IR into
      11             : // Machine IR.
      12             : //
      13             : //===----------------------------------------------------------------------===//
      14             : 
      15             : #include "llvm/CodeGen/FunctionLoweringInfo.h"
      16             : #include "llvm/CodeGen/Analysis.h"
      17             : #include "llvm/CodeGen/MachineFrameInfo.h"
      18             : #include "llvm/CodeGen/MachineFunction.h"
      19             : #include "llvm/CodeGen/MachineInstrBuilder.h"
      20             : #include "llvm/CodeGen/MachineRegisterInfo.h"
      21             : #include "llvm/CodeGen/TargetFrameLowering.h"
      22             : #include "llvm/CodeGen/TargetInstrInfo.h"
      23             : #include "llvm/CodeGen/TargetLowering.h"
      24             : #include "llvm/CodeGen/TargetRegisterInfo.h"
      25             : #include "llvm/CodeGen/TargetSubtargetInfo.h"
      26             : #include "llvm/CodeGen/WasmEHFuncInfo.h"
      27             : #include "llvm/CodeGen/WinEHFuncInfo.h"
      28             : #include "llvm/IR/DataLayout.h"
      29             : #include "llvm/IR/DerivedTypes.h"
      30             : #include "llvm/IR/Function.h"
      31             : #include "llvm/IR/Instructions.h"
      32             : #include "llvm/IR/IntrinsicInst.h"
      33             : #include "llvm/IR/LLVMContext.h"
      34             : #include "llvm/IR/Module.h"
      35             : #include "llvm/Support/Debug.h"
      36             : #include "llvm/Support/ErrorHandling.h"
      37             : #include "llvm/Support/MathExtras.h"
      38             : #include "llvm/Support/raw_ostream.h"
      39             : #include "llvm/Target/TargetOptions.h"
      40             : #include <algorithm>
      41             : using namespace llvm;
      42             : 
      43             : #define DEBUG_TYPE "function-lowering-info"
      44             : 
       45             : /// isUsedOutsideOfDefiningBlock - Return true if this instruction is a PHI
       46             : /// node, is used by a PHI node, or is used outside of the basic block that
       47             : /// defines it; such values must be made available to other blocks.
      48     2971435 : static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
      49     2971435 :   if (I->use_empty()) return false;
      50     1833758 :   if (isa<PHINode>(I)) return true;
      51     1790275 :   const BasicBlock *BB = I->getParent();
      52     3672752 :   for (const User *U : I->users())
      53     2014842 :     if (cast<Instruction>(U)->getParent() != BB || isa<PHINode>(U))
      54             :       return true;
      55             : 
      56             :   return false;
      57             : }
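// A hypothetical illustration of the check above (this IR is invented, not
// taken from the report):
//
//   entry:
//     %x = add i32 %a, %b
//     br label %next
//   next:
//     %y = mul i32 %x, 2
//     ret i32 %y
//
// %x is defined in "entry" but used by %y in "next", so the function returns
// true for %x and a virtual register is allocated for it further below; %y is
// only used by the return in its own block and is not a PHI, so the function
// returns false for %y.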
      58             : 
      59     2971435 : static ISD::NodeType getPreferredExtendForValue(const Value *V) {
       60             :   // Among the users of the source value, if it is compared more often with
       61             :   // signed predicates than with unsigned predicates, we prefer to use
       62             :   // SIGN_EXTEND.
       63             :   //
       64             :   // With this optimization we can remove some redundant sign or zero
       65             :   // extension instructions, which in turn exposes more machine CSE
       66             :   // opportunities.
      67             :   ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
      68             :   unsigned NumOfSigned = 0, NumOfUnsigned = 0;
      69     5265116 :   for (const User *U : V->users()) {
      70             :     if (const auto *CI = dyn_cast<CmpInst>(U)) {
      71       75455 :       NumOfSigned += CI->isSigned();
      72       75455 :       NumOfUnsigned += CI->isUnsigned();
      73             :     }
      74             :   }
      75     2971435 :   if (NumOfSigned > NumOfUnsigned)
      76             :     ExtendKind = ISD::SIGN_EXTEND;
      77             : 
      78     2971435 :   return ExtendKind;
      79             : }
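// A hypothetical example of the counting heuristic above (IR invented for
// illustration): if %v is only compared as in
//
//   %c1 = icmp slt i32 %v, 10     ; signed predicate
//   %c2 = icmp sgt i32 %v, -5     ; signed predicate
//   %c3 = icmp ult i32 %v, 100    ; unsigned predicate
//
// then NumOfSigned == 2 and NumOfUnsigned == 1, so ISD::SIGN_EXTEND is
// returned; with equally many (or fewer) signed compares the result would
// stay ISD::ANY_EXTEND.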
      80             : 
      81      220732 : void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
      82             :                                SelectionDAG *DAG) {
      83      220732 :   Fn = &fn;
      84      220732 :   MF = &mf;
      85      220732 :   TLI = MF->getSubtarget().getTargetLowering();
      86      220732 :   RegInfo = &MF->getRegInfo();
      87      220732 :   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
      88      220732 :   unsigned StackAlign = TFI->getStackAlignment();
      89             : 
      90             :   // Check whether the function can return without sret-demotion.
      91             :   SmallVector<ISD::OutputArg, 4> Outs;
      92      441464 :   GetReturnInfo(Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI,
      93             :                 mf.getDataLayout());
      94      882928 :   CanLowerReturn = TLI->CanLowerReturn(Fn->getCallingConv(), *MF,
      95      441464 :                                        Fn->isVarArg(), Outs, Fn->getContext());
      96             : 
      97             :   // If this personality uses funclets, we need to do a bit more work.
      98             :   DenseMap<const AllocaInst *, TinyPtrVector<int *>> CatchObjects;
      99      220730 :   EHPersonality Personality = classifyEHPersonality(
     100      441460 :       Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr);
     101             :   if (isFuncletEHPersonality(Personality)) {
     102             :     // Calculate state numbers if we haven't already.
     103          89 :     WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();
     104          89 :     if (Personality == EHPersonality::MSVC_CXX)
     105          58 :       calculateWinCXXEHStateNumbers(&fn, EHInfo);
     106             :     else if (isAsynchronousEHPersonality(Personality))
     107          24 :       calculateSEHStateNumbers(&fn, EHInfo);
     108           7 :     else if (Personality == EHPersonality::CoreCLR)
     109           7 :       calculateClrEHStateNumbers(&fn, EHInfo);
     110             : 
      111             :     // Collect the catch-object allocas; their frame indices are assigned below.
     112         207 :     for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
     113         191 :       for (WinEHHandlerType &H : TBME.HandlerArray) {
     114          66 :         if (const AllocaInst *AI = H.CatchObj.Alloca)
     115          24 :           CatchObjects.insert({AI, {}}).first->second.push_back(
     116             :               &H.CatchObj.FrameIndex);
     117             :         else
     118          58 :           H.CatchObj.FrameIndex = INT_MAX;
     119             :       }
     120             :     }
     121             :   }
     122      220730 :   if (Personality == EHPersonality::Wasm_CXX) {
     123           3 :     WasmEHFuncInfo &EHInfo = *MF->getWasmEHFuncInfo();
     124           3 :     calculateWasmEHInfo(&fn, EHInfo);
     125             :   }
     126             : 
     127             :   // Initialize the mapping of values to registers.  This is only set up for
     128             :   // instruction values that are used outside of the block that defines
     129             :   // them.
     130      853568 :   for (const BasicBlock &BB : *Fn) {
     131     3383543 :     for (const Instruction &I : BB) {
     132     2971435 :       if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
     133       96842 :         Type *Ty = AI->getAllocatedType();
     134             :         unsigned Align =
     135      193684 :           std::max((unsigned)MF->getDataLayout().getPrefTypeAlignment(Ty),
     136      290526 :                    AI->getAlignment());
     137             : 
     138             :         // Static allocas can be folded into the initial stack frame
     139             :         // adjustment. For targets that don't realign the stack, don't
     140             :         // do this if there is an extra alignment requirement.
     141      193217 :         if (AI->isStaticAlloca() &&
     142       96657 :             (TFI->isStackRealignable() || (Align <= StackAlign))) {
     143       96368 :           const ConstantInt *CUI = cast<ConstantInt>(AI->getArraySize());
     144       96368 :           uint64_t TySize = MF->getDataLayout().getTypeAllocSize(Ty);
     145             : 
     146       96368 :           TySize *= CUI->getZExtValue();   // Get total allocated size.
     147       96368 :           if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
     148             :           int FrameIndex = INT_MAX;
     149       96368 :           auto Iter = CatchObjects.find(AI);
     150       96368 :           if (Iter != CatchObjects.end() && TLI->needsFixedCatchObjects()) {
     151           5 :             FrameIndex = MF->getFrameInfo().CreateFixedObject(
     152             :                 TySize, 0, /*Immutable=*/false, /*isAliased=*/true);
     153           5 :             MF->getFrameInfo().setObjectAlignment(FrameIndex, Align);
     154             :           } else {
     155       96363 :             FrameIndex =
     156       96363 :                 MF->getFrameInfo().CreateStackObject(TySize, Align, false, AI);
     157             :           }
     158             : 
     159      192736 :           StaticAllocaMap[AI] = FrameIndex;
     160             :           // Update the catch handler information.
     161       96368 :           if (Iter != CatchObjects.end()) {
     162          23 :             for (int *CatchObjPtr : Iter->second)
     163           8 :               *CatchObjPtr = FrameIndex;
     164             :           }
     165             :         } else {
     166             :           // FIXME: Overaligned static allocas should be grouped into
     167             :           // a single dynamic allocation instead of using a separate
     168             :           // stack allocation for each one.
     169         474 :           if (Align <= StackAlign)
     170             :             Align = 0;
     171             :           // Inform the Frame Information that we have variable-sized objects.
     172         474 :           MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, AI);
     173             :         }
     174             :       }
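// A worked example of the static-alloca sizing above, using an invented
// instruction and assuming a data layout where [4 x i32] occupies 16 bytes:
//
//   %buf = alloca [4 x i32], i32 8, align 4    ; in the entry block
//
// getTypeAllocSize gives TySize = 16, the constant array-size operand is 8,
// so TySize becomes 16 * 8 = 128 and a 128-byte stack object is created for
// %buf, provided the computed alignment does not exceed the stack alignment
// or the target can realign the stack.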
     175             : 
     176             :       // Look for inline asm that clobbers the SP register.
     177     2971435 :       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
     178             :         ImmutableCallSite CS(&I);
     179      481129 :         if (isa<InlineAsm>(CS.getCalledValue())) {
     180       16590 :           unsigned SP = TLI->getStackPointerRegisterToSaveRestore();
     181       16590 :           const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
     182             :           std::vector<TargetLowering::AsmOperandInfo> Ops =
     183       33180 :               TLI->ParseConstraints(Fn->getParent()->getDataLayout(), TRI, CS);
     184       84300 :           for (TargetLowering::AsmOperandInfo &Op : Ops) {
     185       67710 :             if (Op.Type == InlineAsm::isClobber) {
     186             :               // Clobbers don't have SDValue operands, hence SDValue().
     187       52530 :               TLI->ComputeConstraintToUse(Op, SDValue(), DAG);
     188             :               std::pair<unsigned, const TargetRegisterClass *> PhysReg =
     189       52530 :                   TLI->getRegForInlineAsmConstraint(TRI, Op.ConstraintCode,
     190      105060 :                                                     Op.ConstraintVT);
     191       52530 :               if (PhysReg.first == SP)
     192          69 :                 MF->getFrameInfo().setHasOpaqueSPAdjustment(true);
     193             :             }
     194             :           }
     195             :         }
     196             :       }
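// As a hypothetical example of the clobber scan above, on a 32-bit x86-style
// target where the stack pointer register is ESP, a call such as
//
//   call void asm sideeffect "", "~{esp}"()
//
// produces a clobber operand whose constraint resolves to ESP, matching the
// register returned by getStackPointerRegisterToSaveRestore(), so the frame
// is marked as having an opaque SP adjustment.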
     197             : 
     198             :       // Look for calls to the @llvm.va_start intrinsic. We can omit some
     199             :       // prologue boilerplate for variadic functions that don't examine their
     200             :       // arguments.
     201             :       if (const auto *II = dyn_cast<IntrinsicInst>(&I)) {
     202      248281 :         if (II->getIntrinsicID() == Intrinsic::vastart)
     203         237 :           MF->getFrameInfo().setHasVAStart(true);
     204             :       }
     205             : 
     206             :       // If we have a musttail call in a variadic function, we need to ensure we
     207             :       // forward implicit register parameters.
     208             :       if (const auto *CI = dyn_cast<CallInst>(&I)) {
     209      441215 :         if (CI->isMustTailCall() && Fn->isVarArg())
     210          33 :           MF->getFrameInfo().setHasMustTailInVarArgFunc(true);
     211             :       }
     212             : 
     213             :       // Mark values used outside their block as exported, by allocating
     214             :       // a virtual register for them.
     215     2971435 :       if (isUsedOutsideOfDefiningBlock(&I))
     216      175848 :         if (!isa<AllocaInst>(I) || !StaticAllocaMap.count(cast<AllocaInst>(&I)))
     217      138035 :           InitializeRegForValue(&I);
     218             : 
     219             :       // Decide the preferred extend type for a value.
     220     5942870 :       PreferredExtendType[&I] = getPreferredExtendForValue(&I);
     221             :     }
     222             :   }
     223             : 
     224             :   // Create an initial MachineBasicBlock for each LLVM BasicBlock in F.  This
     225             :   // also creates the initial PHI MachineInstrs, though none of the input
     226             :   // operands are populated.
     227      853568 :   for (const BasicBlock &BB : *Fn) {
     228             :     // Don't create MachineBasicBlocks for imaginary EH pad blocks. These blocks
     229             :     // are really data, and no instructions can live here.
     230      412108 :     if (BB.isEHPad()) {
     231       27517 :       const Instruction *PadInst = BB.getFirstNonPHI();
     232             :       // If this is a non-landingpad EH pad, mark this function as using
     233             :       // funclets.
     234             :       // FIXME: SEH catchpads do not create EH scope/funclets, so we could avoid
     235             :       // setting this in such cases in order to improve frame layout.
     236       27517 :       if (!isa<LandingPadInst>(PadInst)) {
     237         251 :         MF->setHasEHScopes(true);
     238         251 :         MF->setHasEHFunclets(true);
     239         251 :         MF->getFrameInfo().setHasOpaqueSPAdjustment(true);
     240             :       }
     241       27517 :       if (isa<CatchSwitchInst>(PadInst)) {
     242             :         assert(&*BB.begin() == PadInst &&
     243             :                "WinEHPrepare failed to remove PHIs from imaginary BBs");
     244             :         continue;
     245             :       }
     246             :       if (isa<FuncletPadInst>(PadInst))
     247             :         assert(&*BB.begin() == PadInst && "WinEHPrepare failed to demote PHIs");
     248             :     }
     249             : 
     250      412009 :     MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(&BB);
     251      824018 :     MBBMap[&BB] = MBB;
     252      412009 :     MF->push_back(MBB);
     253             : 
     254             :     // Transfer the address-taken flag. This is necessary because there could
     255             :     // be multiple MachineBasicBlocks corresponding to one BasicBlock, and only
     256             :     // the first one should be marked.
     257      412009 :     if (BB.hasAddressTaken())
     258             :       MBB->setHasAddressTaken();
     259             : 
     260             :     // Mark landing pad blocks.
     261      412009 :     if (BB.isEHPad())
     262             :       MBB->setIsEHPad();
     263             : 
     264             :     // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
     265             :     // appropriate.
     266      456957 :     for (const PHINode &PN : BB.phis()) {
     267       44948 :       if (PN.use_empty())
     268        2932 :         continue;
     269             : 
     270             :       // Skip empty types
     271       43483 :       if (PN.getType()->isEmptyTy())
     272           2 :         continue;
     273             : 
     274             :       DebugLoc DL = PN.getDebugLoc();
     275       86962 :       unsigned PHIReg = ValueMap[&PN];
     276             :       assert(PHIReg && "PHI node does not have an assigned virtual register!");
     277             : 
     278             :       SmallVector<EVT, 4> ValueVTs;
     279       43481 :       ComputeValueVTs(*TLI, MF->getDataLayout(), PN.getType(), ValueVTs);
     280      131657 :       for (EVT VT : ValueVTs) {
     281       44088 :         unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
     282       44088 :         const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
     283      134016 :         for (unsigned i = 0; i != NumRegisters; ++i)
     284       44964 :           BuildMI(MBB, DL, TII->get(TargetOpcode::PHI), PHIReg + i);
     285       44088 :         PHIReg += NumRegisters;
     286             :       }
     287             :     }
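// For example (hypothetical, assuming a 32-bit target where i64 needs two
// registers): a used PHI of type i64 has ValueVTs == {i64}, NumRegisters == 2,
// so two TargetOpcode::PHI machine instructions are emitted, defining PHIReg
// and PHIReg + 1; their incoming operands are filled in later, as noted above.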
     288             :   }
     289             : 
     290             :   if (isFuncletEHPersonality(Personality)) {
     291          89 :     WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();
     292             : 
     293             :     // Map all BB references in the WinEH data to MBBs.
     294         207 :     for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
     295         191 :       for (WinEHHandlerType &H : TBME.HandlerArray) {
     296          66 :         if (H.Handler)
     297         198 :           H.Handler = MBBMap[H.Handler.get<const BasicBlock *>()];
     298             :       }
     299             :     }
     300         373 :     for (CxxUnwindMapEntry &UME : EHInfo.CxxUnwindMap)
     301         142 :       if (UME.Cleanup)
     302          72 :         UME.Cleanup = MBBMap[UME.Cleanup.get<const BasicBlock *>()];
     303         161 :     for (SEHUnwindMapEntry &UME : EHInfo.SEHUnwindMap) {
     304          36 :       const auto *BB = UME.Handler.get<const BasicBlock *>();
     305          72 :       UME.Handler = MBBMap[BB];
     306             :     }
     307         127 :     for (ClrEHUnwindMapEntry &CME : EHInfo.ClrEHUnwindMap) {
     308          19 :       const auto *BB = CME.Handler.get<const BasicBlock *>();
     309          38 :       CME.Handler = MBBMap[BB];
     310             :     }
     311             :   }
     312             : 
     313      220641 :   else if (Personality == EHPersonality::Wasm_CXX) {
     314           3 :     WasmEHFuncInfo &EHInfo = *MF->getWasmEHFuncInfo();
      315             :     // Map all BB references in the Wasm EH data to MBBs.
     316             :     DenseMap<BBOrMBB, BBOrMBB> NewMap;
     317           7 :     for (auto &KV : EHInfo.EHPadUnwindMap) {
     318           1 :       const auto *Src = KV.first.get<const BasicBlock *>();
     319           1 :       const auto *Dst = KV.second.get<const BasicBlock *>();
     320           4 :       NewMap[MBBMap[Src]] = MBBMap[Dst];
     321             :     }
     322           3 :     EHInfo.EHPadUnwindMap = std::move(NewMap);
     323           3 :     NewMap.clear();
     324          14 :     for (auto &KV : EHInfo.ThrowUnwindMap) {
     325           8 :       const auto *Src = KV.first.get<const BasicBlock *>();
     326           8 :       const auto *Dst = KV.second.get<const BasicBlock *>();
     327          32 :       NewMap[MBBMap[Src]] = MBBMap[Dst];
     328             :     }
     329           3 :     EHInfo.ThrowUnwindMap = std::move(NewMap);
     330             :   }
     331      220730 : }
     332             : 
     333             : /// clear - Clear out all the function-specific state. This returns this
     334             : /// FunctionLoweringInfo to an empty state, ready to be used for a
     335             : /// different function.
     336      220653 : void FunctionLoweringInfo::clear() {
     337      220653 :   MBBMap.clear();
     338      220653 :   ValueMap.clear();
     339      220653 :   VirtReg2Value.clear();
     340      220653 :   StaticAllocaMap.clear();
     341             :   LiveOutRegInfo.clear();
     342      220653 :   VisitedBBs.clear();
     343             :   ArgDbgValues.clear();
     344      220653 :   ByValArgFrameIndexMap.clear();
     345      220653 :   RegFixups.clear();
     346             :   RegsWithFixups.clear();
     347             :   StatepointStackSlots.clear();
     348      220653 :   StatepointSpillMaps.clear();
     349      220653 :   PreferredExtendType.clear();
     350      220653 : }
     351             : 
     352             : /// CreateReg - Allocate a single virtual register for the given type.
     353      352724 : unsigned FunctionLoweringInfo::CreateReg(MVT VT) {
     354     1410896 :   return RegInfo->createVirtualRegister(
     355     1410896 :       MF->getSubtarget().getTargetLowering()->getRegClassFor(VT));
     356             : }
     357             : 
     358             : /// CreateRegs - Allocate the appropriate number of virtual registers of
     359             : /// the correctly promoted or expanded types.  Assign these registers
     360             : /// consecutive vreg numbers and return the first assigned number.
     361             : ///
     362             : /// In the case that the given value has struct or array type, this function
     363             : /// will assign registers for each member or element.
     364             : ///
     365      359528 : unsigned FunctionLoweringInfo::CreateRegs(Type *Ty) {
     366      359528 :   const TargetLowering *TLI = MF->getSubtarget().getTargetLowering();
     367             : 
     368             :   SmallVector<EVT, 4> ValueVTs;
     369      359528 :   ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);
     370             : 
     371             :   unsigned FirstReg = 0;
     372      705941 :   for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
     373      692826 :     EVT ValueVT = ValueVTs[Value];
     374      346413 :     MVT RegisterVT = TLI->getRegisterType(Ty->getContext(), ValueVT);
     375             : 
     376      346413 :     unsigned NumRegs = TLI->getNumRegisters(Ty->getContext(), ValueVT);
     377     1051325 :     for (unsigned i = 0; i != NumRegs; ++i) {
     378      352456 :       unsigned R = CreateReg(RegisterVT);
     379      352456 :       if (!FirstReg) FirstReg = R;
     380             :     }
     381             :   }
     382      359528 :   return FirstReg;
     383             : }
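// A sketch of how the loop above behaves for an aggregate, assuming a
// hypothetical 32-bit target where i64 is split into two i32 registers: for
// Ty = { i64, i32 }, ComputeValueVTs yields the value types i64 and i32; the
// i64 part needs NumRegs == 2 and the i32 part NumRegs == 1, so three
// consecutive virtual registers are created and the first one is returned.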
     384             : 
     385             : /// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
     386             : /// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
     387             : /// the register's LiveOutInfo is for a smaller bit width, it is extended to
     388             : /// the larger bit width by zero extension. The bit width must be no smaller
     389             : /// than the LiveOutInfo's existing bit width.
     390             : const FunctionLoweringInfo::LiveOutInfo *
     391       27627 : FunctionLoweringInfo::GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth) {
     392       27627 :   if (!LiveOutRegInfo.inBounds(Reg))
     393             :     return nullptr;
     394             : 
     395             :   LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
     396       27342 :   if (!LOI->IsValid)
     397             :     return nullptr;
     398             : 
     399       26472 :   if (BitWidth > LOI->Known.getBitWidth()) {
     400       17056 :     LOI->NumSignBits = 1;
     401       17056 :     LOI->Known = LOI->Known.zextOrTrunc(BitWidth);
     402             :   }
     403             : 
     404             :   return LOI;
     405             : }
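// Illustration with invented numbers: if LiveOutInfo for a register was
// recorded at a width of 8 bits with NumSignBits == 5, and a caller asks for
// BitWidth == 32, the code above widens Known via zextOrTrunc(32) and
// conservatively resets NumSignBits to 1, because the cached facts were
// computed for the narrower type.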
     406             : 
     407             : /// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
     408             : /// register based on the LiveOutInfo of its operands.
     409       34285 : void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
     410       34285 :   Type *Ty = PN->getType();
     411       34285 :   if (!Ty->isIntegerTy() || Ty->isVectorTy())
     412       20744 :     return;
     413             : 
     414             :   SmallVector<EVT, 1> ValueVTs;
     415       16415 :   ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);
     416             :   assert(ValueVTs.size() == 1 &&
     417             :          "PHIs with non-vector integer types should have a single VT.");
     418       16415 :   EVT IntVT = ValueVTs[0];
     419             : 
     420       16415 :   if (TLI->getNumRegisters(PN->getContext(), IntVT) != 1)
     421             :     return;
     422       32636 :   IntVT = TLI->getTypeToTransformTo(PN->getContext(), IntVT);
     423       16318 :   unsigned BitWidth = IntVT.getSizeInBits();
     424             : 
     425       32636 :   unsigned DestReg = ValueMap[PN];
     426       16318 :   if (!TargetRegisterInfo::isVirtualRegister(DestReg))
     427             :     return;
     428       14887 :   LiveOutRegInfo.grow(DestReg);
     429             :   LiveOutInfo &DestLOI = LiveOutRegInfo[DestReg];
     430             : 
     431             :   Value *V = PN->getIncomingValue(0);
     432       14887 :   if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
     433          64 :     DestLOI.NumSignBits = 1;
     434          64 :     DestLOI.Known = KnownBits(BitWidth);
     435          64 :     return;
     436             :   }
     437             : 
     438             :   if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
     439        1966 :     APInt Val = CI->getValue().zextOrTrunc(BitWidth);
     440        1966 :     DestLOI.NumSignBits = Val.getNumSignBits();
     441        1966 :     DestLOI.Known.Zero = ~Val;
     442        1966 :     DestLOI.Known.One = Val;
     443             :   } else {
     444             :     assert(ValueMap.count(V) && "V should have been placed in ValueMap when its"
     445             :                                 "CopyToReg node was created.");
     446       25714 :     unsigned SrcReg = ValueMap[V];
     447       12857 :     if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
     448           0 :       DestLOI.IsValid = false;
     449           0 :       return;
     450             :     }
     451       12857 :     const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
     452       12857 :     if (!SrcLOI) {
     453         591 :       DestLOI.IsValid = false;
     454         591 :       return;
     455             :     }
     456       12266 :     DestLOI = *SrcLOI;
     457             :   }
     458             : 
     459             :   assert(DestLOI.Known.Zero.getBitWidth() == BitWidth &&
     460             :          DestLOI.Known.One.getBitWidth() == BitWidth &&
     461             :          "Masks should have the same bit width as the type.");
     462             : 
     463       46478 :   for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
     464             :     Value *V = PN->getIncomingValue(i);
     465       16814 :     if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
     466         127 :       DestLOI.NumSignBits = 1;
     467         127 :       DestLOI.Known = KnownBits(BitWidth);
     468         127 :       return;
     469             :     }
     470             : 
     471             :     if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
     472        1917 :       APInt Val = CI->getValue().zextOrTrunc(BitWidth);
     473        3834 :       DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, Val.getNumSignBits());
     474        5751 :       DestLOI.Known.Zero &= ~Val;
     475        1917 :       DestLOI.Known.One &= Val;
     476             :       continue;
     477             :     }
     478             : 
     479             :     assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
     480             :                                 "its CopyToReg node was created.");
     481       29540 :     unsigned SrcReg = ValueMap[V];
     482       14770 :     if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
     483           0 :       DestLOI.IsValid = false;
     484           0 :       return;
     485             :     }
     486       14770 :     const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
     487       14770 :     if (!SrcLOI) {
     488         564 :       DestLOI.IsValid = false;
     489         564 :       return;
     490             :     }
     491       28412 :     DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, SrcLOI->NumSignBits);
     492       14206 :     DestLOI.Known.Zero &= SrcLOI->Known.Zero;
     493       14206 :     DestLOI.Known.One &= SrcLOI->Known.One;
     494             :   }
     495             : }
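// Worked example of the constant merging above, with invented values: for a
// 32-bit PHI whose incoming values are the constants 1 and 3, the first value
// sets Known.One = 0x1, Known.Zero = ~0x1 and NumSignBits = 31; merging the
// second value leaves Known.One = 0x1, Known.Zero = 0xFFFFFFFC (bit 1
// unknown) and NumSignBits = min(31, 30) = 30.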
     496             : 
      497             : /// setArgumentFrameIndex - Record the frame index for the byval
      498             : /// argument. This overrides any previous frame index entry for this
      499             : /// argument.
     500        1802 : void FunctionLoweringInfo::setArgumentFrameIndex(const Argument *A,
     501             :                                                  int FI) {
     502        3604 :   ByValArgFrameIndexMap[A] = FI;
     503        1802 : }
     504             : 
      505             : /// getArgumentFrameIndex - Get the frame index for the byval argument.
      506             : /// If the argument does not have an assigned frame index then INT_MAX is
      507             : /// returned.
     508        5030 : int FunctionLoweringInfo::getArgumentFrameIndex(const Argument *A) {
     509        5030 :   auto I = ByValArgFrameIndexMap.find(A);
     510        5030 :   if (I != ByValArgFrameIndexMap.end())
     511          95 :     return I->second;
     512             :   LLVM_DEBUG(dbgs() << "Argument does not have assigned frame index!\n");
     513             :   return INT_MAX;
     514             : }
     515             : 
     516          12 : unsigned FunctionLoweringInfo::getCatchPadExceptionPointerVReg(
     517             :     const Value *CPI, const TargetRegisterClass *RC) {
     518          12 :   MachineRegisterInfo &MRI = MF->getRegInfo();
     519          24 :   auto I = CatchPadExceptionPointers.insert({CPI, 0});
     520          12 :   unsigned &VReg = I.first->second;
     521          12 :   if (I.second)
     522           6 :     VReg = MRI.createVirtualRegister(RC);
     523             :   assert(VReg && "null vreg in exception pointer table!");
     524          12 :   return VReg;
     525             : }
     526             : 
     527             : unsigned
     528         507 : FunctionLoweringInfo::getOrCreateSwiftErrorVReg(const MachineBasicBlock *MBB,
     529             :                                                 const Value *Val) {
     530         507 :   auto Key = std::make_pair(MBB, Val);
     531         507 :   auto It = SwiftErrorVRegDefMap.find(Key);
     532             :   // If this is the first use of this swifterror value in this basic block,
     533             :   // create a new virtual register.
     534             :   // After we processed all basic blocks we will satisfy this "upwards exposed
     535             :   // use" by inserting a copy or phi at the beginning of this block.
     536         507 :   if (It == SwiftErrorVRegDefMap.end()) {
     537          45 :     auto &DL = MF->getDataLayout();
     538          90 :     const TargetRegisterClass *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
     539          90 :     auto VReg = MF->getRegInfo().createVirtualRegister(RC);
     540          45 :     SwiftErrorVRegDefMap[Key] = VReg;
     541          90 :     SwiftErrorVRegUpwardsUse[Key] = VReg;
     542          45 :     return VReg;
     543         462 :   } else return It->second;
     544             : }
     545             : 
     546         504 : void FunctionLoweringInfo::setCurrentSwiftErrorVReg(
     547             :     const MachineBasicBlock *MBB, const Value *Val, unsigned VReg) {
     548        1008 :   SwiftErrorVRegDefMap[std::make_pair(MBB, Val)] = VReg;
     549         504 : }
     550             : 
     551             : std::pair<unsigned, bool>
     552         300 : FunctionLoweringInfo::getOrCreateSwiftErrorVRegDefAt(const Instruction *I) {
     553             :   auto Key = PointerIntPair<const Instruction *, 1, bool>(I, true);
     554         300 :   auto It = SwiftErrorVRegDefUses.find(Key);
     555         300 :   if (It == SwiftErrorVRegDefUses.end()) {
     556         218 :     auto &DL = MF->getDataLayout();
     557         436 :     const TargetRegisterClass *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
     558         436 :     unsigned VReg =  MF->getRegInfo().createVirtualRegister(RC);
     559         218 :     SwiftErrorVRegDefUses[Key] = VReg;
     560             :     return std::make_pair(VReg, true);
     561             :   }
     562          82 :   return std::make_pair(It->second, false);
     563             : }
     564             : 
     565             : std::pair<unsigned, bool>
     566         381 : FunctionLoweringInfo::getOrCreateSwiftErrorVRegUseAt(const Instruction *I, const MachineBasicBlock *MBB, const Value *Val) {
     567             :   auto Key = PointerIntPair<const Instruction *, 1, bool>(I, false);
     568         381 :   auto It = SwiftErrorVRegDefUses.find(Key);
     569         381 :   if (It == SwiftErrorVRegDefUses.end()) {
     570         275 :     unsigned VReg = getOrCreateSwiftErrorVReg(MBB, Val);
     571         275 :     SwiftErrorVRegDefUses[Key] = VReg;
     572             :     return std::make_pair(VReg, true);
     573             :   }
     574         106 :   return std::make_pair(It->second, false);
     575             : }
     576             : 
     577             : const Value *
     578      270182 : FunctionLoweringInfo::getValueFromVirtualReg(unsigned Vreg) {
     579      270182 :   if (VirtReg2Value.empty()) {
     580       37997 :     for (auto &P : ValueMap) {
     581       14142 :       VirtReg2Value[P.second] = P.first;
     582             :     }
     583             :   }
     584      540364 :   return VirtReg2Value[Vreg];
     585             : }

Generated by: LCOV version 1.13