LCOV - code coverage report
Current view: top level - lib/CodeGen/SelectionDAG - FunctionLoweringInfo.cpp (source / functions) Hit Total Coverage
Test: llvm-toolchain.info Lines: 257 261 98.5 %
Date: 2018-10-20 13:21:21 Functions: 16 16 100.0 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : //===-- FunctionLoweringInfo.cpp ------------------------------------------===//
       2             : //
       3             : //                     The LLVM Compiler Infrastructure
       4             : //
       5             : // This file is distributed under the University of Illinois Open Source
       6             : // License. See LICENSE.TXT for details.
       7             : //
       8             : //===----------------------------------------------------------------------===//
       9             : //
      10             : // This implements routines for translating functions from LLVM IR into
      11             : // Machine IR.
      12             : //
      13             : //===----------------------------------------------------------------------===//
      14             : 
      15             : #include "llvm/CodeGen/FunctionLoweringInfo.h"
      16             : #include "llvm/CodeGen/Analysis.h"
      17             : #include "llvm/CodeGen/MachineFrameInfo.h"
      18             : #include "llvm/CodeGen/MachineFunction.h"
      19             : #include "llvm/CodeGen/MachineInstrBuilder.h"
      20             : #include "llvm/CodeGen/MachineRegisterInfo.h"
      21             : #include "llvm/CodeGen/TargetFrameLowering.h"
      22             : #include "llvm/CodeGen/TargetInstrInfo.h"
      23             : #include "llvm/CodeGen/TargetLowering.h"
      24             : #include "llvm/CodeGen/TargetRegisterInfo.h"
      25             : #include "llvm/CodeGen/TargetSubtargetInfo.h"
      26             : #include "llvm/CodeGen/WasmEHFuncInfo.h"
      27             : #include "llvm/CodeGen/WinEHFuncInfo.h"
      28             : #include "llvm/IR/DataLayout.h"
      29             : #include "llvm/IR/DerivedTypes.h"
      30             : #include "llvm/IR/Function.h"
      31             : #include "llvm/IR/Instructions.h"
      32             : #include "llvm/IR/IntrinsicInst.h"
      33             : #include "llvm/IR/LLVMContext.h"
      34             : #include "llvm/IR/Module.h"
      35             : #include "llvm/Support/Debug.h"
      36             : #include "llvm/Support/ErrorHandling.h"
      37             : #include "llvm/Support/MathExtras.h"
      38             : #include "llvm/Support/raw_ostream.h"
      39             : #include "llvm/Target/TargetOptions.h"
      40             : #include <algorithm>
      41             : using namespace llvm;
      42             : 
      43             : #define DEBUG_TYPE "function-lowering-info"
      44             : 
      45             : /// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
      46             : /// PHI nodes or outside of the basic block that defines it, or used by a
      47             : /// switch or atomic instruction, which may expand to multiple basic blocks.
      48    37178365 : static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
      49    37178365 :   if (I->use_empty()) return false;
      50    24602964 :   if (isa<PHINode>(I)) return true;
      51    24411862 :   const BasicBlock *BB = I->getParent();
      52    45074645 :   for (const User *U : I->users())
      53    26661936 :     if (cast<Instruction>(U)->getParent() != BB || isa<PHINode>(U))
      54             :       return true;
      55             : 
      56             :   return false;
      57             : }
      58             : 
      59    37178365 : static ISD::NodeType getPreferredExtendForValue(const Value *V) {
      60             :   // For the users of the source value being used for compare instruction, if
      61             :   // the number of signed predicate is greater than unsigned predicate, we
      62             :   // prefer to use SIGN_EXTEND.
      63             :   //
      64             :   // With this optimization, we would be able to reduce some redundant sign or
      65             :   // zero extension instruction, and eventually more machine CSE opportunities
      66             :   // can be exposed.
      67             :   ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
      68             :   unsigned NumOfSigned = 0, NumOfUnsigned = 0;
      69    72312363 :   for (const User *U : V->users()) {
      70             :     if (const auto *CI = dyn_cast<CmpInst>(U)) {
      71      737779 :       NumOfSigned += CI->isSigned();
      72      737779 :       NumOfUnsigned += CI->isUnsigned();
      73             :     }
      74             :   }
      75    37178365 :   if (NumOfSigned > NumOfUnsigned)
      76             :     ExtendKind = ISD::SIGN_EXTEND;
      77             : 
      78    37178365 :   return ExtendKind;
      79             : }
      80             : 
      81      405294 : void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
      82             :                                SelectionDAG *DAG) {
      83      405294 :   Fn = &fn;
      84      405294 :   MF = &mf;
      85      405294 :   TLI = MF->getSubtarget().getTargetLowering();
      86      405294 :   RegInfo = &MF->getRegInfo();
      87      405294 :   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
      88      405294 :   unsigned StackAlign = TFI->getStackAlignment();
      89             : 
      90             :   // Check whether the function can return without sret-demotion.
      91             :   SmallVector<ISD::OutputArg, 4> Outs;
      92      405294 :   CallingConv::ID CC = Fn->getCallingConv();
      93             : 
      94      405294 :   GetReturnInfo(CC, Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI,
      95             :                 mf.getDataLayout());
      96      405292 :   CanLowerReturn =
      97      405294 :       TLI->CanLowerReturn(CC, *MF, Fn->isVarArg(), Outs, Fn->getContext());
      98             : 
      99             :   // If this personality uses funclets, we need to do a bit more work.
     100             :   DenseMap<const AllocaInst *, TinyPtrVector<int *>> CatchObjects;
     101      465063 :   EHPersonality Personality = classifyEHPersonality(
     102      405292 :       Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr);
     103             :   if (isFuncletEHPersonality(Personality)) {
     104             :     // Calculate state numbers if we haven't already.
     105          91 :     WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();
     106          91 :     if (Personality == EHPersonality::MSVC_CXX)
     107          59 :       calculateWinCXXEHStateNumbers(&fn, EHInfo);
     108             :     else if (isAsynchronousEHPersonality(Personality))
     109          25 :       calculateSEHStateNumbers(&fn, EHInfo);
     110           7 :     else if (Personality == EHPersonality::CoreCLR)
     111           7 :       calculateClrEHStateNumbers(&fn, EHInfo);
     112             : 
     113             :     // Map all BB references in the WinEH data to MBBs.
     114         151 :     for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
     115         127 :       for (WinEHHandlerType &H : TBME.HandlerArray) {
     116          67 :         if (const AllocaInst *AI = H.CatchObj.Alloca)
     117          16 :           CatchObjects.insert({AI, {}}).first->second.push_back(
     118             :               &H.CatchObj.FrameIndex);
     119             :         else
     120          59 :           H.CatchObj.FrameIndex = INT_MAX;
     121             :       }
     122             :     }
     123             :   }
     124      405292 :   if (Personality == EHPersonality::Wasm_CXX) {
     125           8 :     WasmEHFuncInfo &EHInfo = *MF->getWasmEHFuncInfo();
     126           8 :     calculateWasmEHInfo(&fn, EHInfo);
     127             :   }
     128             : 
     129             :   // Initialize the mapping of values to registers.  This is only set up for
     130             :   // instruction values that are used outside of the block that defines
     131             :   // them.
     132     3623581 :   for (const BasicBlock &BB : *Fn) {
     133    40396654 :     for (const Instruction &I : BB) {
     134    37178365 :       if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
     135     6313250 :         Type *Ty = AI->getAllocatedType();
     136             :         unsigned Align =
     137     6313250 :           std::max((unsigned)MF->getDataLayout().getPrefTypeAlignment(Ty),
     138     6313250 :                    AI->getAlignment());
     139             : 
     140             :         // Static allocas can be folded into the initial stack frame
     141             :         // adjustment. For targets that don't realign the stack, don't
     142             :         // do this if there is an extra alignment requirement.
     143     6313250 :         if (AI->isStaticAlloca() &&
     144     6312689 :             (TFI->isStackRealignable() || (Align <= StackAlign))) {
     145     6312682 :           const ConstantInt *CUI = cast<ConstantInt>(AI->getArraySize());
     146     6312682 :           uint64_t TySize = MF->getDataLayout().getTypeAllocSize(Ty);
     147             : 
     148     6312682 :           TySize *= CUI->getZExtValue();   // Get total allocated size.
     149     6312682 :           if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
     150             :           int FrameIndex = INT_MAX;
     151     6312682 :           auto Iter = CatchObjects.find(AI);
     152     6312682 :           if (Iter != CatchObjects.end() && TLI->needsFixedCatchObjects()) {
     153           5 :             FrameIndex = MF->getFrameInfo().CreateFixedObject(
     154             :                 TySize, 0, /*Immutable=*/false, /*isAliased=*/true);
     155           5 :             MF->getFrameInfo().setObjectAlignment(FrameIndex, Align);
     156             :           } else {
     157             :             FrameIndex =
     158     6312677 :                 MF->getFrameInfo().CreateStackObject(TySize, Align, false, AI);
     159             :           }
     160             : 
     161     6312682 :           StaticAllocaMap[AI] = FrameIndex;
     162             :           // Update the catch handler information.
     163     6312682 :           if (Iter != CatchObjects.end()) {
     164          15 :             for (int *CatchObjPtr : Iter->second)
     165           8 :               *CatchObjPtr = FrameIndex;
     166             :           }
     167             :         } else {
     168             :           // FIXME: Overaligned static allocas should be grouped into
     169             :           // a single dynamic allocation instead of using a separate
     170             :           // stack allocation for each one.
     171         568 :           if (Align <= StackAlign)
     172             :             Align = 0;
     173             :           // Inform the Frame Information that we have variable-sized objects.
     174        1057 :           MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, AI);
     175             :         }
     176             :       }
     177             : 
     178             :       // Look for inline asm that clobbers the SP register.
     179    37178365 :       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
     180             :         ImmutableCallSite CS(&I);
     181     2945173 :         if (isa<InlineAsm>(CS.getCalledValue())) {
     182       16924 :           unsigned SP = TLI->getStackPointerRegisterToSaveRestore();
     183       16924 :           const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
     184             :           std::vector<TargetLowering::AsmOperandInfo> Ops =
     185       33848 :               TLI->ParseConstraints(Fn->getParent()->getDataLayout(), TRI, CS);
     186       85506 :           for (TargetLowering::AsmOperandInfo &Op : Ops) {
     187       68582 :             if (Op.Type == InlineAsm::isClobber) {
     188             :               // Clobbers don't have SDValue operands, hence SDValue().
     189       53219 :               TLI->ComputeConstraintToUse(Op, SDValue(), DAG);
     190             :               std::pair<unsigned, const TargetRegisterClass *> PhysReg =
     191       53219 :                   TLI->getRegForInlineAsmConstraint(TRI, Op.ConstraintCode,
     192      106438 :                                                     Op.ConstraintVT);
     193       53219 :               if (PhysReg.first == SP)
     194          73 :                 MF->getFrameInfo().setHasOpaqueSPAdjustment(true);
     195             :             }
     196             :           }
     197             :         }
     198             :       }
     199             : 
     200             :       // Look for calls to the @llvm.va_start intrinsic. We can omit some
     201             :       // prologue boilerplate for variadic functions that don't examine their
     202             :       // arguments.
     203             :       if (const auto *II = dyn_cast<IntrinsicInst>(&I)) {
     204      811719 :         if (II->getIntrinsicID() == Intrinsic::vastart)
     205         260 :           MF->getFrameInfo().setHasVAStart(true);
     206             :       }
     207             : 
     208             :       // If we have a musttail call in a variadic function, we need to ensure we
     209             :       // forward implicit register parameters.
     210             :       if (const auto *CI = dyn_cast<CallInst>(&I)) {
     211     2448182 :         if (CI->isMustTailCall() && Fn->isVarArg())
     212          39 :           MF->getFrameInfo().setHasMustTailInVarArgFunc(true);
     213             :       }
     214             : 
     215             :       // Mark values used outside their block as exported, by allocating
     216             :       // a virtual register for them.
     217    37178365 :       if (isUsedOutsideOfDefiningBlock(&I))
     218    11409960 :         if (!isa<AllocaInst>(I) || !StaticAllocaMap.count(cast<AllocaInst>(&I)))
     219      970655 :           InitializeRegForValue(&I);
     220             : 
     221             :       // Decide the preferred extend type for a value.
     222    37178365 :       PreferredExtendType[&I] = getPreferredExtendForValue(&I);
     223             :     }
     224             :   }
     225             : 
     226             :   // Create an initial MachineBasicBlock for each LLVM BasicBlock in F.  This
     227             :   // also creates the initial PHI MachineInstrs, though none of the input
     228             :   // operands are populated.
     229     3623581 :   for (const BasicBlock &BB : *Fn) {
     230             :     // Don't create MachineBasicBlocks for imaginary EH pad blocks. These blocks
     231             :     // are really data, and no instructions can live here.
     232     3218289 :     if (BB.isEHPad()) {
     233      338453 :       const Instruction *PadInst = BB.getFirstNonPHI();
     234             :       // If this is a non-landingpad EH pad, mark this function as using
     235             :       // funclets.
     236             :       // FIXME: SEH catchpads do not create EH scope/funclets, so we could avoid
     237             :       // setting this in such cases in order to improve frame layout.
     238      338453 :       if (!isa<LandingPadInst>(PadInst)) {
     239         272 :         MF->setHasEHScopes(true);
     240         272 :         MF->setHasEHFunclets(true);
     241         272 :         MF->getFrameInfo().setHasOpaqueSPAdjustment(true);
     242             :       }
     243      338453 :       if (isa<CatchSwitchInst>(PadInst)) {
     244             :         assert(&*BB.begin() == PadInst &&
     245             :                "WinEHPrepare failed to remove PHIs from imaginary BBs");
     246             :         continue;
     247             :       }
     248             :       if (isa<FuncletPadInst>(PadInst))
     249             :         assert(&*BB.begin() == PadInst && "WinEHPrepare failed to demote PHIs");
     250             :     }
     251             : 
     252     3218182 :     MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(&BB);
     253     3218182 :     MBBMap[&BB] = MBB;
     254     3218182 :     MF->push_back(MBB);
     255             : 
     256             :     // Transfer the address-taken flag. This is necessary because there could
     257             :     // be multiple MachineBasicBlocks corresponding to one BasicBlock, and only
     258             :     // the first one should be marked.
     259     3218182 :     if (BB.hasAddressTaken())
     260             :       MBB->setHasAddressTaken();
     261             : 
     262             :     // Mark landing pad blocks.
     263     3218182 :     if (BB.isEHPad())
     264             :       MBB->setIsEHPad();
     265             : 
     266             :     // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
     267             :     // appropriate.
     268     3410890 :     for (const PHINode &PN : BB.phis()) {
     269      192708 :       if (PN.use_empty())
     270        1608 :         continue;
     271             : 
     272             :       // Skip empty types
     273      191102 :       if (PN.getType()->isEmptyTy())
     274             :         continue;
     275             : 
     276             :       DebugLoc DL = PN.getDebugLoc();
     277      191100 :       unsigned PHIReg = ValueMap[&PN];
     278             :       assert(PHIReg && "PHI node does not have an assigned virtual register!");
     279             : 
     280             :       SmallVector<EVT, 4> ValueVTs;
     281      191100 :       ComputeValueVTs(*TLI, MF->getDataLayout(), PN.getType(), ValueVTs);
     282      389596 :       for (EVT VT : ValueVTs) {
     283      198496 :         unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
     284      198496 :         const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
     285      398033 :         for (unsigned i = 0; i != NumRegisters; ++i)
     286      199537 :           BuildMI(MBB, DL, TII->get(TargetOpcode::PHI), PHIReg + i);
     287      198496 :         PHIReg += NumRegisters;
     288             :       }
     289             :     }
     290             :   }
     291             : 
     292             :   if (isFuncletEHPersonality(Personality)) {
     293          91 :     WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();
     294             : 
     295             :     // Map all BB references in the WinEH data to MBBs.
     296         151 :     for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
     297         127 :       for (WinEHHandlerType &H : TBME.HandlerArray) {
     298          67 :         if (H.Handler)
     299          67 :           H.Handler = MBBMap[H.Handler.get<const BasicBlock *>()];
     300             :       }
     301             :     }
     302         235 :     for (CxxUnwindMapEntry &UME : EHInfo.CxxUnwindMap)
     303         144 :       if (UME.Cleanup)
     304          24 :         UME.Cleanup = MBBMap[UME.Cleanup.get<const BasicBlock *>()];
     305         128 :     for (SEHUnwindMapEntry &UME : EHInfo.SEHUnwindMap) {
     306          37 :       const auto *BB = UME.Handler.get<const BasicBlock *>();
     307          37 :       UME.Handler = MBBMap[BB];
     308             :     }
     309         110 :     for (ClrEHUnwindMapEntry &CME : EHInfo.ClrEHUnwindMap) {
     310          19 :       const auto *BB = CME.Handler.get<const BasicBlock *>();
     311          19 :       CME.Handler = MBBMap[BB];
     312             :     }
     313             :   }
     314             : 
     315      405201 :   else if (Personality == EHPersonality::Wasm_CXX) {
     316           8 :     WasmEHFuncInfo &EHInfo = *MF->getWasmEHFuncInfo();
     317             :     // Map all BB references in the WinEH data to MBBs.
     318             :     DenseMap<BBOrMBB, BBOrMBB> NewMap;
     319          18 :     for (auto &KV : EHInfo.EHPadUnwindMap) {
     320           2 :       const auto *Src = KV.first.get<const BasicBlock *>();
     321           2 :       const auto *Dst = KV.second.get<const BasicBlock *>();
     322           4 :       NewMap[MBBMap[Src]] = MBBMap[Dst];
     323             :     }
     324           8 :     EHInfo.EHPadUnwindMap = std::move(NewMap);
     325           8 :     NewMap.clear();
     326          36 :     for (auto &KV : EHInfo.ThrowUnwindMap) {
     327          20 :       const auto *Src = KV.first.get<const BasicBlock *>();
     328          20 :       const auto *Dst = KV.second.get<const BasicBlock *>();
     329          40 :       NewMap[MBBMap[Src]] = MBBMap[Dst];
     330             :     }
     331           8 :     EHInfo.ThrowUnwindMap = std::move(NewMap);
     332             :   }
     333      405292 : }
     334             : 
     335             : /// clear - Clear out all the function-specific state. This returns this
     336             : /// FunctionLoweringInfo to an empty state, ready to be used for a
     337             : /// different function.
     338      405212 : void FunctionLoweringInfo::clear() {
     339      405212 :   MBBMap.clear();
     340      405211 :   ValueMap.clear();
     341      405212 :   VirtReg2Value.clear();
     342      405211 :   StaticAllocaMap.clear();
     343             :   LiveOutRegInfo.clear();
     344      405212 :   VisitedBBs.clear();
     345             :   ArgDbgValues.clear();
     346      405212 :   ByValArgFrameIndexMap.clear();
     347      405212 :   RegFixups.clear();
     348             :   RegsWithFixups.clear();
     349             :   StatepointStackSlots.clear();
     350      405212 :   StatepointSpillMaps.clear();
     351      405212 :   PreferredExtendType.clear();
     352      405212 : }
     353             : 
     354             : /// CreateReg - Allocate a single virtual register for the given type.
     355    10398377 : unsigned FunctionLoweringInfo::CreateReg(MVT VT) {
     356    20796754 :   return RegInfo->createVirtualRegister(
     357    10398377 :       MF->getSubtarget().getTargetLowering()->getRegClassFor(VT));
     358             : }
     359             : 
     360             : /// CreateRegs - Allocate the appropriate number of virtual registers of
     361             : /// the correctly promoted or expanded types.  Assign these registers
     362             : /// consecutive vreg numbers and return the first assigned number.
     363             : ///
     364             : /// In the case that the given value has struct or array type, this function
     365             : /// will assign registers for each member or element.
     366             : ///
     367    10918542 : unsigned FunctionLoweringInfo::CreateRegs(Type *Ty) {
     368    10918542 :   const TargetLowering *TLI = MF->getSubtarget().getTargetLowering();
     369             : 
     370             :   SmallVector<EVT, 4> ValueVTs;
     371    10918542 :   ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);
     372             : 
     373             :   unsigned FirstReg = 0;
     374    21306606 :   for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
     375    10388064 :     EVT ValueVT = ValueVTs[Value];
     376    10388064 :     MVT RegisterVT = TLI->getRegisterType(Ty->getContext(), ValueVT);
     377             : 
     378    10388064 :     unsigned NumRegs = TLI->getNumRegisters(Ty->getContext(), ValueVT);
     379    20783191 :     for (unsigned i = 0; i != NumRegs; ++i) {
     380    10395127 :       unsigned R = CreateReg(RegisterVT);
     381    10395127 :       if (!FirstReg) FirstReg = R;
     382             :     }
     383             :   }
     384    10918542 :   return FirstReg;
     385             : }
     386             : 
     387             : /// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
     388             : /// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
     389             : /// the register's LiveOutInfo is for a smaller bit width, it is extended to
     390             : /// the larger bit width by zero extension. The bit width must be no smaller
     391             : /// than the LiveOutInfo's existing bit width.
     392             : const FunctionLoweringInfo::LiveOutInfo *
     393       33503 : FunctionLoweringInfo::GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth) {
     394       33503 :   if (!LiveOutRegInfo.inBounds(Reg))
     395             :     return nullptr;
     396             : 
     397             :   LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
     398       33211 :   if (!LOI->IsValid)
     399             :     return nullptr;
     400             : 
     401       31891 :   if (BitWidth > LOI->Known.getBitWidth()) {
     402       20579 :     LOI->NumSignBits = 1;
     403       20579 :     LOI->Known = LOI->Known.zextOrTrunc(BitWidth);
     404             :   }
     405             : 
     406             :   return LOI;
     407             : }
     408             : 
     409             : /// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
     410             : /// register based on the LiveOutInfo of its operands.
     411       78977 : void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
     412       78977 :   Type *Ty = PN->getType();
     413       78977 :   if (!Ty->isIntegerTy() || Ty->isVectorTy())
     414       62786 :     return;
     415             : 
     416             :   SmallVector<EVT, 1> ValueVTs;
     417       19802 :   ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);
     418             :   assert(ValueVTs.size() == 1 &&
     419             :          "PHIs with non-vector integer types should have a single VT.");
     420       19802 :   EVT IntVT = ValueVTs[0];
     421             : 
     422       19802 :   if (TLI->getNumRegisters(PN->getContext(), IntVT) != 1)
     423             :     return;
     424       19673 :   IntVT = TLI->getTypeToTransformTo(PN->getContext(), IntVT);
     425       19673 :   unsigned BitWidth = IntVT.getSizeInBits();
     426             : 
     427       19673 :   unsigned DestReg = ValueMap[PN];
     428       19673 :   if (!TargetRegisterInfo::isVirtualRegister(DestReg))
     429             :     return;
     430             :   LiveOutRegInfo.grow(DestReg);
     431             :   LiveOutInfo &DestLOI = LiveOutRegInfo[DestReg];
     432             : 
     433             :   Value *V = PN->getIncomingValue(0);
     434       18105 :   if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
     435         177 :     DestLOI.NumSignBits = 1;
     436         177 :     DestLOI.Known = KnownBits(BitWidth);
     437         177 :     return;
     438             :   }
     439             : 
     440             :   if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
     441        2224 :     APInt Val = CI->getValue().zextOrTrunc(BitWidth);
     442        2224 :     DestLOI.NumSignBits = Val.getNumSignBits();
     443        2224 :     DestLOI.Known.Zero = ~Val;
     444        2224 :     DestLOI.Known.One = Val;
     445             :   } else {
     446             :     assert(ValueMap.count(V) && "V should have been placed in ValueMap when its"
     447             :                                 "CopyToReg node was created.");
     448       15704 :     unsigned SrcReg = ValueMap[V];
     449       15704 :     if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
     450           0 :       DestLOI.IsValid = false;
     451           0 :       return;
     452             :     }
     453       15704 :     const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
     454       15704 :     if (!SrcLOI) {
     455         846 :       DestLOI.IsValid = false;
     456         846 :       return;
     457             :     }
     458       14858 :     DestLOI = *SrcLOI;
     459             :   }
     460             : 
     461             :   assert(DestLOI.Known.Zero.getBitWidth() == BitWidth &&
     462             :          DestLOI.Known.One.getBitWidth() == BitWidth &&
     463             :          "Masks should have the same bit width as the type.");
     464             : 
     465       38111 :   for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
     466             :     Value *V = PN->getIncomingValue(i);
     467       21920 :     if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
     468         125 :       DestLOI.NumSignBits = 1;
     469         125 :       DestLOI.Known = KnownBits(BitWidth);
     470         125 :       return;
     471             :     }
     472             : 
     473             :     if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
     474        3996 :       APInt Val = CI->getValue().zextOrTrunc(BitWidth);
     475        4464 :       DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, Val.getNumSignBits());
     476        7992 :       DestLOI.Known.Zero &= ~Val;
     477        3996 :       DestLOI.Known.One &= Val;
     478             :       continue;
     479             :     }
     480             : 
     481             :     assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
     482             :                                 "its CopyToReg node was created.");
     483       17799 :     unsigned SrcReg = ValueMap[V];
     484       17799 :     if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
     485           0 :       DestLOI.IsValid = false;
     486           0 :       return;
     487             :     }
     488       17799 :     const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
     489       17799 :     if (!SrcLOI) {
     490         766 :       DestLOI.IsValid = false;
     491         766 :       return;
     492             :     }
     493       17033 :     DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, SrcLOI->NumSignBits);
     494       17033 :     DestLOI.Known.Zero &= SrcLOI->Known.Zero;
     495       17033 :     DestLOI.Known.One &= SrcLOI->Known.One;
     496             :   }
     497             : }
     498             : 
     499             : /// setArgumentFrameIndex - Record frame index for the byval
     500             : /// argument. This overrides previous frame index entry for this argument,
     501             : /// if any.
     502        2586 : void FunctionLoweringInfo::setArgumentFrameIndex(const Argument *A,
     503             :                                                  int FI) {
     504        2586 :   ByValArgFrameIndexMap[A] = FI;
     505        2586 : }
     506             : 
     507             : /// getArgumentFrameIndex - Get frame index for the byval argument.
     508             : /// If the argument does not have any assigned frame index then 0 is
     509             : /// returned.
     510       23836 : int FunctionLoweringInfo::getArgumentFrameIndex(const Argument *A) {
     511       23836 :   auto I = ByValArgFrameIndexMap.find(A);
     512       23836 :   if (I != ByValArgFrameIndexMap.end())
     513          63 :     return I->second;
     514             :   LLVM_DEBUG(dbgs() << "Argument does not have assigned frame index!\n");
     515             :   return INT_MAX;
     516             : }
     517             : 
     518          12 : unsigned FunctionLoweringInfo::getCatchPadExceptionPointerVReg(
     519             :     const Value *CPI, const TargetRegisterClass *RC) {
     520          12 :   MachineRegisterInfo &MRI = MF->getRegInfo();
     521          12 :   auto I = CatchPadExceptionPointers.insert({CPI, 0});
     522          12 :   unsigned &VReg = I.first->second;
     523          12 :   if (I.second)
     524           6 :     VReg = MRI.createVirtualRegister(RC);
     525             :   assert(VReg && "null vreg in exception pointer table!");
     526          12 :   return VReg;
     527             : }
     528             : 
     529             : unsigned
     530         511 : FunctionLoweringInfo::getOrCreateSwiftErrorVReg(const MachineBasicBlock *MBB,
     531             :                                                 const Value *Val) {
     532         511 :   auto Key = std::make_pair(MBB, Val);
     533         511 :   auto It = SwiftErrorVRegDefMap.find(Key);
     534             :   // If this is the first use of this swifterror value in this basic block,
     535             :   // create a new virtual register.
     536             :   // After we processed all basic blocks we will satisfy this "upwards exposed
     537             :   // use" by inserting a copy or phi at the beginning of this block.
     538         511 :   if (It == SwiftErrorVRegDefMap.end()) {
     539          45 :     auto &DL = MF->getDataLayout();
     540          90 :     const TargetRegisterClass *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
     541          90 :     auto VReg = MF->getRegInfo().createVirtualRegister(RC);
     542          45 :     SwiftErrorVRegDefMap[Key] = VReg;
     543          45 :     SwiftErrorVRegUpwardsUse[Key] = VReg;
     544          45 :     return VReg;
     545         466 :   } else return It->second;
     546             : }
     547             : 
     548         512 : void FunctionLoweringInfo::setCurrentSwiftErrorVReg(
     549             :     const MachineBasicBlock *MBB, const Value *Val, unsigned VReg) {
     550         512 :   SwiftErrorVRegDefMap[std::make_pair(MBB, Val)] = VReg;
     551         512 : }
     552             : 
     553             : std::pair<unsigned, bool>
     554         304 : FunctionLoweringInfo::getOrCreateSwiftErrorVRegDefAt(const Instruction *I) {
     555             :   auto Key = PointerIntPair<const Instruction *, 1, bool>(I, true);
     556         304 :   auto It = SwiftErrorVRegDefUses.find(Key);
     557         304 :   if (It == SwiftErrorVRegDefUses.end()) {
     558         222 :     auto &DL = MF->getDataLayout();
     559         444 :     const TargetRegisterClass *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
     560         444 :     unsigned VReg =  MF->getRegInfo().createVirtualRegister(RC);
     561         222 :     SwiftErrorVRegDefUses[Key] = VReg;
     562             :     return std::make_pair(VReg, true);
     563             :   }
     564          82 :   return std::make_pair(It->second, false);
     565             : }
     566             : 
     567             : std::pair<unsigned, bool>
     568         385 : FunctionLoweringInfo::getOrCreateSwiftErrorVRegUseAt(const Instruction *I, const MachineBasicBlock *MBB, const Value *Val) {
     569             :   auto Key = PointerIntPair<const Instruction *, 1, bool>(I, false);
     570         385 :   auto It = SwiftErrorVRegDefUses.find(Key);
     571         385 :   if (It == SwiftErrorVRegDefUses.end()) {
     572         279 :     unsigned VReg = getOrCreateSwiftErrorVReg(MBB, Val);
     573         279 :     SwiftErrorVRegDefUses[Key] = VReg;
     574             :     return std::make_pair(VReg, true);
     575             :   }
     576         106 :   return std::make_pair(It->second, false);
     577             : }
     578             : 
     579             : const Value *
     580       96322 : FunctionLoweringInfo::getValueFromVirtualReg(unsigned Vreg) {
     581       96322 :   if (VirtReg2Value.empty()) {
     582       24897 :     for (auto &P : ValueMap) {
     583        8035 :       VirtReg2Value[P.second] = P.first;
     584             :     }
     585             :   }
     586       96322 :   return VirtReg2Value[Vreg];
     587             : }

Generated by: LCOV version 1.13