LCOV - code coverage report
Current view: top level - lib/Target/AMDGPU - AMDGPULowerKernelArguments.cpp (source / functions)
Test: llvm-toolchain.info
Date: 2018-07-13 00:08:38
            Hit    Total    Coverage
Lines:       97       97     100.0 %
Functions:    6        7      85.7 %

          Line data    Source code
       1             : //===-- AMDGPULowerKernelArguments.cpp ------------------------------------------===//
       2             : //
       3             : //                     The LLVM Compiler Infrastructure
       4             : //
       5             : // This file is distributed under the University of Illinois Open Source
       6             : // License. See LICENSE.TXT for details.
       7             : //
       8             : //===----------------------------------------------------------------------===//
       9             : //
       10             : /// \file This pass replaces accesses to kernel arguments with loads at
      11             : /// offsets from the kernarg base pointer.
      12             : //
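                      : // As a rough illustration (the kernel, argument names, offset, and address
                      : // space below are simplified placeholders), a use of an i32 kernel argument
                      : // %x such as
                      : //
                      : //   define amdgpu_kernel void @f(i32 %x) {
                      : //     %y = add i32 %x, 1
                      : //     ...
                      : //   }
                      : //
                      : // is rewritten so that %x is instead materialized from the kernarg segment:
                      : //
                      : //   %f.kernarg.segment = call i8 addrspace(N)* @llvm.amdgcn.kernarg.segment.ptr()
                      : //   %x.kernarg.offset = getelementptr inbounds i8, i8 addrspace(N)*
                      : //                         %f.kernarg.segment, i64 <offset of %x>
                      : //   %x.kernarg.offset.cast = bitcast i8 addrspace(N)* %x.kernarg.offset
                      : //                              to i32 addrspace(N)*
                      : //   %x.load = load i32, i32 addrspace(N)* %x.kernarg.offset.cast, !invariant.load !0
                      : //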
      13             : //===----------------------------------------------------------------------===//
      14             : 
      15             : #include "AMDGPU.h"
      16             : #include "AMDGPUSubtarget.h"
      17             : #include "AMDGPUTargetMachine.h"
      18             : #include "llvm/ADT/StringRef.h"
      19             : #include "llvm/Analysis/DivergenceAnalysis.h"
      20             : #include "llvm/Analysis/Loads.h"
      21             : #include "llvm/CodeGen/Passes.h"
      22             : #include "llvm/CodeGen/TargetPassConfig.h"
      23             : #include "llvm/IR/Attributes.h"
      24             : #include "llvm/IR/BasicBlock.h"
      25             : #include "llvm/IR/Constants.h"
      26             : #include "llvm/IR/DerivedTypes.h"
      27             : #include "llvm/IR/Function.h"
      28             : #include "llvm/IR/IRBuilder.h"
      29             : #include "llvm/IR/InstrTypes.h"
      30             : #include "llvm/IR/Instruction.h"
      31             : #include "llvm/IR/Instructions.h"
      32             : #include "llvm/IR/LLVMContext.h"
      33             : #include "llvm/IR/MDBuilder.h"
      34             : #include "llvm/IR/Metadata.h"
      35             : #include "llvm/IR/Operator.h"
      36             : #include "llvm/IR/Type.h"
      37             : #include "llvm/IR/Value.h"
      38             : #include "llvm/Pass.h"
      39             : #include "llvm/Support/Casting.h"
      40             : 
      41             : #define DEBUG_TYPE "amdgpu-lower-kernel-arguments"
      42             : 
      43             : using namespace llvm;
      44             : 
      45             : namespace {
      46             : 
       47        3576 : class AMDGPULowerKernelArguments : public FunctionPass {
      48             : public:
      49             :   static char ID;
      50             : 
      51        1796 :   AMDGPULowerKernelArguments() : FunctionPass(ID) {}
      52             : 
      53             :   bool runOnFunction(Function &F) override;
      54             : 
      55        1784 :   void getAnalysisUsage(AnalysisUsage &AU) const override {
      56             :     AU.addRequired<TargetPassConfig>();
      57             :     AU.setPreservesAll();
       58        1784 :   }
      59             : };
      60             : 
      61             : } // end anonymous namespace
      62             : 
      63       17794 : bool AMDGPULowerKernelArguments::runOnFunction(Function &F) {
      64             :   CallingConv::ID CC = F.getCallingConv();
      65       32916 :   if (CC != CallingConv::AMDGPU_KERNEL || F.arg_empty())
      66             :     return false;
      67             : 
      68       14163 :   auto &TPC = getAnalysis<TargetPassConfig>();
      69             : 
      70       14163 :   const TargetMachine &TM = TPC.getTM<TargetMachine>();
      71             :   const SISubtarget &ST = TM.getSubtarget<SISubtarget>(F);
      72       14163 :   LLVMContext &Ctx = F.getParent()->getContext();
      73       14163 :   const DataLayout &DL = F.getParent()->getDataLayout();
      74             :   BasicBlock &EntryBlock = *F.begin();
      75       14163 :   IRBuilder<> Builder(&*EntryBlock.begin());
      76             : 
      77       14163 :   const unsigned KernArgBaseAlign = 16; // FIXME: Increase if necessary
      78       28326 :   const uint64_t BaseOffset = ST.getExplicitKernelArgOffset(F);
      79             : 
       80             :   // FIXME: Alignment is broken with explicit arg offset.
      81       14163 :   const uint64_t TotalKernArgSize = ST.getKernArgSegmentSize(F);
      82       14163 :   if (TotalKernArgSize == 0)
      83             :     return false;
      84             : 
      85             :   CallInst *KernArgSegment =
      86             :     Builder.CreateIntrinsic(Intrinsic::amdgcn_kernarg_segment_ptr, nullptr,
      87       28322 :                             F.getName() + ".kernarg.segment");
      88             : 
      89       14161 :   KernArgSegment->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
      90       14161 :   KernArgSegment->addAttribute(AttributeList::ReturnIndex,
      91             :     Attribute::getWithDereferenceableBytes(Ctx, TotalKernArgSize));
      92             : 
      93       14161 :   unsigned AS = KernArgSegment->getType()->getPointerAddressSpace();
      94       14161 :   unsigned MaxAlign = 1;
      95             :   uint64_t ExplicitArgOffset = 0;
      96             : 
      97       47037 :   for (Argument &Arg : F.args()) {
      98       32876 :     Type *ArgTy = Arg.getType();
      99       32876 :     unsigned Align = DL.getABITypeAlignment(ArgTy);
     100       32876 :     MaxAlign = std::max(Align, MaxAlign);
     101       32876 :     unsigned Size = DL.getTypeSizeInBits(ArgTy);
     102       32876 :     unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
     103             : 
     104             : 
     105             :     // Clover seems to always pad i8/i16 to i32, but doesn't properly align
     106             :     // them?
     107             :     // Make sure the struct elements have correct size and alignment for ext
      108             :     // args. These seem to be padded up to 4 bytes, but not correctly aligned.
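                      :     // (For example, a hypothetical i8 zeroext argument on a non-HSA OS is
                      :     // treated below as occupying 4 bytes of the kernarg segment.)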
     109       32962 :     bool IsExtArg = AllocSize < 32 && (Arg.hasZExtAttr() || Arg.hasSExtAttr()) &&
     110             :                     !ST.isAmdHsaOS();
     111             :     if (IsExtArg)
     112             :       AllocSize = 4;
     113             : 
     114       65752 :     uint64_t EltOffset = alignTo(ExplicitArgOffset, Align) + BaseOffset;
     115       32876 :     ExplicitArgOffset = alignTo(ExplicitArgOffset, Align) + AllocSize;
     116             : 
     117       32876 :     if (Arg.use_empty())
     118        5954 :       continue;
     119             : 
     120             :     if (PointerType *PT = dyn_cast<PointerType>(ArgTy)) {
     121             :       // FIXME: Hack. We rely on AssertZext to be able to fold DS addressing
     122             :       // modes on SI to know the high bits are 0 so pointer adds don't wrap. We
     123             :       // can't represent this with range metadata because it's only allowed for
     124             :       // integer types.
     125       25993 :       if (PT->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS &&
     126        2615 :           ST.getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS)
     127         638 :         continue;
     128             : 
     129             :       // FIXME: We can replace this with equivalent alias.scope/noalias
     130             :       // metadata, but this appears to be a lot of work.
     131       23348 :       if (Arg.hasNoAliasAttr())
     132        1246 :         continue;
     133             :     }
     134             : 
     135             :     VectorType *VT = dyn_cast<VectorType>(ArgTy);
     136        1350 :     bool IsV3 = VT && VT->getNumElements() == 3;
     137             :     VectorType *V4Ty = nullptr;
     138             : 
     139             :     int64_t AlignDownOffset = alignDown(EltOffset, 4);
     140       28957 :     int64_t OffsetDiff = EltOffset - AlignDownOffset;
     141       28957 :     unsigned AdjustedAlign = MinAlign(KernArgBaseAlign, AlignDownOffset);
     142             : 
     143             :     Value *ArgPtr;
     144       28957 :     if (Size < 32 && !ArgTy->isAggregateType()) { // FIXME: Handle aggregate types
     145             :       // Since we don't have sub-dword scalar loads, avoid doing an extload by
     146             :       // loading earlier than the argument address, and extracting the relevant
     147             :       // bits.
     148             :       //
     149             :       // Additionally widen any sub-dword load to i32 even if suitably aligned,
     150             :       // so that CSE between different argument loads works easily.
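                      :       //
                      :       // For instance (offsets here are purely illustrative), a hypothetical i16
                      :       // argument at kernarg offset 2 is handled by loading an i32 at the
                      :       // aligned-down offset 0, shifting the loaded value right by
                      :       // OffsetDiff * 8 = 16 bits, and truncating the result back to i16.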
     151             : 
     152         576 :       ArgPtr = Builder.CreateConstInBoundsGEP1_64(
     153             :         KernArgSegment,
     154             :         AlignDownOffset,
     155        1152 :         Arg.getName() + ".kernarg.offset.align.down");
     156             :       ArgPtr = Builder.CreateBitCast(ArgPtr,
     157        1152 :                                      Builder.getInt32Ty()->getPointerTo(AS),
     158        1152 :                                      ArgPtr->getName() + ".cast");
     159             :     } else {
     160       28381 :       ArgPtr = Builder.CreateConstInBoundsGEP1_64(
     161             :         KernArgSegment,
     162             :         AlignDownOffset,
     163       56762 :         Arg.getName() + ".kernarg.offset");
     164       28381 :       ArgPtr = Builder.CreateBitCast(ArgPtr, ArgTy->getPointerTo(AS),
     165       56762 :                                      ArgPtr->getName() + ".cast");
     166             :     }
     167             : 
     168             :     assert((!IsExtArg || !IsV3) && "incompatible situation");
     169             : 
     170       28957 :     if (IsV3 && Size >= 32) {
     171         202 :       V4Ty = VectorType::get(VT->getVectorElementType(), 4);
     172             :       // Use the hack that clang uses to avoid SelectionDAG ruining v3 loads
     173         202 :       ArgPtr = Builder.CreateBitCast(ArgPtr, V4Ty->getPointerTo(AS));
     174             :     }
     175             : 
     176       28957 :     LoadInst *Load = Builder.CreateAlignedLoad(ArgPtr, AdjustedAlign);
     177       57914 :     Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(Ctx, {}));
     178             : 
     179             :     MDBuilder MDB(Ctx);
     180             : 
     181       28957 :     if (isa<PointerType>(ArgTy)) {
     182       20856 :       if (Arg.hasNonNullAttr())
     183           4 :         Load->setMetadata(LLVMContext::MD_nonnull, MDNode::get(Ctx, {}));
     184             : 
     185       20856 :       uint64_t DerefBytes = Arg.getDereferenceableBytes();
     186       20856 :       if (DerefBytes != 0) {
     187           4 :         Load->setMetadata(
     188             :           LLVMContext::MD_dereferenceable,
     189             :           MDNode::get(Ctx,
     190           8 :                       MDB.createConstant(
     191           8 :                         ConstantInt::get(Builder.getInt64Ty(), DerefBytes))));
     192             :       }
     193             : 
     194       20856 :       uint64_t DerefOrNullBytes = Arg.getDereferenceableOrNullBytes();
     195       20856 :       if (DerefOrNullBytes != 0) {
     196           2 :         Load->setMetadata(
     197             :           LLVMContext::MD_dereferenceable_or_null,
     198             :           MDNode::get(Ctx,
     199           6 :                       MDB.createConstant(ConstantInt::get(Builder.getInt64Ty(),
     200             :                                                           DerefOrNullBytes))));
     201             :       }
     202             : 
     203       20856 :       unsigned ParamAlign = Arg.getParamAlignment();
     204       20856 :       if (ParamAlign != 0) {
     205           2 :         Load->setMetadata(
     206             :           LLVMContext::MD_align,
     207             :           MDNode::get(Ctx,
     208           6 :                       MDB.createConstant(ConstantInt::get(Builder.getInt64Ty(),
     209             :                                                           ParamAlign))));
     210             :       }
     211             :     }
     212             : 
     213             :     // TODO: Convert noalias arg to !noalias
     214             : 
     215       28957 :     if (Size < 32 && !ArgTy->isAggregateType()) {
     216         576 :       if (IsExtArg && OffsetDiff == 0) {
     217          27 :         Type *I32Ty = Builder.getInt32Ty();
     218          27 :         bool IsSext = Arg.hasSExtAttr();
     219             :         Metadata *LowAndHigh[] = {
     220          54 :           ConstantAsMetadata::get(
     221             :             ConstantInt::get(I32Ty, IsSext ? minIntN(Size) : 0)),
     222          54 :           ConstantAsMetadata::get(
     223             :             ConstantInt::get(I32Ty,
     224           8 :                              IsSext ? maxIntN(Size) + 1 : maxUIntN(Size) + 1))
     225          54 :         };
     226             : 
     227          27 :         Load->setMetadata(LLVMContext::MD_range, MDNode::get(Ctx, LowAndHigh));
     228             :       }
     229             : 
     230         683 :       Value *ExtractBits = OffsetDiff == 0 ?
     231         683 :         Load : Builder.CreateLShr(Load, OffsetDiff * 8);
     232             : 
     233         576 :       IntegerType *ArgIntTy = Builder.getIntNTy(Size);
     234         576 :       Value *Trunc = Builder.CreateTrunc(ExtractBits, ArgIntTy);
     235             :       Value *NewVal = Builder.CreateBitCast(Trunc, ArgTy,
     236        1152 :                                             Arg.getName() + ".load");
     237         576 :       Arg.replaceAllUsesWith(NewVal);
     238       28381 :     } else if (IsV3) {
     239         202 :       Value *Shuf = Builder.CreateShuffleVector(Load, UndefValue::get(V4Ty),
     240             :                                                 {0, 1, 2},
     241         303 :                                                 Arg.getName() + ".load");
     242         101 :       Arg.replaceAllUsesWith(Shuf);
     243             :     } else {
     244       56560 :       Load->setName(Arg.getName() + ".load");
     245       28280 :       Arg.replaceAllUsesWith(Load);
     246             :     }
     247             :   }
     248             : 
     249       14161 :   KernArgSegment->addAttribute(
     250             :     AttributeList::ReturnIndex,
     251       14161 :     Attribute::getWithAlignment(Ctx, std::max(KernArgBaseAlign, MaxAlign)));
     252             : 
     253       14161 :   return true;
     254             : }
     255             : 
     256       73254 : INITIALIZE_PASS_BEGIN(AMDGPULowerKernelArguments, DEBUG_TYPE,
     257             :                       "AMDGPU Lower Kernel Arguments", false, false)
     258      342570 : INITIALIZE_PASS_END(AMDGPULowerKernelArguments, DEBUG_TYPE, "AMDGPU Lower Kernel Arguments",
     259             :                     false, false)
     260             : 
     261             : char AMDGPULowerKernelArguments::ID = 0;
     262             : 
     263        1794 : FunctionPass *llvm::createAMDGPULowerKernelArgumentsPass() {
     264        3588 :   return new AMDGPULowerKernelArguments();
     265             : }

Generated by: LCOV version 1.13