LCOV - code coverage report

Current view: top level - lib/Target/AMDGPU/MCTargetDesc - AMDGPUAsmBackend.cpp (source / functions)
Test:         llvm-toolchain.info
Date:         2017-09-14 15:23:50

                    Hit    Total    Coverage
  Lines:             45       58      77.6 %
  Functions:         10       14      71.4 %

Legend: lines are marked as hit or not hit.

          Line data    Source code
       1             : //===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend -------------------===//
       2             : //
       3             : //                     The LLVM Compiler Infrastructure
       4             : //
       5             : // This file is distributed under the University of Illinois Open Source
       6             : // License. See LICENSE.TXT for details.
       7             : //
       8             : /// \file
       9             : //===----------------------------------------------------------------------===//
      10             : 
      11             : #include "MCTargetDesc/AMDGPUFixupKinds.h"
      12             : #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
      13             : #include "llvm/ADT/StringRef.h"
      14             : #include "llvm/MC/MCAsmBackend.h"
      15             : #include "llvm/MC/MCAssembler.h"
      16             : #include "llvm/MC/MCContext.h"
      17             : #include "llvm/MC/MCFixupKindInfo.h"
      18             : #include "llvm/MC/MCObjectWriter.h"
      19             : #include "llvm/MC/MCValue.h"
      20             : #include "llvm/Support/TargetRegistry.h"
      21             : 
      22             : using namespace llvm;
      23             : 
      24             : namespace {
      25             : 
      26        1954 : class AMDGPUAsmBackend : public MCAsmBackend {
      27             : public:
      28             :   AMDGPUAsmBackend(const Target &T)
      29        1963 :     : MCAsmBackend() {}
      30             : 
       31           0 :   unsigned getNumFixupKinds() const override { return AMDGPU::NumTargetFixupKinds; }
      32             : 
      33             :   void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
      34             :                   const MCValue &Target, MutableArrayRef<char> Data,
      35             :                   uint64_t Value, bool IsResolved) const override;
      36           0 :   bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
      37             :                             const MCRelaxableFragment *DF,
      38             :                             const MCAsmLayout &Layout) const override {
      39           0 :     return false;
      40             :   }
      41           0 :   void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
      42             :                         MCInst &Res) const override {
      43           0 :     llvm_unreachable("Not implemented");
      44             :   }
      45        1063 :   bool mayNeedRelaxation(const MCInst &Inst) const override { return false; }
      46             : 
      47             :   unsigned getMinimumNopSize() const override;
      48             :   bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
      49             : 
      50             :   const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
      51             : };
      52             : 
      53             : } // end anonymous namespace
      54             : 
      55          97 : static unsigned getFixupKindNumBytes(unsigned Kind) {
      56          97 :   switch (Kind) {
      57             :   case AMDGPU::fixup_si_sopp_br:
      58             :     return 2;
      59           0 :   case FK_SecRel_1:
      60             :   case FK_Data_1:
      61           0 :     return 1;
      62             :   case FK_SecRel_2:
      63             :   case FK_Data_2:
      64             :     return 2;
      65          82 :   case FK_SecRel_4:
      66             :   case FK_Data_4:
      67             :   case FK_PCRel_4:
      68          82 :     return 4;
      69           0 :   case FK_SecRel_8:
      70             :   case FK_Data_8:
      71           0 :     return 8;
      72           0 :   default:
      73           0 :     llvm_unreachable("Unknown fixup kind!");
      74             :   }
      75             : }
      76             : 
      77         284 : static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
      78             :                                  MCContext *Ctx) {
      79         284 :   int64_t SignedValue = static_cast<int64_t>(Value);
      80             : 
      81         284 :   switch (static_cast<unsigned>(Fixup.getKind())) {
      82          17 :   case AMDGPU::fixup_si_sopp_br: {
      83          17 :     int64_t BrImm = (SignedValue - 4) / 4;
      84             : 
      85          34 :     if (Ctx && !isInt<16>(BrImm))
      86           1 :       Ctx->reportError(Fixup.getLoc(), "branch size exceeds simm16");
      87             : 
      88          17 :     return BrImm;
      89             :   }
      90             :   case FK_Data_1:
      91             :   case FK_Data_2:
      92             :   case FK_Data_4:
      93             :   case FK_Data_8:
      94             :   case FK_PCRel_4:
      95             :   case FK_SecRel_4:
      96             :     return Value;
      97           0 :   default:
      98           0 :     llvm_unreachable("unhandled fixup kind");
      99             :   }
     100             : }
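
Aside from the listing: a standalone sketch of the fixup_si_sopp_br case in
adjustFixupValue above. The raw fixup value is the byte distance to the branch
target; subtracting 4 matches the offset being taken relative to the instruction
following the 4-byte branch, and dividing by 4 converts bytes into the dword
units of the simm16 field. The helper and variable names below are invented for
the illustration; this code is not part of AMDGPUAsmBackend.cpp.

    #include <cassert>
    #include <cstdint>

    // Convert a byte distance from the branch to its target into the
    // dword-based simm16 immediate, as in the switch case above.
    static int64_t encodeSoppBranchImm(uint64_t Value) {
      int64_t SignedValue = static_cast<int64_t>(Value);
      return (SignedValue - 4) / 4;
    }

    int main() {
      // A target 12 bytes past the branch encodes as an immediate of 2.
      assert(encodeSoppBranchImm(12) == 2);
      return 0;
    }
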
     101             : 
     102         284 : void AMDGPUAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
     103             :                                   const MCValue &Target,
     104             :                                   MutableArrayRef<char> Data, uint64_t Value,
     105             :                                   bool IsResolved) const {
     106         284 :   Value = adjustFixupValue(Fixup, Value, &Asm.getContext());
     107         284 :   if (!Value)
     108             :     return; // Doesn't change encoding.
     109             : 
     110         194 :   MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
     111             : 
     112             :   // Shift the value into position.
     113          97 :   Value <<= Info.TargetOffset;
     114             : 
     115          97 :   unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
     116          97 :   uint32_t Offset = Fixup.getOffset();
     117             :   assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
     118             : 
     119             :   // For each byte of the fragment that the fixup touches, mask in the bits from
     120             :   // the fixup value.
     121         455 :   for (unsigned i = 0; i != NumBytes; ++i)
     122         716 :     Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
     123             : }
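
Aside: the loop at the end of applyFixup ORs the already-shifted fixup value
into the fragment bytes least-significant byte first, so only the bytes the
fixup covers are modified. A minimal standalone sketch of that masking step
(the buffer size, offset, and helper name are invented for the example):

    #include <array>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // OR `Value` into `NumBytes` bytes of `Data` starting at `Offset`,
    // least-significant byte first, mirroring the loop in applyFixup.
    static void patchBytes(std::array<uint8_t, 8> &Data, std::size_t Offset,
                           unsigned NumBytes, uint64_t Value) {
      for (unsigned i = 0; i != NumBytes; ++i)
        Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
    }

    int main() {
      std::array<uint8_t, 8> Fragment{};
      patchBytes(Fragment, 2, 4, 0x11223344); // a 4-byte fixup at offset 2
      assert(Fragment[2] == 0x44 && Fragment[3] == 0x33 &&
             Fragment[4] == 0x22 && Fragment[5] == 0x11);
      return 0;
    }
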
     124             : 
     125         922 : const MCFixupKindInfo &AMDGPUAsmBackend::getFixupKindInfo(
     126             :                                                        MCFixupKind Kind) const {
     127             :   const static MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
     128             :     // name                   offset bits  flags
     129             :     { "fixup_si_sopp_br",     0,     16,   MCFixupKindInfo::FKF_IsPCRel },
     130             :   };
     131             : 
     132        1019 :   if (Kind < FirstTargetFixupKind)
     133         944 :     return MCAsmBackend::getFixupKindInfo(Kind);
     134             : 
     135          75 :   return Infos[Kind - FirstTargetFixupKind];
     136             : }
     137             : 
     138         402 : unsigned AMDGPUAsmBackend::getMinimumNopSize() const {
     139         402 :   return 4;
     140             : }
     141             : 
     142         308 : bool AMDGPUAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
     143             :   // If the count is not 4-byte aligned, we must be writing data into the text
     144             :   // section (otherwise we have unaligned instructions, and thus have far
     145             :   // bigger problems), so just write zeros instead.
     146         308 :   OW->WriteZeros(Count % 4);
     147             : 
     148             :   // We are properly aligned, so write NOPs as requested.
     149         308 :   Count /= 4;
     150             : 
     151             :   // FIXME: R600 support.
     152             :   // s_nop 0
     153         308 :   const uint32_t Encoded_S_NOP_0 = 0xbf800000;
     154             : 
     155       12609 :   for (uint64_t I = 0; I != Count; ++I)
     156       12301 :     OW->write32(Encoded_S_NOP_0);
     157             : 
     158         308 :   return true;
     159             : }
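
Aside: writeNopData pads with Count % 4 zero bytes and then Count / 4 copies of
the 4-byte s_nop 0 encoding (0xbf800000). A small standalone sketch of just that
split, with names invented for the example:

    #include <cassert>
    #include <cstdint>

    // How a padding request of Count bytes is split: a sub-dword remainder of
    // zero bytes, followed by whole 4-byte s_nop instructions.
    struct NopPlan {
      uint64_t ZeroBytes;
      uint64_t NopInstructions;
    };

    static NopPlan planNopPadding(uint64_t Count) {
      return {Count % 4, Count / 4};
    }

    int main() {
      // Padding 10 bytes: 2 zero bytes, then 2 s_nop encodings (2 + 2 * 4 == 10).
      NopPlan P = planNopPadding(10);
      assert(P.ZeroBytes == 2 && P.NopInstructions == 2);
      return 0;
    }
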
     160             : 
     161             : //===----------------------------------------------------------------------===//
     162             : // ELFAMDGPUAsmBackend class
     163             : //===----------------------------------------------------------------------===//
     164             : 
     165             : namespace {
     166             : 
     167        3908 : class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
     168             :   bool Is64Bit;
     169             :   bool HasRelocationAddend;
     170             : 
     171             : public:
     172        1963 :   ELFAMDGPUAsmBackend(const Target &T, const Triple &TT) :
     173        1963 :       AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
     174        5889 :       HasRelocationAddend(TT.getOS() == Triple::AMDHSA) { }
     175             : 
     176          61 :   MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
     177          61 :     return createAMDGPUELFObjectWriter(Is64Bit, HasRelocationAddend, OS);
     178             :   }
     179             : };
     180             : 
     181             : } // end anonymous namespace
     182             : 
     183        1963 : MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
     184             :                                            const MCRegisterInfo &MRI,
     185             :                                            const Triple &TT, StringRef CPU,
     186             :                                            const MCTargetOptions &Options) {
     187             :   // Use 64-bit ELF for amdgcn
     188        3926 :   return new ELFAMDGPUAsmBackend(T, TT);
     189             : }

Generated by: LCOV version 1.13