  void getMachineOpValueT16Lo128(const MCInst &MI, unsigned OpNo, APInt &Op,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  // ...

  uint64_t getImplicitOpSelHiEncoding(int Opcode) const;

  // ...

  std::optional<uint32_t> getLitEncoding(const MCOperand &MO,
                                         const MCOperandInfo &OpInfo,
                                         const MCSubtargetInfo &STI) const;

// ...

template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  return 0;
}
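
// Illustrative sketch, not part of this file: the integer inline-constant
// mapping above as a standalone, compile-time-checked function (the name
// intInlineImmEncoding is hypothetical). 0..64 encode as 128..192,
// -16..-1 encode as 193..208, and 0 means "no inline encoding exists".
#include <cstdint>

constexpr uint32_t intInlineImmEncoding(int64_t Imm) {
  if (Imm >= 0 && Imm <= 64)
    return 128 + static_cast<uint32_t>(Imm);
  if (Imm >= -16 && Imm <= -1)
    return 192 + static_cast<uint32_t>(-Imm);
  return 0; // Must be emitted as a literal constant instead.
}

static_assert(intInlineImmEncoding(0) == 128);
static_assert(intInlineImmEncoding(64) == 192);
static_assert(intInlineImmEncoding(-1) == 193);
static_assert(intInlineImmEncoding(-16) == 208);
static_assert(intInlineImmEncoding(65) == 0);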

static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI) {
  // ...
  if (Val == 0x3118 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;
  // ...
}

static uint32_t getLitBF16Encoding(uint16_t Val) {
  // ...
  switch (Val) {
  case 0x3F00: return 240; // 0.5
  case 0xBF00: return 241; // -0.5
  case 0x3F80: return 242; // 1.0
  case 0xBF80: return 243; // -1.0
  case 0x4000: return 244; // 2.0
  case 0xC000: return 245; // -2.0
  case 0x4080: return 246; // 4.0
  case 0xC080: return 247; // -4.0
  case 0x3E22: return 248; // 1.0 / (2.0 * pi)
  default: return 255;
  }
}
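
// Illustrative sketch, not part of this file: the bfloat16 bit patterns in
// the switch above are the high 16 bits of the corresponding IEEE-754
// single-precision value. A standalone check using C++20 std::bit_cast
// (bf16Bits is a hypothetical helper; note the 1/(2*pi) entry is the
// truncated, not rounded, pattern):
#include <bit>
#include <cstdint>

constexpr uint16_t bf16Bits(float F) {
  return static_cast<uint16_t>(std::bit_cast<uint32_t>(F) >> 16);
}

static_assert(bf16Bits(0.5f) == 0x3F00);
static_assert(bf16Bits(-1.0f) == 0xBF80);
static_assert(bf16Bits(2.0f) == 0x4000);
static_assert(bf16Bits(-4.0f) == 0xC080);
static_assert(bf16Bits(0.15915494f) == 0x3E22); // 1.0 / (2.0 * pi)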

static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI) {
  // ...
  if (Val == llvm::bit_cast<uint32_t>(0.5f))
    return 240;
  if (Val == llvm::bit_cast<uint32_t>(-0.5f))
    return 241;
  if (Val == llvm::bit_cast<uint32_t>(1.0f))
    return 242;
  if (Val == llvm::bit_cast<uint32_t>(-1.0f))
    return 243;
  if (Val == llvm::bit_cast<uint32_t>(2.0f))
    return 244;
  if (Val == llvm::bit_cast<uint32_t>(-2.0f))
    return 245;
  if (Val == llvm::bit_cast<uint32_t>(4.0f))
    return 246;
  if (Val == llvm::bit_cast<uint32_t>(-4.0f))
    return 247;
  if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;
  return 255;
}

static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI) {
  // ...
  if (Val == llvm::bit_cast<uint64_t>(0.5))
    return 240;
  if (Val == llvm::bit_cast<uint64_t>(-0.5))
    return 241;
  if (Val == llvm::bit_cast<uint64_t>(1.0))
    return 242;
  if (Val == llvm::bit_cast<uint64_t>(-1.0))
    return 243;
  if (Val == llvm::bit_cast<uint64_t>(2.0))
    return 244;
  if (Val == llvm::bit_cast<uint64_t>(-2.0))
    return 245;
  if (Val == llvm::bit_cast<uint64_t>(4.0))
    return 246;
  if (Val == llvm::bit_cast<uint64_t>(-4.0))
    return 247;
  if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;
  return 255;
}
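
// Illustrative sketch, not part of this file: inline-constant matching is
// exact bit-pattern equality, which is why the comparisons above bit-cast
// the IEEE values instead of comparing floating-point numbers. C++20
// equivalents of the patterns being matched:
#include <bit>
#include <cstdint>

static_assert(std::bit_cast<uint32_t>(0.5f) == 0x3F000000);
static_assert(std::bit_cast<uint32_t>(-2.0f) == 0xC0000000);
static_assert(std::bit_cast<uint64_t>(1.0) == 0x3FF0000000000000);
static_assert(std::bit_cast<uint64_t>(-4.0) == 0xC010000000000000);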

std::optional<uint32_t>
AMDGPUMCCodeEmitter::getLitEncoding(const MCOperand &MO,
                                    const MCOperandInfo &OpInfo,
                                    const MCSubtargetInfo &STI) const {
  int64_t Imm;
  if (MO.isExpr()) {
    const auto *C = dyn_cast<MCConstantExpr>(MO.getExpr());
    if (!C)
      return 255;
    Imm = C->getValue();
  }
  // ... (otherwise take MO.getImm(), then dispatch on OpInfo.OperandType to
  //      the width-specific getLit*Encoding helpers above)
}

uint64_t AMDGPUMCCodeEmitter::getImplicitOpSelHiEncoding(int Opcode) const {
  using namespace AMDGPU::VOP3PEncoding;
  using namespace AMDGPU::OpName;
  // ... (return OP_SEL_HI bits set to 1 for the source operands the opcode
  //      does not encode)
}

// ...

static bool isVCMPX64(const MCInstrDesc &Desc) {
  return (Desc.TSFlags & SIInstrFlags::VOP3) &&
         Desc.hasImplicitDefOfPhysReg(AMDGPU::EXEC);
}

void AMDGPUMCCodeEmitter::encodeInstruction(const MCInst &MI,
                                            SmallVectorImpl<char> &CB,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  int Opcode = MI.getOpcode();
  APInt Encoding, Scratch;
  getBinaryCodeForInstr(MI, Fixups, Encoding, Scratch, STI);
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  unsigned bytes = Desc.getSize();

  // Set unused op_sel_hi bits to 1 for VOP3P and MAI instructions.
  if ((Desc.TSFlags & SIInstrFlags::VOP3P) ||
      Opcode == AMDGPU::V_ACCVGPR_READ_B32_vi ||
      Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_vi) {
    Encoding |= getImplicitOpSelHiEncoding(Opcode);
  }

  // GFX10+ v_cmpx opcodes promoted to VOP3 have an implicit EXEC destination
  // that is not part of MI, so patch its encoding in here.
  if (AMDGPU::isGFX10Plus(STI) && isVCMPX64(Desc)) {
    assert((Encoding & 0xFF) == 0);
    Encoding |= MRI.getEncodingValue(AMDGPU::EXEC_LO) &
                AMDGPU::HWEncoding::REG_IDX_MASK;
  }

  for (unsigned i = 0; i < bytes; i++)
    CB.push_back((uint8_t)Encoding.extractBitsAsZExtValue(8, 8 * i));

  // NSA encoding appends the extra VGPR addresses after the base
  // instruction, padded out to a dword.
  if (AMDGPU::isGFX10Plus(STI) && Desc.TSFlags & SIInstrFlags::MIMG) {
    int vaddr0 =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vaddr0);
    int srsrc =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
    assert(vaddr0 >= 0 && srsrc > vaddr0);
    unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
    unsigned NumPadding = (-NumExtraAddrs) & 3;

    for (unsigned i = 0; i < NumExtraAddrs; ++i) {
      getMachineOpValue(MI, MI.getOperand(vaddr0 + 1 + i), Encoding, Fixups,
                        STI);
      CB.push_back((uint8_t)Encoding.getLimitedValue());
    }
    CB.append(NumPadding, 0);
  }

  if ((bytes > 8 && STI.hasFeature(AMDGPU::FeatureVOP3Literal)) ||
      (bytes > 4 && !STI.hasFeature(AMDGPU::FeatureVOP3Literal)))
    return;

  // ...

  // Check for additional literals.
  for (unsigned i = 0, e = Desc.getNumOperands(); i < e; ++i) {
    // Check if this operand should be encoded as [SV]Src.
    if (!AMDGPU::isSISrcOperand(Desc, i))
      continue;

    // Is this operand a literal immediate?
    const MCOperand &Op = MI.getOperand(i);
    auto Enc = getLitEncoding(Op, Desc.operands()[i], STI);
    if (!Enc || *Enc != 255)
      continue;

    // Yes! Encode it.
    int64_t Imm = 0;
    if (Op.isImm())
      Imm = Op.getImm();
    else if (Op.isExpr()) {
      if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
        Imm = C->getValue();
    }
    // ... (FP64 literals take the high 32 bits, cf. Hi_32; emit Imm as a
    //      32-bit word into CB)
  }
}
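
// Illustrative sketch, not part of this file: the NSA padding computation
// above, (-NumExtraAddrs) & 3, rounds the count of extra address bytes up
// to the next multiple of 4 via unsigned wraparound (paddingTo4 is a
// hypothetical name):
constexpr unsigned paddingTo4(unsigned N) { return (0u - N) & 3u; }

static_assert(paddingTo4(0) == 0);
static_assert(paddingTo4(1) == 3); // 1 extra address + 3 pad bytes = 4
static_assert(paddingTo4(2) == 2);
static_assert(paddingTo4(5) == 3); // 5 + 3 = 8, the next multiple of 4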

void AMDGPUMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                                            APInt &Op,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isExpr()) {
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_sopp_br;
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
    Op = APInt::getZero(96);
  } else {
    getMachineOpValue(MI, MO, Op, Fixups, STI);
  }
}

void AMDGPUMCCodeEmitter::getSMEMOffsetEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  auto Offset = MI.getOperand(OpNo).getImm();
  // VI only supports 20-bit unsigned offsets.
  assert(!AMDGPU::isVI(STI) || isUInt<20>(Offset));
  Op = Offset;
}
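
// Illustrative sketch, not part of this file: what the assert above checks.
// LLVM's isUInt<20>() tests that a value fits in 20 unsigned bits, the SMEM
// offset width supported on VI (isUInt20 is a hypothetical stand-in):
#include <cstdint>

constexpr bool isUInt20(uint64_t X) { return X < (UINT64_C(1) << 20); }

static_assert(isUInt20(0));
static_assert(isUInt20(0xFFFFF));   // largest encodable VI SMEM offset
static_assert(!isUInt20(0x100000)); // out of range for this form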

void AMDGPUMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
                                             APInt &Op,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isReg()) {
    unsigned Reg = MO.getReg();
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
    if (AMDGPU::isSGPR(AMDGPU::mc2PseudoReg(Reg), &MRI))
      RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
    Op = RegEnc;
    return;
  }

  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI);
  if (Enc && *Enc != 255) {
    Op = *Enc | SDWA9EncValues::SRC_SGPR_MASK;
    return;
  }

  llvm_unreachable("Unsupported operand kind");
}

void AMDGPUMCCodeEmitter::getSDWAVopcDstEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  unsigned Reg = MI.getOperand(OpNo).getReg();
  if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
  }
  Op = RegEnc;
}

void AMDGPUMCCodeEmitter::getAVOperandEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  unsigned Reg = MI.getOperand(OpNo).getReg();
  unsigned Enc = MRI.getEncodingValue(Reg);
  unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
  bool IsVGPROrAGPR = Enc & AMDGPU::HWEncoding::IS_VGPR_OR_AGPR;

  // VGPRs and AGPRs share the same encoding; SrcA and SrcB operands of MFMA
  // instructions distinguish them with a virtual ninth encoding bit.
  bool IsAGPR = false;
  // ... (set IsAGPR for AGPR operands of the affected instructions)
  Op = Idx | (IsVGPROrAGPR << 8) | (IsAGPR << 9);
}

static bool needsPCRel(const MCExpr *Expr) {
  switch (Expr->getKind()) {
  case MCExpr::SymbolRef: {
    auto *SE = cast<MCSymbolRefExpr>(Expr);
    // ... (false only for the absolute abs32 lo/hi relocation variants)
  }
  case MCExpr::Binary: {
    auto *BE = cast<MCBinaryExpr>(Expr);
    // ... (a difference of symbols is absolute; otherwise recurse into both
    //      operands)
  }
  case MCExpr::Unary:
    return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr());
  case MCExpr::Target:
  case MCExpr::Constant:
    return false;
  }
  llvm_unreachable("invalid kind");
}

void AMDGPUMCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                            const MCOperand &MO, APInt &Op,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  if (MO.isReg()) {
    unsigned Enc = MRI.getEncodingValue(MO.getReg());
    unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
    bool IsVGPROrAGPR = Enc & AMDGPU::HWEncoding::IS_VGPR_OR_AGPR;
    Op = Idx | (IsVGPROrAGPR << 8);
    return;
  }
  unsigned OpNo = &MO - MI.begin();
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
}
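
// Illustrative sketch, not part of this file: the &MO - MI.begin() trick
// above recovers an operand's index from its address, which works because
// operands are stored contiguously. Standalone equivalent with toy types:
#include <cassert>
#include <vector>

struct ToyOperand { int Value; };

int main() {
  std::vector<ToyOperand> Operands{{10}, {20}, {30}};
  const ToyOperand &MO = Operands[2];
  unsigned OpNo = &MO - Operands.data(); // pointer difference == index
  assert(OpNo == 2);
  return 0;
}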

void AMDGPUMCCodeEmitter::getMachineOpValueT16(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  if (MO.isReg()) {
    unsigned Enc = MRI.getEncodingValue(MO.getReg());
    unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
    bool IsVGPR = Enc & AMDGPU::HWEncoding::IS_VGPR_OR_AGPR;
    Op = Idx | (IsVGPR << 8);
    return;
  }
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);

  // Set the op_sel bit that corresponds to this operand when it refers to
  // the high half of a 16-bit register (cf. AMDGPU::isHi16Reg).
  if (OpNo == AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                         AMDGPU::OpName::src0_modifiers)) {
    // ...
    int VDstMOIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst);
    if (VDstMOIdx != -1) {
      auto DstReg = MI.getOperand(VDstMOIdx).getReg();
      // ... (also set the dst op_sel bit when DstReg is a hi16 register)
    }
    // ...
  } else if (OpNo == AMDGPU::getNamedOperandIdx(
                 MI.getOpcode(), AMDGPU::OpName::src1_modifiers)) {
    // ...
  } else if (OpNo == AMDGPU::getNamedOperandIdx(
                 MI.getOpcode(), AMDGPU::OpName::src2_modifiers)) {
    // ...
  } else {
    return;
  }

  // ...
  auto SrcReg = SrcMO.getReg();
  // ... (set the matching src op_sel bit when SrcReg is a hi16 register)
}

void AMDGPUMCCodeEmitter::getMachineOpValueT16Lo128(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  if (MO.isReg()) {
    uint16_t Encoding = MRI.getEncodingValue(MO.getReg());
    unsigned RegIdx = Encoding & AMDGPU::HWEncoding::REG_IDX_MASK;
    bool IsVGPR = Encoding & AMDGPU::HWEncoding::IS_VGPR_OR_AGPR;
    bool IsHi = Encoding & AMDGPU::HWEncoding::IS_HI16;
    assert((!IsVGPR || isUInt<7>(RegIdx)) && "VGPR0-VGPR127 expected!");
    Op = (IsVGPR ? 0x100 : 0) | (IsHi ? 0x80 : 0) | RegIdx;
    return;
  }
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
}
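
// Illustrative sketch, not part of this file: the 9-bit operand layout
// produced above. Bit 8 distinguishes VGPR from scalar operands, bit 7
// selects the high 16-bit half, and bits 6:0 hold the register index
// (packLo128 is a hypothetical name):
#include <cstdint>

constexpr uint16_t packLo128(bool IsVGPR, bool IsHi, unsigned RegIdx) {
  return (IsVGPR ? 0x100 : 0) | (IsHi ? 0x80 : 0) | (RegIdx & 0x7F);
}

static_assert(packLo128(true, false, 0) == 0x100); // v0
static_assert(packLo128(true, true, 5) == 0x185);  // high half of v5
static_assert(packLo128(false, false, 0x66) == 0x66);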

void AMDGPUMCCodeEmitter::getMachineOpValueCommon(
    const MCInst &MI, const MCOperand &MO, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  // ... (expressions become FK_PCRel_4 or FK_Data_4 fixups; see needsPCRel)

  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
    if (auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI)) {
      Op = *Enc;
      return;
    }
  } else if (MO.isImm()) {
    Op = MO.getImm();
    return;
  }

  llvm_unreachable("Encoding of this operand type is not supported yet.");
}

#include "AMDGPUGenMCCodeEmitter.inc"