  std::optional<uint32_t> getLitEncoding(const MCOperand &MO,
                                         const MCOperandInfo &OpInfo,
                                         const MCSubtargetInfo &STI) const;
  SIMCCodeEmitter(const SIMCCodeEmitter &) = delete;
  SIMCCodeEmitter &operator=(const SIMCCodeEmitter &) = delete;
  uint64_t getImplicitOpSelHiEncoding(int Opcode) const;

  void getMachineOpValueCommon(const MCInst &MI, const MCOperand &MO,
                               unsigned OpNo, APInt &Op,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;
MCCodeEmitter *llvm::createSIMCCodeEmitter(const MCInstrInfo &MCII,
                                           MCContext &Ctx) {
  return new SIMCCodeEmitter(MCII, Ctx);
}
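// How this factory is typically wired up (a sketch of the usual LLVM
// target-registration pattern; the actual call lives in the target's MC
// initialization code, not in this file):
//
//   TargetRegistry::RegisterMCCodeEmitter(getTheGCNTarget(),
//                                         createSIMCCodeEmitter);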
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;
  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);
  return 0;
}

static uint32_t getLit16IntEncoding(uint16_t Val, const MCSubtargetInfo &STI) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  return IntImm == 0 ? 255 : IntImm;
}
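// Worked example (comment sketch, not in the original source): the inline
// window maps small integers directly into the operand field, so
//   getIntInlineImmEncoding(0)   -> 128
//   getIntInlineImmEncoding(64)  -> 192
//   getIntInlineImmEncoding(-1)  -> 193
//   getIntInlineImmEncoding(-16) -> 208
// and any other value returns 0, which getLit16IntEncoding turns into 255,
// the "needs a literal constant" marker.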
static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI) {
  // ... f16 inline-float checks (+-0.5, +-1.0, +-2.0, +-4.0), mirroring the
  // f32 table below ...
  if (Val == 0x3118 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  return 255;
}
static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
  if (IntImm != 0)
    return IntImm;
  if (Val == llvm::bit_cast<uint32_t>(0.5f))
    return 240;
  if (Val == llvm::bit_cast<uint32_t>(-0.5f))
    return 241;
  if (Val == llvm::bit_cast<uint32_t>(1.0f))
    return 242;
  if (Val == llvm::bit_cast<uint32_t>(-1.0f))
    return 243;
  if (Val == llvm::bit_cast<uint32_t>(2.0f))
    return 244;
  if (Val == llvm::bit_cast<uint32_t>(-2.0f))
    return 245;
  if (Val == llvm::bit_cast<uint32_t>(4.0f))
    return 246;
  if (Val == llvm::bit_cast<uint32_t>(-4.0f))
    return 247;
  if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  return 255;
}
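// Resulting f32 inline-constant table (sketch derived from the code above):
//   0.5 -> 240   -0.5 -> 241   1.0 -> 242   -1.0 -> 243
//   2.0 -> 244   -2.0 -> 245   4.0 -> 246   -4.0 -> 247
//   0x3e22f983 (1/(2*pi)) -> 248, only with FeatureInv2PiInlineImm.
// Everything else yields 255, i.e. the value must travel as a literal dword.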
static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
  if (IntImm != 0)
    return IntImm;
  if (Val == llvm::bit_cast<uint64_t>(0.5))
    return 240;
  if (Val == llvm::bit_cast<uint64_t>(-0.5))
    return 241;
  if (Val == llvm::bit_cast<uint64_t>(1.0))
    return 242;
  if (Val == llvm::bit_cast<uint64_t>(-1.0))
    return 243;
  if (Val == llvm::bit_cast<uint64_t>(2.0))
    return 244;
  if (Val == llvm::bit_cast<uint64_t>(-2.0))
    return 245;
  if (Val == llvm::bit_cast<uint64_t>(4.0))
    return 246;
  if (Val == llvm::bit_cast<uint64_t>(-4.0))
    return 247;
  if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  return 255;
}
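// The f64 table reuses the same slot numbers with double bit patterns;
// 0x3fc45f306dc9c882 is simply 1/(2*pi) in double precision.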
std::optional<uint32_t>
SIMCCodeEmitter::getLitEncoding(const MCOperand &MO,
                                const MCOperandInfo &OpInfo,
                                const MCSubtargetInfo &STI) const {
  int64_t Imm;
  if (MO.isExpr()) {
    const auto *C = dyn_cast<MCConstantExpr>(MO.getExpr());
    if (!C)
      return 255;
    Imm = C->getValue();
  } else {
    if (!MO.isImm())
      return {};
    Imm = MO.getImm();
  }
  switch (OpInfo.OperandType) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
  case AMDGPU::OPERAND_REG_IMM_V2INT32:
  case AMDGPU::OPERAND_REG_IMM_V2FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
    return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
    return getLit64Encoding(static_cast<uint64_t>(Imm), STI);
  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
    return getLit16IntEncoding(static_cast<uint16_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
    return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_IMM_V2FP16: {
    if (!isUInt<16>(Imm) && STI.hasFeature(AMDGPU::FeatureVOP3Literal))
      return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
    if (OpInfo.OperandType == AMDGPU::OPERAND_REG_IMM_V2FP16)
      return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
    [[fallthrough]];
  }
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
    return getLit16IntEncoding(static_cast<uint16_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
    return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
  case AMDGPU::OPERAND_KIMM32:
  case AMDGPU::OPERAND_KIMM16:
    return MO.getImm();
  default:
    llvm_unreachable("invalid operand size");
  }
}
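// How callers below interpret the result (see encodeInstruction and
// getMachineOpValueCommon): a value below 255 encodes inline in the operand
// field; exactly 255 means "emit a trailing 32-bit literal"; std::nullopt
// means the operand cannot be encoded as a source at all.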
uint64_t SIMCCodeEmitter::getImplicitOpSelHiEncoding(int Opcode) const {
  using namespace AMDGPU::VOP3PEncoding;
  using namespace AMDGPU::OpName;

  if (AMDGPU::hasNamedOperand(Opcode, op_sel_hi)) {
    if (AMDGPU::hasNamedOperand(Opcode, src2))
      return 0;
    if (AMDGPU::hasNamedOperand(Opcode, src1))
      return OP_SEL_HI_2;
    if (AMDGPU::hasNamedOperand(Opcode, src0))
      return OP_SEL_HI_1 | OP_SEL_HI_2;
  }

  return OP_SEL_HI_0 | OP_SEL_HI_1 | OP_SEL_HI_2;
}
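// Put differently: every source slot the opcode lacks has its op_sel_hi bit
// forced to 1, so a two-source VOP3P instruction (src0 and src1 present,
// src2 absent) contributes OP_SEL_HI_2, and an opcode with no op_sel_hi
// operand at all gets all three bits set.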
static bool isVCMPX64(const MCInstrDesc &Desc) {
  return (Desc.TSFlags & SIInstrFlags::VOP3) &&
         Desc.hasImplicitDefOfPhysReg(AMDGPU::EXEC);
}

void SIMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  int Opcode = MI.getOpcode();
  APInt Encoding, Scratch;
  getBinaryCodeForInstr(MI, Fixups, Encoding, Scratch, STI);
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  unsigned bytes = Desc.getSize();

  // Set unused op_sel_hi bits to 1 for VOP3P and MAI instructions.
  if ((Desc.TSFlags & SIInstrFlags::VOP3P) ||
      Opcode == AMDGPU::V_ACCVGPR_READ_B32_vi ||
      Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_vi) {
    Encoding |= getImplicitOpSelHiEncoding(Opcode);
  }
  // GFX10+ v_cmpx promoted to VOP3 has an implied EXEC destination; the dst
  // field must still read as EXEC_LO even though the operand is implicit.
  if (AMDGPU::isGFX10Plus(STI) && isVCMPX64(Desc)) {
    assert((Encoding & 0xFF) == 0);
    Encoding |= MRI.getEncodingValue(AMDGPU::EXEC_LO);
  }

  // Emit the encoded instruction, one byte at a time.
  for (unsigned i = 0; i < bytes; i++) {
    OS.write((uint8_t)Encoding.extractBitsAsZExtValue(8, 8 * i));
  }
  // NSA (non-sequential address) encoding for GFX10+ MIMG: the extra VGPR
  // addresses between vaddr0 and srsrc follow the base instruction.
  if (AMDGPU::isGFX10Plus(STI) && (Desc.TSFlags & SIInstrFlags::MIMG)) {
    int vaddr0 = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vaddr0);
    int srsrc = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::srsrc);
    assert(vaddr0 >= 0 && srsrc > vaddr0);
    unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
    unsigned NumPadding = (-NumExtraAddrs) & 3;

    for (unsigned i = 0; i < NumExtraAddrs; ++i) {
      getMachineOpValue(MI, MI.getOperand(vaddr0 + 1 + i), Encoding, Fixups,
                        STI);
      OS.write((uint8_t)Encoding.getLimitedValue());
    }
    for (unsigned i = 0; i < NumPadding; ++i)
      OS.write(0);
  }
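  // Padding example (sketch): with NumExtraAddrs = 3 the extra-address bytes
  // end one short of a dword, and NumPadding = (-3u) & 3 = 1 zero byte rounds
  // the NSA block up to the next 4-byte boundary.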
  // Encodings this long cannot be followed by a trailing literal dword.
  if ((bytes > 8 && STI.hasFeature(AMDGPU::FeatureVOP3Literal)) ||
      (bytes > 4 && !STI.hasFeature(AMDGPU::FeatureVOP3Literal)))
    return;

  // Do not print literals from SISrc operands for insts with mandatory
  // literals.
  if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::imm))
    return;
  // Check for additional literals.
  for (unsigned i = 0, e = Desc.getNumOperands(); i < e; ++i) {
    // Check if this operand should be encoded as [SV]Src.
    if (!AMDGPU::isSISrcOperand(Desc, i))
      continue;

    // Is this operand a literal immediate?
    const MCOperand &Op = MI.getOperand(i);
    auto Enc = getLitEncoding(Op, Desc.operands()[i], STI);
    if (!Enc || *Enc != 255)
      continue;

    // Yes! Encode it.
    int64_t Imm = 0;

    if (Op.isImm())
      Imm = Op.getImm();
    else if (Op.isExpr()) {
      if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
        Imm = C->getValue();
    } else if (!Op.isExpr()) // Exprs will be replaced with a fixup value.
      llvm_unreachable("Must be immediate or expr");
    // Write the 32-bit literal, least significant byte first.
    for (unsigned j = 0; j < 4; j++) {
      OS.write((uint8_t)((Imm >> (8 * j)) & 0xff));
    }

    // Only one literal value allowed per instruction.
    break;
  }
}
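// Worked example (sketch): a literal such as 0x40490fdb (pi as f32, which has
// no inline slot) is written as the bytes db 0f 49 40 -- least significant
// byte first, matching the little-endian instruction stream.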
void SIMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                                        APInt &Op,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  if (MO.isExpr()) {
    // Unresolved branch targets become target-specific branch fixups.
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_sopp_br;
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
    Op = APInt::getZero(96);
  } else {
    getMachineOpValue(MI, MO, Op, Fixups, STI);
  }
}
void SIMCCodeEmitter::getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo,
                                            APInt &Op,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  auto Offset = MI.getOperand(OpNo).getImm();
  // VI only supports 20-bit unsigned offsets.
  assert(!AMDGPU::isVI(STI) || isUInt<20>(Offset));
  Op = Offset;
}
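// Example (sketch): isUInt<20> admits byte offsets up to 0xFFFFF on VI;
// anything larger must have been legalized before it reaches the encoder.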
void SIMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
                                         APInt &Op,
                                         SmallVectorImpl<MCFixup> &Fixups,
                                         const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isReg()) {
    unsigned Reg = MO.getReg();
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
    if (AMDGPU::isSGPR(AMDGPU::mc2PseudoReg(Reg), &MRI)) {
      RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
    }
    Op = RegEnc;
    return;
  } else {
    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI);
    if (Enc && *Enc != 255) {
      Op = *Enc | SDWA9EncValues::SRC_SGPR_MASK;
      return;
    }
  }

  llvm_unreachable("Unsupported operand kind");
}
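// Sketch of the SDWA9 source field: an 8-bit VGPR number sits in the low bits
// (SRC_VGPR_MASK), while SRC_SGPR_MASK acts as a 9th bit marking either an
// SGPR number or an inline-constant encoding obtained from getLitEncoding.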
void SIMCCodeEmitter::getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo,
                                             APInt &Op,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  unsigned Reg = MI.getOperand(OpNo).getReg();
  if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
  }
  Op = RegEnc;
}
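// Consequence of the code above (sketch): the default vcc destination encodes
// as 0, while an explicit SGPR destination carries its register number masked
// by VOPC_DST_SGPR_MASK plus the VOPC_DST_VCC_MASK marker bit.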
void SIMCCodeEmitter::getAVOperandEncoding(const MCInst &MI, unsigned OpNo,
                                           APInt &Op,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  unsigned Reg = MI.getOperand(OpNo).getReg();
  unsigned Enc = MRI.getEncodingValue(Reg);

  // VGPRs and AGPRs share hardware encodings; whether the operand is an AGPR
  // is carried as a virtual 9th bit of the register encoding.
  bool IsAGPR = false;
  if (MRI.getRegClass(AMDGPU::AGPR_32RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_64RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_96RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_128RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_160RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_192RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_224RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_256RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_288RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_320RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_352RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_384RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_512RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AGPR_LO16RegClassID).contains(Reg))
    IsAGPR = true;

  Op = Enc | (IsAGPR << 8);
}
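// Sketch: v3 and a3 share the same 8-bit hardware number; they differ only in
// the virtual bit 8 contributed by (IsAGPR << 8) above.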
static bool needsPCRel(const MCExpr *Expr) {
  switch (Expr->getKind()) {
  case MCExpr::SymbolRef: {
    auto *SE = cast<MCSymbolRefExpr>(Expr);
    MCSymbolRefExpr::VariantKind Kind = SE->getKind();
    return Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_LO &&
           Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_HI;
  }
  case MCExpr::Binary: {
    auto *BE = cast<MCBinaryExpr>(Expr);
    if (BE->getOpcode() == MCBinaryExpr::Sub)
      return false;
    return needsPCRel(BE->getLHS()) || needsPCRel(BE->getRHS());
  }
  case MCExpr::Unary:
    return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr());
  case MCExpr::Target:
  case MCExpr::Constant:
    return false;
  }
  llvm_unreachable("invalid kind");
}
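// Examples (sketch): a bare symbol "foo" or "foo+16" needs a PC-relative
// fixup; "foo-bar" (a Sub) and plain constants resolve absolutely, as do the
// abs32 lo/hi symbol variants.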
void SIMCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                                        APInt &Op,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  if (MO.isReg()) {
    Op = MRI.getEncodingValue(MO.getReg());
    return;
  }
  unsigned OpNo = &MO - MI.begin();
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
}
void SIMCCodeEmitter::getMachineOpValueCommon(
    const MCInst &MI, const MCOperand &MO, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
    // Non-constant expressions become 4-byte fixups: FK_PCRel_4 when
    // needsPCRel() says so, FK_Data_4 otherwise.
    MCFixupKind Kind = needsPCRel(MO.getExpr()) ? FK_PCRel_4 : FK_Data_4;
    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    uint32_t Offset = Desc.getSize();
    assert(Offset == 4 || Offset == 8);
    Fixups.push_back(MCFixup::create(Offset, MO.getExpr(), Kind, MI.getLoc()));
  }

  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
    if (auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI)) {
      Op = *Enc;
      return;
    }
  } else if (MO.isImm()) {
    Op = MO.getImm();
    return;
  }

  llvm_unreachable("Encoding of this operand type is not supported yet.");
}
#include "AMDGPUGenMCCodeEmitter.inc"