AMDGPUMCCodeEmitter.cpp
//===-- AMDGPUMCCodeEmitter.cpp - AMDGPU Code Emitter ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU code emitter produces machine code that can be executed
/// directly on the GPU device.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/TargetParser/SubtargetFeature.h"
#include <optional>

using namespace llvm;

namespace {

class AMDGPUMCCodeEmitter : public MCCodeEmitter {
  const MCRegisterInfo &MRI;
  const MCInstrInfo &MCII;

public:
  AMDGPUMCCodeEmitter(const MCInstrInfo &MCII, const MCRegisterInfo &MRI)
      : MRI(MRI), MCII(MCII) {}

  /// Encode the instruction and write it to the OS.
  void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  void getMachineOpValue(const MCInst &MI, const MCOperand &MO, APInt &Op,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;

  void getMachineOpValueT16(const MCInst &MI, unsigned OpNo, APInt &Op,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const;

  void getMachineOpValueT16Lo128(const MCInst &MI, unsigned OpNo, APInt &Op,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// Use a fixup to encode the simm16 field for SOPP branch
  /// instructions.
  void getSOPPBrEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;

  void getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  void getSDWASrcEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const;

  void getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;

  void getAVOperandEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const;

private:
  uint64_t getImplicitOpSelHiEncoding(int Opcode) const;
  void getMachineOpValueCommon(const MCInst &MI, const MCOperand &MO,
                               unsigned OpNo, APInt &Op,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// Encode an fp or int literal.
  std::optional<uint32_t> getLitEncoding(const MCOperand &MO,
                                         const MCOperandInfo &OpInfo,
                                         const MCSubtargetInfo &STI) const;

  void getBinaryCodeForInstr(const MCInst &MI, SmallVectorImpl<MCFixup> &Fixups,
                             APInt &Inst, APInt &Scratch,
                             const MCSubtargetInfo &STI) const;
};

} // end anonymous namespace

MCCodeEmitter *llvm::createAMDGPUMCCodeEmitter(const MCInstrInfo &MCII,
                                               MCContext &Ctx) {
  return new AMDGPUMCCodeEmitter(MCII, *Ctx.getRegisterInfo());
}

// Returns the encoding value to use if the given integer is an integer inline
// immediate value, or 0 if it is not.
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  return 0;
}
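
// For example, getIntInlineImmEncoding(5) returns 133 (128 + 5) and
// getIntInlineImmEncoding(-7) returns 199 (192 + 7); a result of 0 means the
// value is not an inline immediate and must take the literal path instead.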

static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == 0x3800) // 0.5
    return 240;

  if (Val == 0xB800) // -0.5
    return 241;

  if (Val == 0x3C00) // 1.0
    return 242;

  if (Val == 0xBC00) // -1.0
    return 243;

  if (Val == 0x4000) // 2.0
    return 244;

  if (Val == 0xC000) // -2.0
    return 245;

  if (Val == 0x4400) // 4.0
    return 246;

  if (Val == 0xC400) // -4.0
    return 247;

  if (Val == 0x3118 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  return 255;
}

static uint32_t getLitBF16Encoding(uint16_t Val) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  if (IntImm != 0)
    return IntImm;

  // clang-format off
  switch (Val) {
  case 0x3F00: return 240; // 0.5
  case 0xBF00: return 241; // -0.5
  case 0x3F80: return 242; // 1.0
  case 0xBF80: return 243; // -1.0
  case 0x4000: return 244; // 2.0
  case 0xC000: return 245; // -2.0
  case 0x4080: return 246; // 4.0
  case 0xC080: return 247; // -4.0
  case 0x3E22: return 248; // 1.0 / (2.0 * pi)
  default: return 255;
  }
  // clang-format on
}
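
// Note: the bf16 bit patterns above are the high 16 bits of the corresponding
// f32 patterns (e.g. 1.0f is 0x3F800000, so bf16 1.0 is 0x3F80).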

static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == llvm::bit_cast<uint32_t>(0.5f))
    return 240;

  if (Val == llvm::bit_cast<uint32_t>(-0.5f))
    return 241;

  if (Val == llvm::bit_cast<uint32_t>(1.0f))
    return 242;

  if (Val == llvm::bit_cast<uint32_t>(-1.0f))
    return 243;

  if (Val == llvm::bit_cast<uint32_t>(2.0f))
    return 244;

  if (Val == llvm::bit_cast<uint32_t>(-2.0f))
    return 245;

  if (Val == llvm::bit_cast<uint32_t>(4.0f))
    return 246;

  if (Val == llvm::bit_cast<uint32_t>(-4.0f))
    return 247;

  if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  return 255;
}
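
// Note: in the encodings above, 255 is not an inline constant; it signals to
// the caller that the operand must be emitted as a literal following the
// instruction word.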

static uint32_t getLit16IntEncoding(uint32_t Val, const MCSubtargetInfo &STI) {
  return getLit32Encoding(Val, STI);
}

static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == llvm::bit_cast<uint64_t>(0.5))
    return 240;

  if (Val == llvm::bit_cast<uint64_t>(-0.5))
    return 241;

  if (Val == llvm::bit_cast<uint64_t>(1.0))
    return 242;

  if (Val == llvm::bit_cast<uint64_t>(-1.0))
    return 243;

  if (Val == llvm::bit_cast<uint64_t>(2.0))
    return 244;

  if (Val == llvm::bit_cast<uint64_t>(-2.0))
    return 245;

  if (Val == llvm::bit_cast<uint64_t>(4.0))
    return 246;

  if (Val == llvm::bit_cast<uint64_t>(-4.0))
    return 247;

  if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  return 255;
}

std::optional<uint32_t>
AMDGPUMCCodeEmitter::getLitEncoding(const MCOperand &MO,
                                    const MCOperandInfo &OpInfo,
                                    const MCSubtargetInfo &STI) const {
  int64_t Imm;
  if (MO.isExpr()) {
    const auto *C = dyn_cast<MCConstantExpr>(MO.getExpr());
    if (!C)
      return 255;

    Imm = C->getValue();
  } else {

    assert(!MO.isDFPImm());

    if (!MO.isImm())
      return {};

    Imm = MO.getImm();
  }

  switch (OpInfo.OperandType) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
  case AMDGPU::OPERAND_REG_IMM_V2INT32:
  case AMDGPU::OPERAND_REG_IMM_V2FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
  case AMDGPU::OPERAND_INLINE_SPLIT_BARRIER_INT32:
    return getLit32Encoding(static_cast<uint32_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
    return getLit64Encoding(static_cast<uint64_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
    return getLit16IntEncoding(static_cast<uint32_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
    // FIXME Is this correct? What do inline immediates do on SI for f16 src
    // which does not have f16 support?
    return getLit16Encoding(static_cast<uint16_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_BF16:
  case AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED:
  case AMDGPU::OPERAND_REG_INLINE_C_BF16:
  case AMDGPU::OPERAND_REG_INLINE_AC_BF16:
    // We don't actually need to check Inv2Pi here because BF16 instructions
    // can only be emitted for targets that already support the feature.
    return getLitBF16Encoding(static_cast<uint16_t>(Imm));

  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
    return AMDGPU::getInlineEncodingV2I16(static_cast<uint32_t>(Imm))
        .value_or(255);

  case AMDGPU::OPERAND_REG_IMM_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
    return AMDGPU::getInlineEncodingV2F16(static_cast<uint32_t>(Imm))
        .value_or(255);

  case AMDGPU::OPERAND_REG_IMM_V2BF16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2BF16:
    return AMDGPU::getInlineEncodingV2BF16(static_cast<uint32_t>(Imm))
        .value_or(255);

  case AMDGPU::OPERAND_KIMM32:
  case AMDGPU::OPERAND_KIMM16:
    return MO.getImm();
  default:
    llvm_unreachable("invalid operand size");
  }
}

uint64_t AMDGPUMCCodeEmitter::getImplicitOpSelHiEncoding(int Opcode) const {
  using namespace AMDGPU::VOP3PEncoding;
  using namespace AMDGPU::OpName;

  if (AMDGPU::hasNamedOperand(Opcode, op_sel_hi)) {
    if (AMDGPU::hasNamedOperand(Opcode, src2))
      return 0;
    if (AMDGPU::hasNamedOperand(Opcode, src1))
      return OP_SEL_HI_2;
    if (AMDGPU::hasNamedOperand(Opcode, src0))
      return OP_SEL_HI_1 | OP_SEL_HI_2;
  }

  return OP_SEL_HI_0 | OP_SEL_HI_1 | OP_SEL_HI_2;
}

static bool isVCMPX64(const MCInstrDesc &Desc) {
  return (Desc.TSFlags & SIInstrFlags::VOP3) &&
         Desc.hasImplicitDefOfPhysReg(AMDGPU::EXEC);
}

void AMDGPUMCCodeEmitter::encodeInstruction(const MCInst &MI,
                                            SmallVectorImpl<char> &CB,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  int Opcode = MI.getOpcode();
  APInt Encoding, Scratch;
  getBinaryCodeForInstr(MI, Fixups, Encoding, Scratch, STI);
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  unsigned bytes = Desc.getSize();

  // Set unused op_sel_hi bits to 1 for VOP3P and MAI instructions.
  // Note that accvgpr_read/write are MAI, have src0, but do not use op_sel.
  if ((Desc.TSFlags & SIInstrFlags::VOP3P) ||
      Opcode == AMDGPU::V_ACCVGPR_READ_B32_vi ||
      Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_vi) {
    Encoding |= getImplicitOpSelHiEncoding(Opcode);
  }

  // GFX10+ v_cmpx opcodes promoted to VOP3 have implied dst=EXEC.
  // Documentation requires dst to be encoded as EXEC (0x7E), but it looks
  // like the actual value encoded for the dst operand is ignored by HW. It
  // was decided to define dst as "do not care" in the td files so that the
  // disassembler accepts any dst value. However, dst is encoded as EXEC for
  // compatibility with SP3.
  if (AMDGPU::isGFX10Plus(STI) && isVCMPX64(Desc)) {
    assert((Encoding & 0xFF) == 0);
    Encoding |= MRI.getEncodingValue(AMDGPU::EXEC_LO) &
                AMDGPU::HWEncoding::REG_IDX_MASK;
  }

  for (unsigned i = 0; i < bytes; i++) {
    CB.push_back((uint8_t)Encoding.extractBitsAsZExtValue(8, 8 * i));
  }

  // NSA encoding.
  if (AMDGPU::isGFX10Plus(STI) && Desc.TSFlags & SIInstrFlags::MIMG) {
    int vaddr0 = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vaddr0);
    int srsrc = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::srsrc);
    assert(vaddr0 >= 0 && srsrc > vaddr0);
    unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
    unsigned NumPadding = (-NumExtraAddrs) & 3;
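    // Note: (-NumExtraAddrs) & 3 equals (4 - NumExtraAddrs % 4) % 4, i.e. the
    // number of zero bytes needed to pad the extra addresses to a whole dword.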

    for (unsigned i = 0; i < NumExtraAddrs; ++i) {
      getMachineOpValue(MI, MI.getOperand(vaddr0 + 1 + i), Encoding, Fixups,
                        STI);
      CB.push_back((uint8_t)Encoding.getLimitedValue());
    }
    CB.append(NumPadding, 0);
  }

  if ((bytes > 8 && STI.hasFeature(AMDGPU::FeatureVOP3Literal)) ||
      (bytes > 4 && !STI.hasFeature(AMDGPU::FeatureVOP3Literal)))
    return;

  // Do not print literals from SISrc Operands for insts with mandatory literals
  if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::imm))
    return;

  // Check for additional literals
  for (unsigned i = 0, e = Desc.getNumOperands(); i < e; ++i) {

    // Check if this operand should be encoded as [SV]Src
    if (!AMDGPU::isSISrcOperand(Desc, i))
      continue;

    // Is this operand a literal immediate?
    const MCOperand &Op = MI.getOperand(i);
    auto Enc = getLitEncoding(Op, Desc.operands()[i], STI);
    if (!Enc || *Enc != 255)
      continue;

    // Yes! Encode it
    int64_t Imm = 0;

    if (Op.isImm())
      Imm = Op.getImm();
    else if (Op.isExpr()) {
      if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
        Imm = C->getValue();
    } else // Exprs will be replaced with a fixup value.
      llvm_unreachable("Must be immediate or expr");

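    // A 64-bit floating-point literal is emitted as its high 32 bits only;
    // the hardware supplies zeros for the low half.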
    if (Desc.operands()[i].OperandType == AMDGPU::OPERAND_REG_IMM_FP64)
      Imm = Hi_32(Imm);

    support::endian::write<uint32_t>(CB, Imm, llvm::endianness::little);

    // Only one literal value allowed
    break;
  }
}

void AMDGPUMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                                            APInt &Op,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isExpr()) {
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_sopp_br;
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
    Op = APInt::getZero(96);
  } else {
    getMachineOpValue(MI, MO, Op, Fixups, STI);
  }
}

void AMDGPUMCCodeEmitter::getSMEMOffsetEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  auto Offset = MI.getOperand(OpNo).getImm();
  // VI only supports 20-bit unsigned offsets.
  assert(!AMDGPU::isVI(STI) || isUInt<20>(Offset));
  Op = Offset;
}

void AMDGPUMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
                                             APInt &Op,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isReg()) {
    unsigned Reg = MO.getReg();
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
    if (AMDGPU::isSGPR(AMDGPU::mc2PseudoReg(Reg), &MRI)) {
      RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
    }
    Op = RegEnc;
    return;
  } else {
    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI);
    if (Enc && *Enc != 255) {
      Op = *Enc | SDWA9EncValues::SRC_SGPR_MASK;
      return;
    }
  }

  llvm_unreachable("Unsupported operand kind");
}

void AMDGPUMCCodeEmitter::getSDWAVopcDstEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  unsigned Reg = MO.getReg();
  if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
  }
  Op = RegEnc;
}
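
// Note: in getSDWAVopcDstEncoding, VOPC_DST_VCC_MASK flags an explicit SGPR
// destination; when the destination is VCC itself the field stays zero and
// the hardware writes VCC implicitly.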

void AMDGPUMCCodeEmitter::getAVOperandEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  unsigned Reg = MI.getOperand(OpNo).getReg();
  unsigned Enc = MRI.getEncodingValue(Reg);
  unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
  bool IsVGPROrAGPR = Enc & AMDGPU::HWEncoding::IS_VGPR_OR_AGPR;

  // VGPR and AGPR have the same encoding, but SrcA and SrcB operands of mfma
  // instructions use acc[0:1] modifier bits to distinguish. These bits are
  // encoded as a virtual 9th bit of the register for these operands.
  bool IsAGPR = false;
  if (MRI.getRegClass(AMDGPU::AGPR_32RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_64RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_96RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_128RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_160RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_192RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_224RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_256RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_288RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_320RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_352RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_384RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AReg_512RegClassID).contains(Reg) ||
      MRI.getRegClass(AMDGPU::AGPR_LO16RegClassID).contains(Reg))
    IsAGPR = true;

  Op = Idx | (IsVGPROrAGPR << 8) | (IsAGPR << 9);
}

static bool needsPCRel(const MCExpr *Expr) {
  switch (Expr->getKind()) {
  case MCExpr::SymbolRef: {
    auto *SE = cast<MCSymbolRefExpr>(Expr);
    MCSymbolRefExpr::VariantKind Kind = SE->getKind();
    return Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_LO &&
           Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_HI;
  }
  case MCExpr::Binary: {
    auto *BE = cast<MCBinaryExpr>(Expr);
    if (BE->getOpcode() == MCBinaryExpr::Sub)
      return false;
    return needsPCRel(BE->getLHS()) || needsPCRel(BE->getRHS());
  }
  case MCExpr::Unary:
    return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr());
  case MCExpr::Target:
  case MCExpr::Constant:
    return false;
  }
  llvm_unreachable("invalid kind");
}
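
// Note: needsPCRel treats a Sub expression (e.g. sym_a - sym_b) as absolute,
// since the difference of two locations is a fixed distance that does not
// require a PC-relative fixup.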

void AMDGPUMCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                            const MCOperand &MO, APInt &Op,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  if (MO.isReg()) {
    unsigned Enc = MRI.getEncodingValue(MO.getReg());
    unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
    bool IsVGPR = Enc & AMDGPU::HWEncoding::IS_VGPR_OR_AGPR;
    Op = Idx | (IsVGPR << 8);
    return;
  }
  unsigned OpNo = &MO - MI.begin();
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
}

void AMDGPUMCCodeEmitter::getMachineOpValueT16(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  if (MO.isReg()) {
    unsigned Enc = MRI.getEncodingValue(MO.getReg());
    unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
    bool IsVGPR = Enc & AMDGPU::HWEncoding::IS_VGPR_OR_AGPR;
    Op = Idx | (IsVGPR << 8);
    return;
  }
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
  // VGPRs include the suffix/op_sel bit in the register encoding, but
  // immediates and SGPRs include it in src_modifiers. Therefore, copy the
  // op_sel bit from the src operands into src_modifier operands if Op is
  // src_modifiers and the corresponding src is a VGPR.
  int SrcMOIdx = -1;
  assert(OpNo < INT_MAX);
  if ((int)OpNo == AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                              AMDGPU::OpName::src0_modifiers)) {
    SrcMOIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
    int VDstMOIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst);
    if (VDstMOIdx != -1) {
      auto DstReg = MI.getOperand(VDstMOIdx).getReg();
      if (AMDGPU::isHi(DstReg, MRI))
        Op |= SISrcMods::DST_OP_SEL;
    }
  } else if ((int)OpNo == AMDGPU::getNamedOperandIdx(
                 MI.getOpcode(), AMDGPU::OpName::src1_modifiers))
    SrcMOIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
  else if ((int)OpNo == AMDGPU::getNamedOperandIdx(
                 MI.getOpcode(), AMDGPU::OpName::src2_modifiers))
    SrcMOIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src2);
  if (SrcMOIdx == -1)
    return;

  const MCOperand &SrcMO = MI.getOperand(SrcMOIdx);
  if (!SrcMO.isReg())
    return;
  auto SrcReg = SrcMO.getReg();
  if (AMDGPU::isSGPR(SrcReg, &MRI))
    return;
  if (AMDGPU::isHi(SrcReg, MRI))
    Op |= SISrcMods::OP_SEL_0;
}

void AMDGPUMCCodeEmitter::getMachineOpValueT16Lo128(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  if (MO.isReg()) {
    uint16_t Encoding = MRI.getEncodingValue(MO.getReg());
    unsigned RegIdx = Encoding & AMDGPU::HWEncoding::REG_IDX_MASK;
    bool IsHi = Encoding & AMDGPU::HWEncoding::IS_HI;
    bool IsVGPR = Encoding & AMDGPU::HWEncoding::IS_VGPR_OR_AGPR;
    assert((!IsVGPR || isUInt<7>(RegIdx)) && "VGPR0-VGPR127 expected!");
    Op = (IsVGPR ? 0x100 : 0) | (IsHi ? 0x80 : 0) | RegIdx;
    return;
  }
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
}
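
// Note: the lo128 form above packs a 16-bit register operand into nine bits:
// bit 8 = VGPR flag, bit 7 = high-half (op_sel) bit, bits 6:0 = register
// index, which is why only v0-v127 are addressable here.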

void AMDGPUMCCodeEmitter::getMachineOpValueCommon(
    const MCInst &MI, const MCOperand &MO, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {

  if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
    // FIXME: Whether this expression is PCRel should not depend on what the
    // expression looks like. Given that this is just a general expression,
    // it should probably be FK_Data_4 and whatever is producing
    //
    //    s_add_u32 s2, s2, (extern_const_addrspace+16
    //
    // and expecting a PCRel should instead produce
    //
    // .Ltmp1:
    //    s_add_u32 s2, s2, (extern_const_addrspace+16)-.Ltmp1
    MCFixupKind Kind;
    if (needsPCRel(MO.getExpr()))
      Kind = FK_PCRel_4;
    else
      Kind = FK_Data_4;

    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    uint32_t Offset = Desc.getSize();
    assert(Offset == 4 || Offset == 8);

    Fixups.push_back(MCFixup::create(Offset, MO.getExpr(), Kind, MI.getLoc()));
  }

  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
    if (auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI)) {
      Op = *Enc;
      return;
    }
  } else if (MO.isImm()) {
    Op = MO.getImm();
    return;
  }

  llvm_unreachable("Encoding of this operand type is not supported yet.");
}

#include "AMDGPUGenMCCodeEmitter.inc"