//===-- AMDGPUMCCodeEmitter.cpp - AMDGPU Code Emitter ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU code emitter produces machine code that can be executed
/// directly on the GPU device.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/EndianStream.h"
#include <optional>

using namespace llvm;

namespace {

class AMDGPUMCCodeEmitter : public MCCodeEmitter {
  const MCRegisterInfo &MRI;
  const MCInstrInfo &MCII;

public:
  AMDGPUMCCodeEmitter(const MCInstrInfo &MCII, const MCRegisterInfo &MRI)
      : MRI(MRI), MCII(MCII) {}

  /// Encode the instruction and append the bytes to CB.
  void encodeInstruction(const MCInst &MI, SmallVectorImpl<char> &CB,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  void getMachineOpValue(const MCInst &MI, const MCOperand &MO, APInt &Op,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;

  void getMachineOpValueT16(const MCInst &MI, unsigned OpNo, APInt &Op,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const;

  void getMachineOpValueT16Lo128(const MCInst &MI, unsigned OpNo, APInt &Op,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// Use a fixup to encode the simm16 field for SOPP branch
  /// instructions.
  void getSOPPBrEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;

  void getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  void getSDWASrcEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                          SmallVectorImpl<MCFixup> &Fixups,
                          const MCSubtargetInfo &STI) const;

  void getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;

  void getAVOperandEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
                            SmallVectorImpl<MCFixup> &Fixups,
                            const MCSubtargetInfo &STI) const;

private:
  uint64_t getImplicitOpSelHiEncoding(int Opcode) const;
  void getMachineOpValueCommon(const MCInst &MI, const MCOperand &MO,
                               unsigned OpNo, APInt &Op,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// Encode an fp or int literal.
  std::optional<uint32_t> getLitEncoding(const MCOperand &MO,
                                         const MCOperandInfo &OpInfo,
                                         const MCSubtargetInfo &STI) const;

  void getBinaryCodeForInstr(const MCInst &MI, SmallVectorImpl<MCFixup> &Fixups,
                             APInt &Inst, APInt &Scratch,
                             const MCSubtargetInfo &STI) const;
};

} // end anonymous namespace

MCCodeEmitter *llvm::createAMDGPUMCCodeEmitter(const MCInstrInfo &MCII,
                                               MCContext &Ctx) {
  return new AMDGPUMCCodeEmitter(MCII, *Ctx.getRegisterInfo());
}

// Returns the encoding value to use if the given integer is an integer inline
// immediate value, or 0 if it is not.
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  return 0;
}
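// Worked examples (illustrative, derived directly from the ranges above):
//   getIntInlineImmEncoding(0)   == 128
//   getIntInlineImmEncoding(64)  == 192
//   getIntInlineImmEncoding(-1)  == 193
//   getIntInlineImmEncoding(-16) == 208
//   getIntInlineImmEncoding(65)  == 0   (no inline form; caller falls back)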

static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == 0x3800) // 0.5
    return 240;

  if (Val == 0xB800) // -0.5
    return 241;

  if (Val == 0x3C00) // 1.0
    return 242;

  if (Val == 0xBC00) // -1.0
    return 243;

  if (Val == 0x4000) // 2.0
    return 244;

  if (Val == 0xC000) // -2.0
    return 245;

  if (Val == 0x4400) // 4.0
    return 246;

  if (Val == 0xC400) // -4.0
    return 247;

  if (Val == 0x3118 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  return 255;
}
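// Note: 255 selects the hardware's "literal constant" source encoding; when a
// value has no inline form, the operand field is set to 255 and the literal
// itself is appended after the instruction word (see encodeInstruction below).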

static uint32_t getLitBF16Encoding(uint16_t Val) {
  uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
  if (IntImm != 0)
    return IntImm;

  // clang-format off
  switch (Val) {
  case 0x3F00: return 240; // 0.5
  case 0xBF00: return 241; // -0.5
  case 0x3F80: return 242; // 1.0
  case 0xBF80: return 243; // -1.0
  case 0x4000: return 244; // 2.0
  case 0xC000: return 245; // -2.0
  case 0x4080: return 246; // 4.0
  case 0xC080: return 247; // -4.0
  case 0x3E22: return 248; // 1.0 / (2.0 * pi)
  default: return 255;
  }
  // clang-format on
}

static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == llvm::bit_cast<uint32_t>(0.5f))
    return 240;

  if (Val == llvm::bit_cast<uint32_t>(-0.5f))
    return 241;

  if (Val == llvm::bit_cast<uint32_t>(1.0f))
    return 242;

  if (Val == llvm::bit_cast<uint32_t>(-1.0f))
    return 243;

  if (Val == llvm::bit_cast<uint32_t>(2.0f))
    return 244;

  if (Val == llvm::bit_cast<uint32_t>(-2.0f))
    return 245;

  if (Val == llvm::bit_cast<uint32_t>(4.0f))
    return 246;

  if (Val == llvm::bit_cast<uint32_t>(-4.0f))
    return 247;

  if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  return 255;
}
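// For example, getLit32Encoding(0x3F000000, STI) == 240 (the bit pattern of
// 0.5f), while an arbitrary value such as 0x3F000001 has no inline form and
// yields 255, forcing a trailing 32-bit literal.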

static uint32_t getLit16IntEncoding(uint32_t Val, const MCSubtargetInfo &STI) {
  return getLit32Encoding(Val, STI);
}

static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI) {
  uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
  if (IntImm != 0)
    return IntImm;

  if (Val == llvm::bit_cast<uint64_t>(0.5))
    return 240;

  if (Val == llvm::bit_cast<uint64_t>(-0.5))
    return 241;

  if (Val == llvm::bit_cast<uint64_t>(1.0))
    return 242;

  if (Val == llvm::bit_cast<uint64_t>(-1.0))
    return 243;

  if (Val == llvm::bit_cast<uint64_t>(2.0))
    return 244;

  if (Val == llvm::bit_cast<uint64_t>(-2.0))
    return 245;

  if (Val == llvm::bit_cast<uint64_t>(4.0))
    return 246;

  if (Val == llvm::bit_cast<uint64_t>(-4.0))
    return 247;

  if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
      STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
    return 248;

  return 255;
}
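// The integer check reinterprets the value as int64_t, so for example
// getLit64Encoding(UINT64_MAX, STI) == 193 since the bit pattern is -1, an
// integer inline immediate.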

std::optional<uint32_t>
AMDGPUMCCodeEmitter::getLitEncoding(const MCOperand &MO,
                                    const MCOperandInfo &OpInfo,
                                    const MCSubtargetInfo &STI) const {
  int64_t Imm;
  if (MO.isExpr()) {
    const auto *C = dyn_cast<MCConstantExpr>(MO.getExpr());
    if (!C)
      return 255;

    Imm = C->getValue();
  } else {

    assert(!MO.isDFPImm());

    if (!MO.isImm())
      return {};

    Imm = MO.getImm();
  }

  switch (OpInfo.OperandType) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
  case AMDGPU::OPERAND_REG_IMM_V2INT32:
  case AMDGPU::OPERAND_REG_IMM_V2FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
  case AMDGPU::OPERAND_INLINE_SPLIT_BARRIER_INT32:
    return getLit32Encoding(static_cast<uint32_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
    return getLit64Encoding(static_cast<uint64_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
    return getLit16IntEncoding(static_cast<uint32_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
    // FIXME Is this correct? What do inline immediates do on SI for f16 src
    // which does not have f16 support?
    return getLit16Encoding(static_cast<uint16_t>(Imm), STI);

  case AMDGPU::OPERAND_REG_IMM_BF16:
  case AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED:
  case AMDGPU::OPERAND_REG_INLINE_C_BF16:
  case AMDGPU::OPERAND_REG_INLINE_AC_BF16:
    // We don't actually need to check Inv2Pi here because BF16 instructions
    // can only be emitted for targets that already support the feature.
    return getLitBF16Encoding(static_cast<uint16_t>(Imm));

  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
    return AMDGPU::getInlineEncodingV2I16(static_cast<uint32_t>(Imm))
        .value_or(255);

  case AMDGPU::OPERAND_REG_IMM_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
    return AMDGPU::getInlineEncodingV2F16(static_cast<uint32_t>(Imm))
        .value_or(255);

  case AMDGPU::OPERAND_REG_IMM_V2BF16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2BF16:
    return AMDGPU::getInlineEncodingV2BF16(static_cast<uint32_t>(Imm))
        .value_or(255);

  case AMDGPU::OPERAND_KIMM32:
  case AMDGPU::OPERAND_KIMM16:
    return MO.getImm();
  default:
    llvm_unreachable("invalid operand size");
  }
}
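// Broad summary of the return values: std::nullopt means the operand is not an
// immediate at all, 128-208 is an integer inline immediate, 240-248 a
// floating-point inline immediate, and 255 requests a trailing literal; KIMM
// operands return the raw immediate value instead.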

uint64_t AMDGPUMCCodeEmitter::getImplicitOpSelHiEncoding(int Opcode) const {
  using namespace AMDGPU::VOP3PEncoding;
  using namespace AMDGPU::OpName;

  if (AMDGPU::hasNamedOperand(Opcode, op_sel_hi)) {
    if (AMDGPU::hasNamedOperand(Opcode, src2))
      return 0;
    if (AMDGPU::hasNamedOperand(Opcode, src1))
      return OP_SEL_HI_2;
    if (AMDGPU::hasNamedOperand(Opcode, src0))
      return OP_SEL_HI_1 | OP_SEL_HI_2;
  }

  return OP_SEL_HI_0 | OP_SEL_HI_1 | OP_SEL_HI_2;
}
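// For example (illustrative): a VOP3P instruction with src0 and src1 but no
// src2 returns OP_SEL_HI_2, forcing only the op_sel_hi bit of the absent
// third source to 1.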

static bool isVCMPX64(const MCInstrDesc &Desc) {
  return (Desc.TSFlags & SIInstrFlags::VOP3) &&
         Desc.hasImplicitDefOfPhysReg(AMDGPU::EXEC);
}

void AMDGPUMCCodeEmitter::encodeInstruction(const MCInst &MI,
                                            SmallVectorImpl<char> &CB,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  int Opcode = MI.getOpcode();
  APInt Encoding, Scratch;
  getBinaryCodeForInstr(MI, Fixups, Encoding, Scratch, STI);
  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  unsigned bytes = Desc.getSize();

  // Set unused op_sel_hi bits to 1 for VOP3P and MAI instructions.
  // Note that accvgpr_read/write are MAI, have src0, but do not use op_sel.
  if ((Desc.TSFlags & SIInstrFlags::VOP3P) ||
      Opcode == AMDGPU::V_ACCVGPR_READ_B32_vi ||
      Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_vi) {
    Encoding |= getImplicitOpSelHiEncoding(Opcode);
  }

  // GFX10+ v_cmpx opcodes promoted to VOP3 have an implied dst=EXEC.
  // Documentation requires dst to be encoded as EXEC (0x7E),
  // but it looks like the actual value encoded for the dst operand
  // is ignored by HW. It was decided to define dst as "do not care"
  // in the td files to allow the disassembler to accept any dst value.
  // However, dst is encoded as EXEC for compatibility with SP3.
  if (AMDGPU::isGFX10Plus(STI) && isVCMPX64(Desc)) {
    assert((Encoding & 0xFF) == 0);
    Encoding |= MRI.getEncodingValue(AMDGPU::EXEC_LO) &
                AMDGPU::HWEncoding::REG_IDX_MASK;
  }

  for (unsigned i = 0; i < bytes; i++) {
    CB.push_back((uint8_t)Encoding.extractBitsAsZExtValue(8, 8 * i));
  }

  // NSA encoding.
  if (AMDGPU::isGFX10Plus(STI) && Desc.TSFlags & SIInstrFlags::MIMG) {
    int vaddr0 = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vaddr0);
    int srsrc = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::srsrc);
    assert(vaddr0 >= 0 && srsrc > vaddr0);
    unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
    unsigned NumPadding = (-NumExtraAddrs) & 3;

    for (unsigned i = 0; i < NumExtraAddrs; ++i) {
      getMachineOpValue(MI, MI.getOperand(vaddr0 + 1 + i), Encoding, Fixups,
                        STI);
      CB.push_back((uint8_t)Encoding.getLimitedValue());
    }
    CB.append(NumPadding, 0);
  }

  if ((bytes > 8 && STI.hasFeature(AMDGPU::FeatureVOP3Literal)) ||
      (bytes > 4 && !STI.hasFeature(AMDGPU::FeatureVOP3Literal)))
    return;

  // Do not emit literals from [SV]Src operands for instructions with a
  // mandatory literal.
  if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::imm))
    return;

  // Check for additional literals.
  for (unsigned i = 0, e = Desc.getNumOperands(); i < e; ++i) {

    // Check if this operand should be encoded as [SV]Src.
    if (!AMDGPU::isSISrcOperand(Desc, i))
      continue;

    // Is this operand a literal immediate?
    const MCOperand &Op = MI.getOperand(i);
    auto Enc = getLitEncoding(Op, Desc.operands()[i], STI);
    if (!Enc || *Enc != 255)
      continue;

    // Yes! Encode it.
    int64_t Imm = 0;

    if (Op.isImm())
      Imm = Op.getImm();
    else if (Op.isExpr()) {
      if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
        Imm = C->getValue();
    } else // Exprs will be replaced with a fixup value.
      llvm_unreachable("Must be immediate or expr");

    if (Desc.operands()[i].OperandType == AMDGPU::OPERAND_REG_IMM_FP64)
      Imm = Hi_32(Imm);

    support::endian::write<uint32_t>(CB, Imm, llvm::endianness::little);

    // Only one literal value allowed.
    break;
  }
}
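// Illustrative layout: a 4-byte instruction whose src0 operand encodes as 255
// (say, the literal 0x12345678) is followed by that dword, so 8 bytes total
// are appended to CB, all little-endian.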

void AMDGPUMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
                                            APInt &Op,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isExpr()) {
    const MCExpr *Expr = MO.getExpr();
    MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_sopp_br;
    Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
    Op = APInt::getZero(96);
  } else {
    getMachineOpValue(MI, MO, Op, Fixups, STI);
  }
}
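// The zero written above is a placeholder: fixup_si_sopp_br is a 16-bit
// PC-relative fixup, and the assembler later patches the simm16 field once
// the branch target's offset is known.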

void AMDGPUMCCodeEmitter::getSMEMOffsetEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  auto Offset = MI.getOperand(OpNo).getImm();
  // VI only supports 20-bit unsigned offsets.
  assert(!AMDGPU::isVI(STI) || isUInt<20>(Offset));
  Op = Offset;
}

void AMDGPUMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
                                             APInt &Op,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  if (MO.isReg()) {
    unsigned Reg = MO.getReg();
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
    if (AMDGPU::isSGPR(AMDGPU::mc2PseudoReg(Reg), &MRI)) {
      RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
    }
    Op = RegEnc;
    return;
  } else {
    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI);
    if (Enc && *Enc != 255) {
      Op = *Enc | SDWA9EncValues::SRC_SGPR_MASK;
      return;
    }
  }

  llvm_unreachable("Unsupported operand kind");
}
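// In the SDWA9 form, VGPRs, SGPRs, and inline constants share one source
// field; SRC_SGPR_MASK acts as an extra bit that distinguishes SGPRs and
// inline constants from plain VGPR numbers.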

void AMDGPUMCCodeEmitter::getSDWAVopcDstEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  using namespace AMDGPU::SDWA;

  uint64_t RegEnc = 0;

  const MCOperand &MO = MI.getOperand(OpNo);

  unsigned Reg = MO.getReg();
  if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
    RegEnc |= MRI.getEncodingValue(Reg);
    RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
  }
  Op = RegEnc;
}
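// VCC (or VCC_LO) is the implicit default and encodes as zero; any other SGPR
// destination is encoded as its register number with VOPC_DST_VCC_MASK set on
// top.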

void AMDGPUMCCodeEmitter::getAVOperandEncoding(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  unsigned Reg = MI.getOperand(OpNo).getReg();
  unsigned Enc = MRI.getEncodingValue(Reg);
  unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
  bool IsVGPROrAGPR =
      Enc & (AMDGPU::HWEncoding::IS_VGPR | AMDGPU::HWEncoding::IS_AGPR);

  // VGPR and AGPR have the same encoding, but SrcA and SrcB operands of mfma
  // instructions use acc[0:1] modifier bits to distinguish. These bits are
  // encoded as a virtual 9th bit of the register for these operands.
  bool IsAGPR = Enc & AMDGPU::HWEncoding::IS_AGPR;

  Op = Idx | (IsVGPROrAGPR << 8) | (IsAGPR << 9);
}
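// For example (illustrative): a source in a5 (an AGPR) sets both bit 8
// (VGPR-or-AGPR) and bit 9 (AGPR), while v5 sets only bit 8; both keep the
// register index 5 in the low bits.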

static bool needsPCRel(const MCExpr *Expr) {
  switch (Expr->getKind()) {
  case MCExpr::SymbolRef: {
    auto *SE = cast<MCSymbolRefExpr>(Expr);
    MCSymbolRefExpr::VariantKind Kind = SE->getKind();
    return Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_LO &&
           Kind != MCSymbolRefExpr::VK_AMDGPU_ABS32_HI;
  }
  case MCExpr::Binary: {
    auto *BE = cast<MCBinaryExpr>(Expr);
    if (BE->getOpcode() == MCBinaryExpr::Sub)
      return false;
    return needsPCRel(BE->getLHS()) || needsPCRel(BE->getRHS());
  }
  case MCExpr::Unary:
    return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr());
  case MCExpr::Target:
  case MCExpr::Constant:
    return false;
  }
  llvm_unreachable("invalid kind");
}
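// E.g. a bare symbol reference such as `foo` needs a PC-relative fixup, while
// a difference like `foo - .Ltmp0` (a Sub binary expression) does not.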

void AMDGPUMCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                            const MCOperand &MO, APInt &Op,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  if (MO.isReg()) {
    unsigned Enc = MRI.getEncodingValue(MO.getReg());
    unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
    bool IsVGPROrAGPR =
        Enc & (AMDGPU::HWEncoding::IS_VGPR | AMDGPU::HWEncoding::IS_AGPR);
    Op = Idx | (IsVGPROrAGPR << 8);
    return;
  }
  unsigned OpNo = &MO - MI.begin();
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
}

void AMDGPUMCCodeEmitter::getMachineOpValueT16(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  if (MO.isReg()) {
    unsigned Enc = MRI.getEncodingValue(MO.getReg());
    unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
    bool IsVGPR = Enc & AMDGPU::HWEncoding::IS_VGPR;
    Op = Idx | (IsVGPR << 8);
    return;
  }
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
  // VGPRs include the suffix/op_sel bit in the register encoding, but
  // immediates and SGPRs include it in src_modifiers. Therefore, copy the
  // op_sel bit from the src operands into src_modifier operands if Op is
  // src_modifiers and the corresponding src is a VGPR.
  int SrcMOIdx = -1;
  assert(OpNo < INT_MAX);
  if ((int)OpNo == AMDGPU::getNamedOperandIdx(
                       MI.getOpcode(), AMDGPU::OpName::src0_modifiers)) {
    SrcMOIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
    int VDstMOIdx =
        AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst);
    if (VDstMOIdx != -1) {
      auto DstReg = MI.getOperand(VDstMOIdx).getReg();
      if (AMDGPU::isHi16Reg(DstReg, MRI))
        Op |= SISrcMods::DST_OP_SEL;
    }
  } else if ((int)OpNo == AMDGPU::getNamedOperandIdx(
                              MI.getOpcode(), AMDGPU::OpName::src1_modifiers))
    SrcMOIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
  else if ((int)OpNo == AMDGPU::getNamedOperandIdx(
                            MI.getOpcode(), AMDGPU::OpName::src2_modifiers))
    SrcMOIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src2);
  if (SrcMOIdx == -1)
    return;

  const MCOperand &SrcMO = MI.getOperand(SrcMOIdx);
  if (!SrcMO.isReg())
    return;
  auto SrcReg = SrcMO.getReg();
  if (AMDGPU::isSGPR(SrcReg, &MRI))
    return;
  if (AMDGPU::isHi16Reg(SrcReg, MRI))
    Op |= SISrcMods::OP_SEL_0;
}

void AMDGPUMCCodeEmitter::getMachineOpValueT16Lo128(
    const MCInst &MI, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpNo);
  if (MO.isReg()) {
    uint16_t Encoding = MRI.getEncodingValue(MO.getReg());
    unsigned RegIdx = Encoding & AMDGPU::HWEncoding::REG_IDX_MASK;
    bool IsHi = Encoding & AMDGPU::HWEncoding::IS_HI16;
    bool IsVGPR = Encoding & AMDGPU::HWEncoding::IS_VGPR;
    assert((!IsVGPR || isUInt<7>(RegIdx)) && "VGPR0-VGPR127 expected!");
    Op = (IsVGPR ? 0x100 : 0) | (IsHi ? 0x80 : 0) | RegIdx;
    return;
  }
  getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
}
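// For example (illustrative): the high half of v5 encodes as
// 0x100 | 0x80 | 5 == 0x185, while an SGPR operand leaves bit 8 clear.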

void AMDGPUMCCodeEmitter::getMachineOpValueCommon(
    const MCInst &MI, const MCOperand &MO, unsigned OpNo, APInt &Op,
    SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
  int64_t Val;
  if (MO.isExpr() && MO.getExpr()->evaluateAsAbsolute(Val)) {
    Op = Val;
    return;
  }

  if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
    // FIXME: Whether this expression is PC-relative or not should not depend
    // on what the expression looks like. Given that this is just a general
    // expression, it should probably be FK_Data_4 and whatever is producing
    //
    //    s_add_u32 s2, s2, (extern_const_addrspace+16)
    //
    // and expecting a PCRel should instead produce
    //
    // .Ltmp1:
    //   s_add_u32 s2, s2, (extern_const_addrspace+16)-.Ltmp1
    MCFixupKind Kind;
    if (needsPCRel(MO.getExpr()))
      Kind = FK_PCRel_4;
    else
      Kind = FK_Data_4;

    const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
    uint32_t Offset = Desc.getSize();
    assert(Offset == 4 || Offset == 8);

    Fixups.push_back(MCFixup::create(Offset, MO.getExpr(), Kind, MI.getLoc()));
  }

  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
  if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
    if (auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI)) {
      Op = *Enc;
      return;
    }
  } else if (MO.isImm()) {
    Op = MO.getImm();
    return;
  }

  llvm_unreachable("Encoding of this operand type is not supported yet.");
}

#include "AMDGPUGenMCCodeEmitter.inc"