LLVM 20.0.0git
AMDGPUMCCodeEmitter.cpp
Go to the documentation of this file.
1//===-- AMDGPUMCCodeEmitter.cpp - AMDGPU Code Emitter ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// The AMDGPU code emitter produces machine code that can be executed
11/// directly on the GPU device.
12//
13//===----------------------------------------------------------------------===//
14
17#include "SIDefines.h"
19#include "llvm/ADT/APInt.h"
21#include "llvm/MC/MCContext.h"
22#include "llvm/MC/MCExpr.h"
23#include "llvm/MC/MCInstrInfo.h"
28#include <optional>
29
30using namespace llvm;
31
32namespace {
33
34class AMDGPUMCCodeEmitter : public MCCodeEmitter {
35 const MCRegisterInfo &MRI;
36 const MCInstrInfo &MCII;
37
38public:
39 AMDGPUMCCodeEmitter(const MCInstrInfo &MCII, const MCRegisterInfo &MRI)
40 : MRI(MRI), MCII(MCII) {}
41
42 /// Encode the instruction and write it to the OS.
45 const MCSubtargetInfo &STI) const override;
46
47 void getMachineOpValue(const MCInst &MI, const MCOperand &MO, APInt &Op,
49 const MCSubtargetInfo &STI) const;
50
51 void getMachineOpValueT16(const MCInst &MI, unsigned OpNo, APInt &Op,
53 const MCSubtargetInfo &STI) const;
54
55 void getMachineOpValueT16Lo128(const MCInst &MI, unsigned OpNo, APInt &Op,
57 const MCSubtargetInfo &STI) const;
58
59 /// Use a fixup to encode the simm16 field for SOPP branch
60 /// instructions.
61 void getSOPPBrEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
63 const MCSubtargetInfo &STI) const;
64
65 void getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
67 const MCSubtargetInfo &STI) const;
68
69 void getSDWASrcEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
71 const MCSubtargetInfo &STI) const;
72
73 void getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
75 const MCSubtargetInfo &STI) const;
76
77 void getAVOperandEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
79 const MCSubtargetInfo &STI) const;
80
81private:
82 uint64_t getImplicitOpSelHiEncoding(int Opcode) const;
83 void getMachineOpValueCommon(const MCInst &MI, const MCOperand &MO,
84 unsigned OpNo, APInt &Op,
86 const MCSubtargetInfo &STI) const;
87
88 /// Encode an fp or int literal.
89 std::optional<uint32_t> getLitEncoding(const MCOperand &MO,
90 const MCOperandInfo &OpInfo,
91 const MCSubtargetInfo &STI) const;
92
93 void getBinaryCodeForInstr(const MCInst &MI, SmallVectorImpl<MCFixup> &Fixups,
94 APInt &Inst, APInt &Scratch,
95 const MCSubtargetInfo &STI) const;
96};
97
98} // end anonymous namespace
99
101 MCContext &Ctx) {
102 return new AMDGPUMCCodeEmitter(MCII, *Ctx.getRegisterInfo());
103}
104
// Returns the encoding value to use if the given integer is an integer inline
// immediate value, or 0 if it is not.
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  // Integers 0..64 encode to 128..192.
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

  // Integers -16..-1 encode to 193..208.
  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  // 0 doubles as the "not inline" sentinel: the real encoding of literal 0
  // is 128, so no valid result collides with it.
  return 0;
}
117
119 uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
120 if (IntImm != 0)
121 return IntImm;
122
123 if (Val == 0x3800) // 0.5
124 return 240;
125
126 if (Val == 0xB800) // -0.5
127 return 241;
128
129 if (Val == 0x3C00) // 1.0
130 return 242;
131
132 if (Val == 0xBC00) // -1.0
133 return 243;
134
135 if (Val == 0x4000) // 2.0
136 return 244;
137
138 if (Val == 0xC000) // -2.0
139 return 245;
140
141 if (Val == 0x4400) // 4.0
142 return 246;
143
144 if (Val == 0xC400) // -4.0
145 return 247;
146
147 if (Val == 0x3118 && // 1.0 / (2.0 * pi)
148 STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
149 return 248;
150
151 return 255;
152}
153
155 uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
156 if (IntImm != 0)
157 return IntImm;
158
159 // clang-format off
160 switch (Val) {
161 case 0x3F00: return 240; // 0.5
162 case 0xBF00: return 241; // -0.5
163 case 0x3F80: return 242; // 1.0
164 case 0xBF80: return 243; // -1.0
165 case 0x4000: return 244; // 2.0
166 case 0xC000: return 245; // -2.0
167 case 0x4080: return 246; // 4.0
168 case 0xC080: return 247; // -4.0
169 case 0x3E22: return 248; // 1.0 / (2.0 * pi)
170 default: return 255;
171 }
172 // clang-format on
173}
174
176 uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
177 if (IntImm != 0)
178 return IntImm;
179
180 if (Val == llvm::bit_cast<uint32_t>(0.5f))
181 return 240;
182
183 if (Val == llvm::bit_cast<uint32_t>(-0.5f))
184 return 241;
185
186 if (Val == llvm::bit_cast<uint32_t>(1.0f))
187 return 242;
188
189 if (Val == llvm::bit_cast<uint32_t>(-1.0f))
190 return 243;
191
192 if (Val == llvm::bit_cast<uint32_t>(2.0f))
193 return 244;
194
195 if (Val == llvm::bit_cast<uint32_t>(-2.0f))
196 return 245;
197
198 if (Val == llvm::bit_cast<uint32_t>(4.0f))
199 return 246;
200
201 if (Val == llvm::bit_cast<uint32_t>(-4.0f))
202 return 247;
203
204 if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
205 STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
206 return 248;
207
208 return 255;
209}
210
212 return getLit32Encoding(Val, STI);
213}
214
216 uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
217 if (IntImm != 0)
218 return IntImm;
219
220 if (Val == llvm::bit_cast<uint64_t>(0.5))
221 return 240;
222
223 if (Val == llvm::bit_cast<uint64_t>(-0.5))
224 return 241;
225
226 if (Val == llvm::bit_cast<uint64_t>(1.0))
227 return 242;
228
229 if (Val == llvm::bit_cast<uint64_t>(-1.0))
230 return 243;
231
232 if (Val == llvm::bit_cast<uint64_t>(2.0))
233 return 244;
234
235 if (Val == llvm::bit_cast<uint64_t>(-2.0))
236 return 245;
237
238 if (Val == llvm::bit_cast<uint64_t>(4.0))
239 return 246;
240
241 if (Val == llvm::bit_cast<uint64_t>(-4.0))
242 return 247;
243
244 if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
245 STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
246 return 248;
247
248 return 255;
249}
250
251std::optional<uint32_t>
252AMDGPUMCCodeEmitter::getLitEncoding(const MCOperand &MO,
253 const MCOperandInfo &OpInfo,
254 const MCSubtargetInfo &STI) const {
255 int64_t Imm;
256 if (MO.isExpr()) {
257 const auto *C = dyn_cast<MCConstantExpr>(MO.getExpr());
258 if (!C)
259 return 255;
260
261 Imm = C->getValue();
262 } else {
263
264 assert(!MO.isDFPImm());
265
266 if (!MO.isImm())
267 return {};
268
269 Imm = MO.getImm();
270 }
271
272 switch (OpInfo.OperandType) {
285 return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
286
292 return getLit64Encoding(static_cast<uint64_t>(Imm), STI);
293
297 return getLit16IntEncoding(static_cast<uint32_t>(Imm), STI);
298
303 // FIXME Is this correct? What do inline immediates do on SI for f16 src
304 // which does not have f16 support?
305 return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
306
311 // We don't actually need to check Inv2Pi here because BF16 instructions can
312 // only be emitted for targets that already support the feature.
313 return getLitBF16Encoding(static_cast<uint16_t>(Imm));
314
318 return AMDGPU::getInlineEncodingV2I16(static_cast<uint32_t>(Imm))
319 .value_or(255);
320
324 return AMDGPU::getInlineEncodingV2F16(static_cast<uint32_t>(Imm))
325 .value_or(255);
326
330 return AMDGPU::getInlineEncodingV2BF16(static_cast<uint32_t>(Imm))
331 .value_or(255);
332
335 return MO.getImm();
336 default:
337 llvm_unreachable("invalid operand size");
338 }
339}
340
341uint64_t AMDGPUMCCodeEmitter::getImplicitOpSelHiEncoding(int Opcode) const {
342 using namespace AMDGPU::VOP3PEncoding;
343 using namespace AMDGPU::OpName;
344
345 if (AMDGPU::hasNamedOperand(Opcode, op_sel_hi)) {
346 if (AMDGPU::hasNamedOperand(Opcode, src2))
347 return 0;
348 if (AMDGPU::hasNamedOperand(Opcode, src1))
349 return OP_SEL_HI_2;
350 if (AMDGPU::hasNamedOperand(Opcode, src0))
351 return OP_SEL_HI_1 | OP_SEL_HI_2;
352 }
354}
355
356static bool isVCMPX64(const MCInstrDesc &Desc) {
357 return (Desc.TSFlags & SIInstrFlags::VOP3) &&
358 Desc.hasImplicitDefOfPhysReg(AMDGPU::EXEC);
359}
360
361void AMDGPUMCCodeEmitter::encodeInstruction(const MCInst &MI,
364 const MCSubtargetInfo &STI) const {
365 int Opcode = MI.getOpcode();
366 APInt Encoding, Scratch;
367 getBinaryCodeForInstr(MI, Fixups, Encoding, Scratch, STI);
368 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
369 unsigned bytes = Desc.getSize();
370
371 // Set unused op_sel_hi bits to 1 for VOP3P and MAI instructions.
372 // Note that accvgpr_read/write are MAI, have src0, but do not use op_sel.
373 if ((Desc.TSFlags & SIInstrFlags::VOP3P) ||
374 Opcode == AMDGPU::V_ACCVGPR_READ_B32_vi ||
375 Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_vi) {
376 Encoding |= getImplicitOpSelHiEncoding(Opcode);
377 }
378
379 // GFX10+ v_cmpx opcodes promoted to VOP3 have implied dst=EXEC.
380 // Documentation requires dst to be encoded as EXEC (0x7E),
381 // but it looks like the actual value encoded for dst operand
382 // is ignored by HW. It was decided to define dst as "do not care"
383 // in td files to allow disassembler accept any dst value.
384 // However, dst is encoded as EXEC for compatibility with SP3.
385 if (AMDGPU::isGFX10Plus(STI) && isVCMPX64(Desc)) {
386 assert((Encoding & 0xFF) == 0);
387 Encoding |= MRI.getEncodingValue(AMDGPU::EXEC_LO) &
389 }
390
391 for (unsigned i = 0; i < bytes; i++) {
392 CB.push_back((uint8_t)Encoding.extractBitsAsZExtValue(8, 8 * i));
393 }
394
395 // NSA encoding.
396 if (AMDGPU::isGFX10Plus(STI) && Desc.TSFlags & SIInstrFlags::MIMG) {
397 int vaddr0 = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
398 AMDGPU::OpName::vaddr0);
399 int srsrc = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
400 AMDGPU::OpName::srsrc);
401 assert(vaddr0 >= 0 && srsrc > vaddr0);
402 unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
403 unsigned NumPadding = (-NumExtraAddrs) & 3;
404
405 for (unsigned i = 0; i < NumExtraAddrs; ++i) {
406 getMachineOpValue(MI, MI.getOperand(vaddr0 + 1 + i), Encoding, Fixups,
407 STI);
408 CB.push_back((uint8_t)Encoding.getLimitedValue());
409 }
410 CB.append(NumPadding, 0);
411 }
412
413 if ((bytes > 8 && STI.hasFeature(AMDGPU::FeatureVOP3Literal)) ||
414 (bytes > 4 && !STI.hasFeature(AMDGPU::FeatureVOP3Literal)))
415 return;
416
417 // Do not print literals from SISrc Operands for insts with mandatory literals
418 if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::imm))
419 return;
420
421 // Check for additional literals
422 for (unsigned i = 0, e = Desc.getNumOperands(); i < e; ++i) {
423
424 // Check if this operand should be encoded as [SV]Src
426 continue;
427
428 // Is this operand a literal immediate?
429 const MCOperand &Op = MI.getOperand(i);
430 auto Enc = getLitEncoding(Op, Desc.operands()[i], STI);
431 if (!Enc || *Enc != 255)
432 continue;
433
434 // Yes! Encode it
435 int64_t Imm = 0;
436
437 if (Op.isImm())
438 Imm = Op.getImm();
439 else if (Op.isExpr()) {
440 if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
441 Imm = C->getValue();
442 } else // Exprs will be replaced with a fixup value.
443 llvm_unreachable("Must be immediate or expr");
444
445 if (Desc.operands()[i].OperandType == AMDGPU::OPERAND_REG_IMM_FP64)
446 Imm = Hi_32(Imm);
447
448 support::endian::write<uint32_t>(CB, Imm, llvm::endianness::little);
449
450 // Only one literal value allowed
451 break;
452 }
453}
454
455void AMDGPUMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
456 APInt &Op,
458 const MCSubtargetInfo &STI) const {
459 const MCOperand &MO = MI.getOperand(OpNo);
460
461 if (MO.isExpr()) {
462 const MCExpr *Expr = MO.getExpr();
464 Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
465 Op = APInt::getZero(96);
466 } else {
467 getMachineOpValue(MI, MO, Op, Fixups, STI);
468 }
469}
470
471void AMDGPUMCCodeEmitter::getSMEMOffsetEncoding(
472 const MCInst &MI, unsigned OpNo, APInt &Op,
473 SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
474 auto Offset = MI.getOperand(OpNo).getImm();
475 // VI only supports 20-bit unsigned offsets.
476 assert(!AMDGPU::isVI(STI) || isUInt<20>(Offset));
477 Op = Offset;
478}
479
480void AMDGPUMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
481 APInt &Op,
483 const MCSubtargetInfo &STI) const {
484 using namespace AMDGPU::SDWA;
485
486 uint64_t RegEnc = 0;
487
488 const MCOperand &MO = MI.getOperand(OpNo);
489
490 if (MO.isReg()) {
491 MCRegister Reg = MO.getReg();
492 RegEnc |= MRI.getEncodingValue(Reg);
493 RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
495 RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
496 }
497 Op = RegEnc;
498 return;
499 } else {
500 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
501 auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI);
502 if (Enc && *Enc != 255) {
503 Op = *Enc | SDWA9EncValues::SRC_SGPR_MASK;
504 return;
505 }
506 }
507
508 llvm_unreachable("Unsupported operand kind");
509}
510
511void AMDGPUMCCodeEmitter::getSDWAVopcDstEncoding(
512 const MCInst &MI, unsigned OpNo, APInt &Op,
513 SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
514 using namespace AMDGPU::SDWA;
515
516 uint64_t RegEnc = 0;
517
518 const MCOperand &MO = MI.getOperand(OpNo);
519
520 MCRegister Reg = MO.getReg();
521 if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
522 RegEnc |= MRI.getEncodingValue(Reg);
523 RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
524 RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
525 }
526 Op = RegEnc;
527}
528
529void AMDGPUMCCodeEmitter::getAVOperandEncoding(
530 const MCInst &MI, unsigned OpNo, APInt &Op,
531 SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
532 MCRegister Reg = MI.getOperand(OpNo).getReg();
533 unsigned Enc = MRI.getEncodingValue(Reg);
534 unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
535 bool IsVGPROrAGPR =
537
538 // VGPR and AGPR have the same encoding, but SrcA and SrcB operands of mfma
539 // instructions use acc[0:1] modifier bits to distinguish. These bits are
540 // encoded as a virtual 9th bit of the register for these operands.
541 bool IsAGPR = Enc & AMDGPU::HWEncoding::IS_AGPR;
542
543 Op = Idx | (IsVGPROrAGPR << 8) | (IsAGPR << 9);
544}
545
546static bool needsPCRel(const MCExpr *Expr) {
547 switch (Expr->getKind()) {
548 case MCExpr::SymbolRef: {
549 auto *SE = cast<MCSymbolRefExpr>(Expr);
550 MCSymbolRefExpr::VariantKind Kind = SE->getKind();
553 }
554 case MCExpr::Binary: {
555 auto *BE = cast<MCBinaryExpr>(Expr);
556 if (BE->getOpcode() == MCBinaryExpr::Sub)
557 return false;
558 return needsPCRel(BE->getLHS()) || needsPCRel(BE->getRHS());
559 }
560 case MCExpr::Unary:
561 return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr());
562 case MCExpr::Target:
563 case MCExpr::Constant:
564 return false;
565 }
566 llvm_unreachable("invalid kind");
567}
568
569void AMDGPUMCCodeEmitter::getMachineOpValue(const MCInst &MI,
570 const MCOperand &MO, APInt &Op,
572 const MCSubtargetInfo &STI) const {
573 if (MO.isReg()){
574 unsigned Enc = MRI.getEncodingValue(MO.getReg());
575 unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
576 bool IsVGPROrAGPR =
578 Op = Idx | (IsVGPROrAGPR << 8);
579 return;
580 }
581 unsigned OpNo = &MO - MI.begin();
582 getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
583}
584
585void AMDGPUMCCodeEmitter::getMachineOpValueT16(
586 const MCInst &MI, unsigned OpNo, APInt &Op,
587 SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
588 const MCOperand &MO = MI.getOperand(OpNo);
589 if (MO.isReg()) {
590 unsigned Enc = MRI.getEncodingValue(MO.getReg());
591 unsigned Idx = Enc & AMDGPU::HWEncoding::REG_IDX_MASK;
592 bool IsVGPR = Enc & AMDGPU::HWEncoding::IS_VGPR;
593 Op = Idx | (IsVGPR << 8);
594 return;
595 }
596 getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
597 // VGPRs include the suffix/op_sel bit in the register encoding, but
598 // immediates and SGPRs include it in src_modifiers. Therefore, copy the
599 // op_sel bit from the src operands into src_modifier operands if Op is
600 // src_modifiers and the corresponding src is a VGPR
601 int SrcMOIdx = -1;
602 assert(OpNo < INT_MAX);
603 if ((int)OpNo == AMDGPU::getNamedOperandIdx(MI.getOpcode(),
604 AMDGPU::OpName::src0_modifiers)) {
605 SrcMOIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
606 int VDstMOIdx =
607 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdst);
608 if (VDstMOIdx != -1) {
609 auto DstReg = MI.getOperand(VDstMOIdx).getReg();
610 if (AMDGPU::isHi16Reg(DstReg, MRI))
612 }
613 } else if ((int)OpNo == AMDGPU::getNamedOperandIdx(
614 MI.getOpcode(), AMDGPU::OpName::src1_modifiers))
615 SrcMOIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src1);
616 else if ((int)OpNo == AMDGPU::getNamedOperandIdx(
617 MI.getOpcode(), AMDGPU::OpName::src2_modifiers))
618 SrcMOIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src2);
619 if (SrcMOIdx == -1)
620 return;
621
622 const MCOperand &SrcMO = MI.getOperand(SrcMOIdx);
623 if (!SrcMO.isReg())
624 return;
625 auto SrcReg = SrcMO.getReg();
626 if (AMDGPU::isSGPR(SrcReg, &MRI))
627 return;
628 if (AMDGPU::isHi16Reg(SrcReg, MRI))
630}
631
632void AMDGPUMCCodeEmitter::getMachineOpValueT16Lo128(
633 const MCInst &MI, unsigned OpNo, APInt &Op,
634 SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
635 const MCOperand &MO = MI.getOperand(OpNo);
636 if (MO.isReg()) {
637 uint16_t Encoding = MRI.getEncodingValue(MO.getReg());
638 unsigned RegIdx = Encoding & AMDGPU::HWEncoding::REG_IDX_MASK;
639 bool IsHi = Encoding & AMDGPU::HWEncoding::IS_HI16;
640 bool IsVGPR = Encoding & AMDGPU::HWEncoding::IS_VGPR;
641 assert((!IsVGPR || isUInt<7>(RegIdx)) && "VGPR0-VGPR127 expected!");
642 Op = (IsVGPR ? 0x100 : 0) | (IsHi ? 0x80 : 0) | RegIdx;
643 return;
644 }
645 getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
646}
647
648void AMDGPUMCCodeEmitter::getMachineOpValueCommon(
649 const MCInst &MI, const MCOperand &MO, unsigned OpNo, APInt &Op,
650 SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
651 int64_t Val;
652 if (MO.isExpr() && MO.getExpr()->evaluateAsAbsolute(Val)) {
653 Op = Val;
654 return;
655 }
656
657 if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
658 // FIXME: If this is expression is PCRel or not should not depend on what
659 // the expression looks like. Given that this is just a general expression,
660 // it should probably be FK_Data_4 and whatever is producing
661 //
662 // s_add_u32 s2, s2, (extern_const_addrspace+16
663 //
664 // And expecting a PCRel should instead produce
665 //
666 // .Ltmp1:
667 // s_add_u32 s2, s2, (extern_const_addrspace+16)-.Ltmp1
669 if (needsPCRel(MO.getExpr()))
671 else
672 Kind = FK_Data_4;
673
674 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
675 uint32_t Offset = Desc.getSize();
676 assert(Offset == 4 || Offset == 8);
677
678 Fixups.push_back(MCFixup::create(Offset, MO.getExpr(), Kind, MI.getLoc()));
679 }
680
681 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
682 if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
683 if (auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI)) {
684 Op = *Enc;
685 return;
686 }
687 } else if (MO.isImm()) {
688 Op = MO.getImm();
689 return;
690 }
691
692 llvm_unreachable("Encoding of this operand type is not supported yet.");
693}
694
695#include "AMDGPUGenMCCodeEmitter.inc"
unsigned const MachineRegisterInfo * MRI
static uint32_t getLit16IntEncoding(uint32_t Val, const MCSubtargetInfo &STI)
static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI)
static uint32_t getLitBF16Encoding(uint16_t Val)
static bool isVCMPX64(const MCInstrDesc &Desc)
static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI)
static uint32_t getIntInlineImmEncoding(IntTy Imm)
static bool needsPCRel(const MCExpr *Expr)
static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI)
Provides AMDGPU specific target descriptions.
This file implements a class to represent arbitrary precision integral constant values and operations...
Returns the subtype a function will return at a given Idx; this should correspond to the result type of an ExtractValue instruction executed with just that one Idx.
IRTranslator LLVM IR MI
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
Class for arbitrary precision integers.
Definition: APInt.h:78
uint64_t extractBitsAsZExtValue(unsigned numBits, unsigned bitPosition) const
Definition: APInt.cpp:493
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition: APInt.h:475
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition: APInt.h:200
This class represents an Operation in the Expression.
@ Sub
Subtraction.
Definition: MCExpr.h:518
MCCodeEmitter - Generic instruction encoding interface.
Definition: MCCodeEmitter.h:21
virtual void encodeInstruction(const MCInst &Inst, SmallVectorImpl< char > &CB, SmallVectorImpl< MCFixup > &Fixups, const MCSubtargetInfo &STI) const =0
Encode the given Inst to bytes and append to CB.
Context object for machine code objects.
Definition: MCContext.h:83
const MCRegisterInfo * getRegisterInfo() const
Definition: MCContext.h:414
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:34
@ Unary
Unary expressions.
Definition: MCExpr.h:40
@ Constant
Constant expressions.
Definition: MCExpr.h:38
@ SymbolRef
References to labels and assigned expressions.
Definition: MCExpr.h:39
@ Target
Target specific expression.
Definition: MCExpr.h:41
@ Binary
Binary expressions.
Definition: MCExpr.h:37
ExprKind getKind() const
Definition: MCExpr.h:78
static MCFixup create(uint32_t Offset, const MCExpr *Value, MCFixupKind Kind, SMLoc Loc=SMLoc())
Definition: MCFixup.h:87
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:185
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:26
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition: MCInstrDesc.h:85
uint8_t OperandType
Information about the type of the operand.
Definition: MCInstrDesc.h:97
Instances of this class represent operands of the MCInst class.
Definition: MCInst.h:37
int64_t getImm() const
Definition: MCInst.h:81
bool isImm() const
Definition: MCInst.h:63
bool isReg() const
Definition: MCInst.h:62
MCRegister getReg() const
Returns the register number.
Definition: MCInst.h:70
bool isDFPImm() const
Definition: MCInst.h:65
const MCExpr * getExpr() const
Definition: MCInst.h:115
bool isExpr() const
Definition: MCInst.h:66
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:683
void push_back(const T &Elt)
Definition: SmallVector.h:413
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isSGPR(MCRegister Reg, const MCRegisterInfo *TRI)
Is Reg - scalar register.
bool isHi16Reg(MCRegister Reg, const MCRegisterInfo &MRI)
LLVM_READONLY int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx)
@ fixup_si_sopp_br
16-bit PC relative fixup for SOPP branch instructions.
bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this an AMDGPU specific source operand? These include registers, inline constants,...
LLVM_READONLY bool hasNamedOperand(uint64_t Opcode, uint64_t NamedIdx)
std::optional< unsigned > getInlineEncodingV2F16(uint32_t Literal)
bool isGFX10Plus(const MCSubtargetInfo &STI)
@ OPERAND_KIMM32
Operand with 32-bit immediate that uses the constant bus.
Definition: SIDefines.h:234
@ OPERAND_REG_IMM_INT64
Definition: SIDefines.h:201
@ OPERAND_REG_IMM_V2FP16
Definition: SIDefines.h:211
@ OPERAND_REG_INLINE_C_V2INT32
Definition: SIDefines.h:227
@ OPERAND_REG_INLINE_C_FP64
Definition: SIDefines.h:223
@ OPERAND_REG_INLINE_C_BF16
Definition: SIDefines.h:220
@ OPERAND_REG_INLINE_C_V2BF16
Definition: SIDefines.h:225
@ OPERAND_REG_IMM_V2INT16
Definition: SIDefines.h:212
@ OPERAND_REG_IMM_BF16
Definition: SIDefines.h:205
@ OPERAND_REG_INLINE_AC_V2FP16
Definition: SIDefines.h:246
@ OPERAND_REG_IMM_INT32
Operands with register or 32-bit immediate.
Definition: SIDefines.h:200
@ OPERAND_REG_IMM_V2BF16
Definition: SIDefines.h:210
@ OPERAND_REG_IMM_BF16_DEFERRED
Definition: SIDefines.h:207
@ OPERAND_REG_IMM_FP16
Definition: SIDefines.h:206
@ OPERAND_REG_INLINE_C_INT64
Definition: SIDefines.h:219
@ OPERAND_REG_INLINE_AC_BF16
Definition: SIDefines.h:240
@ OPERAND_REG_INLINE_C_INT16
Operands with register or inline constant.
Definition: SIDefines.h:217
@ OPERAND_REG_INLINE_AC_INT16
Operands with an AccVGPR register or inline constant.
Definition: SIDefines.h:238
@ OPERAND_REG_IMM_FP64
Definition: SIDefines.h:204
@ OPERAND_REG_INLINE_C_V2FP16
Definition: SIDefines.h:226
@ OPERAND_REG_INLINE_AC_V2INT16
Definition: SIDefines.h:244
@ OPERAND_REG_INLINE_AC_FP16
Definition: SIDefines.h:241
@ OPERAND_REG_INLINE_AC_INT32
Definition: SIDefines.h:239
@ OPERAND_REG_INLINE_AC_FP32
Definition: SIDefines.h:242
@ OPERAND_REG_INLINE_AC_V2BF16
Definition: SIDefines.h:245
@ OPERAND_REG_IMM_V2INT32
Definition: SIDefines.h:213
@ OPERAND_REG_IMM_FP32
Definition: SIDefines.h:203
@ OPERAND_REG_INLINE_C_FP32
Definition: SIDefines.h:222
@ OPERAND_REG_INLINE_C_INT32
Definition: SIDefines.h:218
@ OPERAND_REG_INLINE_C_V2INT16
Definition: SIDefines.h:224
@ OPERAND_REG_IMM_V2FP32
Definition: SIDefines.h:214
@ OPERAND_REG_INLINE_AC_FP64
Definition: SIDefines.h:243
@ OPERAND_REG_INLINE_C_FP16
Definition: SIDefines.h:221
@ OPERAND_REG_IMM_INT16
Definition: SIDefines.h:202
@ OPERAND_REG_INLINE_C_V2FP32
Definition: SIDefines.h:228
@ OPERAND_INLINE_SPLIT_BARRIER_INT32
Definition: SIDefines.h:231
@ OPERAND_REG_IMM_FP32_DEFERRED
Definition: SIDefines.h:209
@ OPERAND_REG_IMM_FP16_DEFERRED
Definition: SIDefines.h:208
std::optional< unsigned > getInlineEncodingV2I16(uint32_t Literal)
bool isVI(const MCSubtargetInfo &STI)
MCRegister mc2PseudoReg(MCRegister Reg)
Convert hardware register Reg to a pseudo register.
std::optional< unsigned > getInlineEncodingV2BF16(uint32_t Literal)
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Reg
All possible values of the reg field in the ModR/M byte.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
constexpr uint32_t Hi_32(uint64_t Value)
Return the high 32 bits of a 64 bit value.
Definition: MathExtras.h:154
MCFixupKind
Extensible enumeration to represent the type of a fixup.
Definition: MCFixup.h:21
@ FK_PCRel_4
A four-byte pc relative fixup.
Definition: MCFixup.h:30
@ FK_Data_4
A four-byte fixup.
Definition: MCFixup.h:25
MCCodeEmitter * createAMDGPUMCCodeEmitter(const MCInstrInfo &MCII, MCContext &Ctx)
Description of the encoding of one expression Op.