LLVM 17.0.0git
SIMCCodeEmitter.cpp
Go to the documentation of this file.
1//===-- SIMCCodeEmitter.cpp - SI Code Emitter -----------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// The SI code emitter produces machine code that can be executed
11/// directly on the GPU device.
12//
13//===----------------------------------------------------------------------===//
14
#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include <optional>
30
31using namespace llvm;
32
33namespace {
34
35class SIMCCodeEmitter : public AMDGPUMCCodeEmitter {
36 const MCRegisterInfo &MRI;
37
38 /// Encode an fp or int literal
39 std::optional<uint32_t> getLitEncoding(const MCOperand &MO,
40 const MCOperandInfo &OpInfo,
41 const MCSubtargetInfo &STI) const;
42
43public:
44 SIMCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
45 : AMDGPUMCCodeEmitter(mcii), MRI(*ctx.getRegisterInfo()) {}
46 SIMCCodeEmitter(const SIMCCodeEmitter &) = delete;
47 SIMCCodeEmitter &operator=(const SIMCCodeEmitter &) = delete;
48
49 /// Encode the instruction and write it to the OS.
52 const MCSubtargetInfo &STI) const override;
53
54 void getMachineOpValue(const MCInst &MI, const MCOperand &MO, APInt &Op,
56 const MCSubtargetInfo &STI) const override;
57
58 /// Use a fixup to encode the simm16 field for SOPP branch
59 /// instructions.
60 void getSOPPBrEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
62 const MCSubtargetInfo &STI) const override;
63
64 void getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
66 const MCSubtargetInfo &STI) const override;
67
68 void getSDWASrcEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
70 const MCSubtargetInfo &STI) const override;
71
72 void getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
74 const MCSubtargetInfo &STI) const override;
75
76 void getAVOperandEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
78 const MCSubtargetInfo &STI) const override;
79
80private:
81 uint64_t getImplicitOpSelHiEncoding(int Opcode) const;
82 void getMachineOpValueCommon(const MCInst &MI, const MCOperand &MO,
83 unsigned OpNo, APInt &Op,
85 const MCSubtargetInfo &STI) const;
86};
87
88} // end anonymous namespace
89
91 MCContext &Ctx) {
92 return new SIMCCodeEmitter(MCII, Ctx);
93}
94
95// Returns the encoding value to use if the given integer is an integer inline
96// immediate value, or 0 if it is not.
97template <typename IntTy>
99 if (Imm >= 0 && Imm <= 64)
100 return 128 + Imm;
101
102 if (Imm >= -16 && Imm <= -1)
103 return 192 + std::abs(Imm);
104
105 return 0;
106}
107
109 uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
110 return IntImm == 0 ? 255 : IntImm;
111}
112
114 uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
115 if (IntImm != 0)
116 return IntImm;
117
118 if (Val == 0x3800) // 0.5
119 return 240;
120
121 if (Val == 0xB800) // -0.5
122 return 241;
123
124 if (Val == 0x3C00) // 1.0
125 return 242;
126
127 if (Val == 0xBC00) // -1.0
128 return 243;
129
130 if (Val == 0x4000) // 2.0
131 return 244;
132
133 if (Val == 0xC000) // -2.0
134 return 245;
135
136 if (Val == 0x4400) // 4.0
137 return 246;
138
139 if (Val == 0xC400) // -4.0
140 return 247;
141
142 if (Val == 0x3118 && // 1.0 / (2.0 * pi)
143 STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
144 return 248;
145
146 return 255;
147}
148
150 uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
151 if (IntImm != 0)
152 return IntImm;
153
154 if (Val == llvm::bit_cast<uint32_t>(0.5f))
155 return 240;
156
157 if (Val == llvm::bit_cast<uint32_t>(-0.5f))
158 return 241;
159
160 if (Val == llvm::bit_cast<uint32_t>(1.0f))
161 return 242;
162
163 if (Val == llvm::bit_cast<uint32_t>(-1.0f))
164 return 243;
165
166 if (Val == llvm::bit_cast<uint32_t>(2.0f))
167 return 244;
168
169 if (Val == llvm::bit_cast<uint32_t>(-2.0f))
170 return 245;
171
172 if (Val == llvm::bit_cast<uint32_t>(4.0f))
173 return 246;
174
175 if (Val == llvm::bit_cast<uint32_t>(-4.0f))
176 return 247;
177
178 if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
179 STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
180 return 248;
181
182 return 255;
183}
184
186 uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
187 if (IntImm != 0)
188 return IntImm;
189
190 if (Val == llvm::bit_cast<uint64_t>(0.5))
191 return 240;
192
193 if (Val == llvm::bit_cast<uint64_t>(-0.5))
194 return 241;
195
196 if (Val == llvm::bit_cast<uint64_t>(1.0))
197 return 242;
198
199 if (Val == llvm::bit_cast<uint64_t>(-1.0))
200 return 243;
201
202 if (Val == llvm::bit_cast<uint64_t>(2.0))
203 return 244;
204
205 if (Val == llvm::bit_cast<uint64_t>(-2.0))
206 return 245;
207
208 if (Val == llvm::bit_cast<uint64_t>(4.0))
209 return 246;
210
211 if (Val == llvm::bit_cast<uint64_t>(-4.0))
212 return 247;
213
214 if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
215 STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
216 return 248;
217
218 return 255;
219}
220
221std::optional<uint32_t>
222SIMCCodeEmitter::getLitEncoding(const MCOperand &MO,
223 const MCOperandInfo &OpInfo,
224 const MCSubtargetInfo &STI) const {
225 int64_t Imm;
226 if (MO.isExpr()) {
227 const auto *C = dyn_cast<MCConstantExpr>(MO.getExpr());
228 if (!C)
229 return 255;
230
231 Imm = C->getValue();
232 } else {
233
234 assert(!MO.isDFPImm());
235
236 if (!MO.isImm())
237 return {};
238
239 Imm = MO.getImm();
240 }
241
242 switch (OpInfo.OperandType) {
243 case AMDGPU::OPERAND_REG_IMM_INT32:
244 case AMDGPU::OPERAND_REG_IMM_FP32:
245 case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
246 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
247 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
248 case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
249 case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
250 case AMDGPU::OPERAND_REG_IMM_V2INT32:
251 case AMDGPU::OPERAND_REG_IMM_V2FP32:
252 case AMDGPU::OPERAND_REG_INLINE_C_V2INT32:
253 case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
254 return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
255
256 case AMDGPU::OPERAND_REG_IMM_INT64:
257 case AMDGPU::OPERAND_REG_IMM_FP64:
258 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
259 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
260 case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
261 return getLit64Encoding(static_cast<uint64_t>(Imm), STI);
262
263 case AMDGPU::OPERAND_REG_IMM_INT16:
264 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
265 case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
266 return getLit16IntEncoding(static_cast<uint16_t>(Imm), STI);
267 case AMDGPU::OPERAND_REG_IMM_FP16:
268 case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
269 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
270 case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
271 // FIXME Is this correct? What do inline immediates do on SI for f16 src
272 // which does not have f16 support?
273 return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
274 case AMDGPU::OPERAND_REG_IMM_V2INT16:
275 case AMDGPU::OPERAND_REG_IMM_V2FP16: {
276 if (!isUInt<16>(Imm) && STI.hasFeature(AMDGPU::FeatureVOP3Literal))
277 return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
278 if (OpInfo.OperandType == AMDGPU::OPERAND_REG_IMM_V2FP16)
279 return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
280 [[fallthrough]];
281 }
282 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
283 case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
284 return getLit16IntEncoding(static_cast<uint16_t>(Imm), STI);
285 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
286 case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: {
287 uint16_t Lo16 = static_cast<uint16_t>(Imm);
288 uint32_t Encoding = getLit16Encoding(Lo16, STI);
289 return Encoding;
290 }
291 case AMDGPU::OPERAND_KIMM32:
292 case AMDGPU::OPERAND_KIMM16:
293 return MO.getImm();
294 default:
295 llvm_unreachable("invalid operand size");
296 }
297}
298
299uint64_t SIMCCodeEmitter::getImplicitOpSelHiEncoding(int Opcode) const {
300 using namespace AMDGPU::VOP3PEncoding;
301 using namespace AMDGPU::OpName;
302
303 if (AMDGPU::hasNamedOperand(Opcode, op_sel_hi)) {
304 if (AMDGPU::hasNamedOperand(Opcode, src2))
305 return 0;
306 if (AMDGPU::hasNamedOperand(Opcode, src1))
307 return OP_SEL_HI_2;
308 if (AMDGPU::hasNamedOperand(Opcode, src0))
309 return OP_SEL_HI_1 | OP_SEL_HI_2;
310 }
312}
313
314static bool isVCMPX64(const MCInstrDesc &Desc) {
315 return (Desc.TSFlags & SIInstrFlags::VOP3) &&
316 Desc.hasImplicitDefOfPhysReg(AMDGPU::EXEC);
317}
318
319void SIMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
321 const MCSubtargetInfo &STI) const {
322 int Opcode = MI.getOpcode();
323 APInt Encoding, Scratch;
324 getBinaryCodeForInstr(MI, Fixups, Encoding, Scratch, STI);
325 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
326 unsigned bytes = Desc.getSize();
327
328 // Set unused op_sel_hi bits to 1 for VOP3P and MAI instructions.
329 // Note that accvgpr_read/write are MAI, have src0, but do not use op_sel.
330 if ((Desc.TSFlags & SIInstrFlags::VOP3P) ||
331 Opcode == AMDGPU::V_ACCVGPR_READ_B32_vi ||
332 Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_vi) {
333 Encoding |= getImplicitOpSelHiEncoding(Opcode);
334 }
335
336 // GFX10+ v_cmpx opcodes promoted to VOP3 have implied dst=EXEC.
337 // Documentation requires dst to be encoded as EXEC (0x7E),
338 // but it looks like the actual value encoded for dst operand
339 // is ignored by HW. It was decided to define dst as "do not care"
340 // in td files to allow disassembler accept any dst value.
341 // However, dst is encoded as EXEC for compatibility with SP3.
342 if (AMDGPU::isGFX10Plus(STI) && isVCMPX64(Desc)) {
343 assert((Encoding & 0xFF) == 0);
344 Encoding |= MRI.getEncodingValue(AMDGPU::EXEC_LO);
345 }
346
347 for (unsigned i = 0; i < bytes; i++) {
348 OS.write((uint8_t)Encoding.extractBitsAsZExtValue(8, 8 * i));
349 }
350
351 // NSA encoding.
352 if (AMDGPU::isGFX10Plus(STI) && Desc.TSFlags & SIInstrFlags::MIMG) {
353 int vaddr0 = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
354 AMDGPU::OpName::vaddr0);
355 int srsrc = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
356 AMDGPU::OpName::srsrc);
357 assert(vaddr0 >= 0 && srsrc > vaddr0);
358 unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
359 unsigned NumPadding = (-NumExtraAddrs) & 3;
360
361 for (unsigned i = 0; i < NumExtraAddrs; ++i) {
362 getMachineOpValue(MI, MI.getOperand(vaddr0 + 1 + i), Encoding, Fixups,
363 STI);
364 OS.write((uint8_t)Encoding.getLimitedValue());
365 }
366 for (unsigned i = 0; i < NumPadding; ++i)
367 OS.write(0);
368 }
369
370 if ((bytes > 8 && STI.hasFeature(AMDGPU::FeatureVOP3Literal)) ||
371 (bytes > 4 && !STI.hasFeature(AMDGPU::FeatureVOP3Literal)))
372 return;
373
374 // Do not print literals from SISrc Operands for insts with mandatory literals
375 if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::imm))
376 return;
377
378 // Check for additional literals
379 for (unsigned i = 0, e = Desc.getNumOperands(); i < e; ++i) {
380
381 // Check if this operand should be encoded as [SV]Src
382 if (!AMDGPU::isSISrcOperand(Desc, i))
383 continue;
384
385 // Is this operand a literal immediate?
386 const MCOperand &Op = MI.getOperand(i);
387 auto Enc = getLitEncoding(Op, Desc.operands()[i], STI);
388 if (!Enc || *Enc != 255)
389 continue;
390
391 // Yes! Encode it
392 int64_t Imm = 0;
393
394 if (Op.isImm())
395 Imm = Op.getImm();
396 else if (Op.isExpr()) {
397 if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
398 Imm = C->getValue();
399
400 } else if (!Op.isExpr()) // Exprs will be replaced with a fixup value.
401 llvm_unreachable("Must be immediate or expr");
402
403 for (unsigned j = 0; j < 4; j++) {
404 OS.write((uint8_t) ((Imm >> (8 * j)) & 0xff));
405 }
406
407 // Only one literal value allowed
408 break;
409 }
410}
411
412void SIMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
413 APInt &Op,
415 const MCSubtargetInfo &STI) const {
416 const MCOperand &MO = MI.getOperand(OpNo);
417
418 if (MO.isExpr()) {
419 const MCExpr *Expr = MO.getExpr();
420 MCFixupKind Kind = (MCFixupKind)AMDGPU::fixup_si_sopp_br;
421 Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
422 Op = APInt::getZero(96);
423 } else {
424 getMachineOpValue(MI, MO, Op, Fixups, STI);
425 }
426}
427
428void SIMCCodeEmitter::getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo,
429 APInt &Op,
431 const MCSubtargetInfo &STI) const {
432 auto Offset = MI.getOperand(OpNo).getImm();
433 // VI only supports 20-bit unsigned offsets.
434 assert(!AMDGPU::isVI(STI) || isUInt<20>(Offset));
435 Op = Offset;
436}
437
438void SIMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
439 APInt &Op,
441 const MCSubtargetInfo &STI) const {
442 using namespace AMDGPU::SDWA;
443
444 uint64_t RegEnc = 0;
445
446 const MCOperand &MO = MI.getOperand(OpNo);
447
448 if (MO.isReg()) {
449 unsigned Reg = MO.getReg();
450 RegEnc |= MRI.getEncodingValue(Reg);
451 RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
452 if (AMDGPU::isSGPR(AMDGPU::mc2PseudoReg(Reg), &MRI)) {
453 RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
454 }
455 Op = RegEnc;
456 return;
457 } else {
458 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
459 auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI);
460 if (Enc && *Enc != 255) {
461 Op = *Enc | SDWA9EncValues::SRC_SGPR_MASK;
462 return;
463 }
464 }
465
466 llvm_unreachable("Unsupported operand kind");
467}
468
469void SIMCCodeEmitter::getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo,
470 APInt &Op,
472 const MCSubtargetInfo &STI) const {
473 using namespace AMDGPU::SDWA;
474
475 uint64_t RegEnc = 0;
476
477 const MCOperand &MO = MI.getOperand(OpNo);
478
479 unsigned Reg = MO.getReg();
480 if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
481 RegEnc |= MRI.getEncodingValue(Reg);
482 RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
483 RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
484 }
485 Op = RegEnc;
486}
487
488void SIMCCodeEmitter::getAVOperandEncoding(const MCInst &MI, unsigned OpNo,
489 APInt &Op,
491 const MCSubtargetInfo &STI) const {
492 unsigned Reg = MI.getOperand(OpNo).getReg();
493 uint64_t Enc = MRI.getEncodingValue(Reg);
494
495 // VGPR and AGPR have the same encoding, but SrcA and SrcB operands of mfma
496 // instructions use acc[0:1] modifier bits to distinguish. These bits are
497 // encoded as a virtual 9th bit of the register for these operands.
498 if (MRI.getRegClass(AMDGPU::AGPR_32RegClassID).contains(Reg) ||
499 MRI.getRegClass(AMDGPU::AReg_64RegClassID).contains(Reg) ||
500 MRI.getRegClass(AMDGPU::AReg_96RegClassID).contains(Reg) ||
501 MRI.getRegClass(AMDGPU::AReg_128RegClassID).contains(Reg) ||
502 MRI.getRegClass(AMDGPU::AReg_160RegClassID).contains(Reg) ||
503 MRI.getRegClass(AMDGPU::AReg_192RegClassID).contains(Reg) ||
504 MRI.getRegClass(AMDGPU::AReg_224RegClassID).contains(Reg) ||
505 MRI.getRegClass(AMDGPU::AReg_256RegClassID).contains(Reg) ||
506 MRI.getRegClass(AMDGPU::AReg_288RegClassID).contains(Reg) ||
507 MRI.getRegClass(AMDGPU::AReg_320RegClassID).contains(Reg) ||
508 MRI.getRegClass(AMDGPU::AReg_352RegClassID).contains(Reg) ||
509 MRI.getRegClass(AMDGPU::AReg_384RegClassID).contains(Reg) ||
510 MRI.getRegClass(AMDGPU::AReg_512RegClassID).contains(Reg) ||
511 MRI.getRegClass(AMDGPU::AGPR_LO16RegClassID).contains(Reg))
512 Enc |= 512;
513
514 Op = Enc;
515}
516
517static bool needsPCRel(const MCExpr *Expr) {
518 switch (Expr->getKind()) {
519 case MCExpr::SymbolRef: {
520 auto *SE = cast<MCSymbolRefExpr>(Expr);
521 MCSymbolRefExpr::VariantKind Kind = SE->getKind();
524 }
525 case MCExpr::Binary: {
526 auto *BE = cast<MCBinaryExpr>(Expr);
527 if (BE->getOpcode() == MCBinaryExpr::Sub)
528 return false;
529 return needsPCRel(BE->getLHS()) || needsPCRel(BE->getRHS());
530 }
531 case MCExpr::Unary:
532 return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr());
533 case MCExpr::Target:
534 case MCExpr::Constant:
535 return false;
536 }
537 llvm_unreachable("invalid kind");
538}
539
540void SIMCCodeEmitter::getMachineOpValue(const MCInst &MI,
541 const MCOperand &MO, APInt &Op,
543 const MCSubtargetInfo &STI) const {
544 if (MO.isReg()){
545 Op = MRI.getEncodingValue(MO.getReg());
546 return;
547 }
548 unsigned OpNo = &MO - MI.begin();
549 getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
550}
551
552void SIMCCodeEmitter::getMachineOpValueCommon(
553 const MCInst &MI, const MCOperand &MO, unsigned OpNo, APInt &Op,
554 SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
555
556 if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
557 // FIXME: If this is expression is PCRel or not should not depend on what
558 // the expression looks like. Given that this is just a general expression,
559 // it should probably be FK_Data_4 and whatever is producing
560 //
561 // s_add_u32 s2, s2, (extern_const_addrspace+16
562 //
563 // And expecting a PCRel should instead produce
564 //
565 // .Ltmp1:
566 // s_add_u32 s2, s2, (extern_const_addrspace+16)-.Ltmp1
568 if (needsPCRel(MO.getExpr()))
570 else
571 Kind = FK_Data_4;
572
573 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
574 uint32_t Offset = Desc.getSize();
575 assert(Offset == 4 || Offset == 8);
576
577 Fixups.push_back(MCFixup::create(Offset, MO.getExpr(), Kind, MI.getLoc()));
578 }
579
580 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
581 if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
582 if (auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI)) {
583 Op = *Enc;
584 return;
585 }
586 } else if (MO.isImm()) {
587 Op = MO.getImm();
588 return;
589 }
590
591 llvm_unreachable("Encoding of this operand type is not supported yet.");
592}
593
594#include "AMDGPUGenMCCodeEmitter.inc"
unsigned const MachineRegisterInfo * MRI
CodeEmitter interface for SI codegen.
Provides AMDGPU specific target descriptions.
This file implements a class to represent arbitrary precision integral constant values and operations...
IRTranslator LLVM IR MI
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static uint32_t getLit16IntEncoding(uint16_t Val, const MCSubtargetInfo &STI)
static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI)
static bool isVCMPX64(const MCInstrDesc &Desc)
static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI)
static uint32_t getIntInlineImmEncoding(IntTy Imm)
static bool needsPCRel(const MCExpr *Expr)
static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI)
raw_pwrite_stream & OS
virtual void getAVOperandEncoding(const MCInst &MI, unsigned OpNo, APInt &Op, SmallVectorImpl< MCFixup > &Fixups, const MCSubtargetInfo &STI) const =0
virtual void getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo, APInt &Op, SmallVectorImpl< MCFixup > &Fixups, const MCSubtargetInfo &STI) const =0
virtual void getSDWASrcEncoding(const MCInst &MI, unsigned OpNo, APInt &Op, SmallVectorImpl< MCFixup > &Fixups, const MCSubtargetInfo &STI) const =0
virtual void getMachineOpValue(const MCInst &MI, const MCOperand &MO, APInt &Op, SmallVectorImpl< MCFixup > &Fixups, const MCSubtargetInfo &STI) const =0
virtual void getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo, APInt &Op, SmallVectorImpl< MCFixup > &Fixups, const MCSubtargetInfo &STI) const =0
virtual void getSOPPBrEncoding(const MCInst &MI, unsigned OpNo, APInt &Op, SmallVectorImpl< MCFixup > &Fixups, const MCSubtargetInfo &STI) const =0
Class for arbitrary precision integers.
Definition: APInt.h:75
uint64_t extractBitsAsZExtValue(unsigned numBits, unsigned bitPosition) const
Definition: APInt.cpp:480
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition: APInt.h:463
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition: APInt.h:177
@ Sub
Subtraction.
Definition: MCExpr.h:506
MCCodeEmitter - Generic instruction encoding interface.
Definition: MCCodeEmitter.h:21
virtual void encodeInstruction(const MCInst &Inst, raw_ostream &OS, SmallVectorImpl< MCFixup > &Fixups, const MCSubtargetInfo &STI) const =0
EncodeInstruction - Encode the given Inst to bytes on the output stream OS.
MCCodeEmitter & operator=(const MCCodeEmitter &)=delete
Context object for machine code objects.
Definition: MCContext.h:76
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:35
@ Unary
Unary expressions.
Definition: MCExpr.h:41
@ Constant
Constant expressions.
Definition: MCExpr.h:39
@ SymbolRef
References to labels and assigned expressions.
Definition: MCExpr.h:40
@ Target
Target specific expression.
Definition: MCExpr.h:42
@ Binary
Binary expressions.
Definition: MCExpr.h:38
ExprKind getKind() const
Definition: MCExpr.h:81
static MCFixup create(uint32_t Offset, const MCExpr *Value, MCFixupKind Kind, SMLoc Loc=SMLoc())
Definition: MCFixup.h:87
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:239
bool hasImplicitDefOfPhysReg(unsigned Reg, const MCRegisterInfo *MRI=nullptr) const
Return true if this instruction implicitly defines the specified physical register.
Definition: MCInstrDesc.cpp:32
unsigned getSize() const
Return the number of bytes in the encoding of this instruction, or zero if the encoding size cannot b...
Definition: MCInstrDesc.h:605
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:26
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition: MCInstrDesc.h:85
uint8_t OperandType
Information about the type of the operand.
Definition: MCInstrDesc.h:97
Instances of this class represent operands of the MCInst class.
Definition: MCInst.h:36
int64_t getImm() const
Definition: MCInst.h:80
bool isImm() const
Definition: MCInst.h:62
unsigned getReg() const
Returns the register number.
Definition: MCInst.h:69
bool isReg() const
Definition: MCInst.h:61
bool isDFPImm() const
Definition: MCInst.h:64
const MCExpr * getExpr() const
Definition: MCInst.h:114
bool isExpr() const
Definition: MCInst.h:65
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
raw_ostream & write(unsigned char C)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Reg
All possible values of the reg field in the ModR/M byte.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:406
MCCodeEmitter * createSIMCCodeEmitter(const MCInstrInfo &MCII, MCContext &Ctx)
MCFixupKind
Extensible enumeration to represent the type of a fixup.
Definition: MCFixup.h:21
@ FK_PCRel_4
A four-byte pc relative fixup.
Definition: MCFixup.h:30
@ FK_Data_4
A four-byte fixup.
Definition: MCFixup.h:25