LLVM 18.0.0git
AMDGPUMCCodeEmitter.cpp
Go to the documentation of this file.
1//===-- AMDGPUMCCodeEmitter.cpp - AMDGPU Code Emitter ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// The AMDGPU code emitter produces machine code that can be executed
11/// directly on the GPU device.
12//
13//===----------------------------------------------------------------------===//
14
#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/EndianStream.h"
#include <optional>
30
31using namespace llvm;
32
33namespace {
34
35class AMDGPUMCCodeEmitter : public MCCodeEmitter {
36 const MCRegisterInfo &MRI;
37 const MCInstrInfo &MCII;
38
39public:
40 AMDGPUMCCodeEmitter(const MCInstrInfo &MCII, const MCRegisterInfo &MRI)
41 : MRI(MRI), MCII(MCII) {}
42
43 /// Encode the instruction and write it to the OS.
46 const MCSubtargetInfo &STI) const override;
47
48 void getMachineOpValue(const MCInst &MI, const MCOperand &MO, APInt &Op,
50 const MCSubtargetInfo &STI) const;
51
52 void getMachineOpValueT16(const MCInst &MI, unsigned OpNo, APInt &Op,
54 const MCSubtargetInfo &STI) const;
55
56 void getMachineOpValueT16Lo128(const MCInst &MI, unsigned OpNo, APInt &Op,
58 const MCSubtargetInfo &STI) const;
59
60 /// Use a fixup to encode the simm16 field for SOPP branch
61 /// instructions.
62 void getSOPPBrEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
64 const MCSubtargetInfo &STI) const;
65
66 void getSMEMOffsetEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
68 const MCSubtargetInfo &STI) const;
69
70 void getSDWASrcEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
72 const MCSubtargetInfo &STI) const;
73
74 void getSDWAVopcDstEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
76 const MCSubtargetInfo &STI) const;
77
78 void getAVOperandEncoding(const MCInst &MI, unsigned OpNo, APInt &Op,
80 const MCSubtargetInfo &STI) const;
81
82private:
83 uint64_t getImplicitOpSelHiEncoding(int Opcode) const;
84 void getMachineOpValueCommon(const MCInst &MI, const MCOperand &MO,
85 unsigned OpNo, APInt &Op,
87 const MCSubtargetInfo &STI) const;
88
89 /// Encode an fp or int literal.
90 std::optional<uint32_t> getLitEncoding(const MCOperand &MO,
91 const MCOperandInfo &OpInfo,
92 const MCSubtargetInfo &STI) const;
93
94 void getBinaryCodeForInstr(const MCInst &MI, SmallVectorImpl<MCFixup> &Fixups,
95 APInt &Inst, APInt &Scratch,
96 const MCSubtargetInfo &STI) const;
97};
98
99} // end anonymous namespace
100
102 MCContext &Ctx) {
103 return new AMDGPUMCCodeEmitter(MCII, *Ctx.getRegisterInfo());
104}
105
// Returns the encoding value to use if the given integer is an integer inline
// immediate value, or 0 if it is not.
template <typename IntTy>
static uint32_t getIntInlineImmEncoding(IntTy Imm) {
  // Non-negative inline integers 0..64 encode to 128..192.
  if (Imm >= 0 && Imm <= 64)
    return 128 + Imm;

  // Negative inline integers -1..-16 encode to 193..208.
  if (Imm >= -16 && Imm <= -1)
    return 192 + std::abs(Imm);

  // 0 is never a valid encoding here, so it doubles as "not inlineable".
  return 0;
}
118
120 uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
121 return IntImm == 0 ? 255 : IntImm;
122}
123
125 uint16_t IntImm = getIntInlineImmEncoding(static_cast<int16_t>(Val));
126 if (IntImm != 0)
127 return IntImm;
128
129 if (Val == 0x3800) // 0.5
130 return 240;
131
132 if (Val == 0xB800) // -0.5
133 return 241;
134
135 if (Val == 0x3C00) // 1.0
136 return 242;
137
138 if (Val == 0xBC00) // -1.0
139 return 243;
140
141 if (Val == 0x4000) // 2.0
142 return 244;
143
144 if (Val == 0xC000) // -2.0
145 return 245;
146
147 if (Val == 0x4400) // 4.0
148 return 246;
149
150 if (Val == 0xC400) // -4.0
151 return 247;
152
153 if (Val == 0x3118 && // 1.0 / (2.0 * pi)
154 STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
155 return 248;
156
157 return 255;
158}
159
161 uint32_t IntImm = getIntInlineImmEncoding(static_cast<int32_t>(Val));
162 if (IntImm != 0)
163 return IntImm;
164
165 if (Val == llvm::bit_cast<uint32_t>(0.5f))
166 return 240;
167
168 if (Val == llvm::bit_cast<uint32_t>(-0.5f))
169 return 241;
170
171 if (Val == llvm::bit_cast<uint32_t>(1.0f))
172 return 242;
173
174 if (Val == llvm::bit_cast<uint32_t>(-1.0f))
175 return 243;
176
177 if (Val == llvm::bit_cast<uint32_t>(2.0f))
178 return 244;
179
180 if (Val == llvm::bit_cast<uint32_t>(-2.0f))
181 return 245;
182
183 if (Val == llvm::bit_cast<uint32_t>(4.0f))
184 return 246;
185
186 if (Val == llvm::bit_cast<uint32_t>(-4.0f))
187 return 247;
188
189 if (Val == 0x3e22f983 && // 1.0 / (2.0 * pi)
190 STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
191 return 248;
192
193 return 255;
194}
195
197 uint32_t IntImm = getIntInlineImmEncoding(static_cast<int64_t>(Val));
198 if (IntImm != 0)
199 return IntImm;
200
201 if (Val == llvm::bit_cast<uint64_t>(0.5))
202 return 240;
203
204 if (Val == llvm::bit_cast<uint64_t>(-0.5))
205 return 241;
206
207 if (Val == llvm::bit_cast<uint64_t>(1.0))
208 return 242;
209
210 if (Val == llvm::bit_cast<uint64_t>(-1.0))
211 return 243;
212
213 if (Val == llvm::bit_cast<uint64_t>(2.0))
214 return 244;
215
216 if (Val == llvm::bit_cast<uint64_t>(-2.0))
217 return 245;
218
219 if (Val == llvm::bit_cast<uint64_t>(4.0))
220 return 246;
221
222 if (Val == llvm::bit_cast<uint64_t>(-4.0))
223 return 247;
224
225 if (Val == 0x3fc45f306dc9c882 && // 1.0 / (2.0 * pi)
226 STI.hasFeature(AMDGPU::FeatureInv2PiInlineImm))
227 return 248;
228
229 return 255;
230}
231
232std::optional<uint32_t>
233AMDGPUMCCodeEmitter::getLitEncoding(const MCOperand &MO,
234 const MCOperandInfo &OpInfo,
235 const MCSubtargetInfo &STI) const {
236 int64_t Imm;
237 if (MO.isExpr()) {
238 const auto *C = dyn_cast<MCConstantExpr>(MO.getExpr());
239 if (!C)
240 return 255;
241
242 Imm = C->getValue();
243 } else {
244
245 assert(!MO.isDFPImm());
246
247 if (!MO.isImm())
248 return {};
249
250 Imm = MO.getImm();
251 }
252
253 switch (OpInfo.OperandType) {
265 return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
266
272 return getLit64Encoding(static_cast<uint64_t>(Imm), STI);
273
277 return getLit16IntEncoding(static_cast<uint16_t>(Imm), STI);
282 // FIXME Is this correct? What do inline immediates do on SI for f16 src
283 // which does not have f16 support?
284 return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
287 if (!isUInt<16>(Imm) && STI.hasFeature(AMDGPU::FeatureVOP3Literal))
288 return getLit32Encoding(static_cast<uint32_t>(Imm), STI);
290 return getLit16Encoding(static_cast<uint16_t>(Imm), STI);
291 [[fallthrough]];
292 }
295 return getLit16IntEncoding(static_cast<uint16_t>(Imm), STI);
298 uint16_t Lo16 = static_cast<uint16_t>(Imm);
299 uint32_t Encoding = getLit16Encoding(Lo16, STI);
300 return Encoding;
301 }
304 return MO.getImm();
305 default:
306 llvm_unreachable("invalid operand size");
307 }
308}
309
310uint64_t AMDGPUMCCodeEmitter::getImplicitOpSelHiEncoding(int Opcode) const {
311 using namespace AMDGPU::VOP3PEncoding;
312 using namespace AMDGPU::OpName;
313
314 if (AMDGPU::hasNamedOperand(Opcode, op_sel_hi)) {
315 if (AMDGPU::hasNamedOperand(Opcode, src2))
316 return 0;
317 if (AMDGPU::hasNamedOperand(Opcode, src1))
318 return OP_SEL_HI_2;
319 if (AMDGPU::hasNamedOperand(Opcode, src0))
320 return OP_SEL_HI_1 | OP_SEL_HI_2;
321 }
323}
324
325static bool isVCMPX64(const MCInstrDesc &Desc) {
326 return (Desc.TSFlags & SIInstrFlags::VOP3) &&
327 Desc.hasImplicitDefOfPhysReg(AMDGPU::EXEC);
328}
329
330void AMDGPUMCCodeEmitter::encodeInstruction(const MCInst &MI,
333 const MCSubtargetInfo &STI) const {
334 int Opcode = MI.getOpcode();
335 APInt Encoding, Scratch;
336 getBinaryCodeForInstr(MI, Fixups, Encoding, Scratch, STI);
337 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
338 unsigned bytes = Desc.getSize();
339
340 // Set unused op_sel_hi bits to 1 for VOP3P and MAI instructions.
341 // Note that accvgpr_read/write are MAI, have src0, but do not use op_sel.
342 if ((Desc.TSFlags & SIInstrFlags::VOP3P) ||
343 Opcode == AMDGPU::V_ACCVGPR_READ_B32_vi ||
344 Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_vi) {
345 Encoding |= getImplicitOpSelHiEncoding(Opcode);
346 }
347
348 // GFX10+ v_cmpx opcodes promoted to VOP3 have implied dst=EXEC.
349 // Documentation requires dst to be encoded as EXEC (0x7E),
350 // but it looks like the actual value encoded for dst operand
351 // is ignored by HW. It was decided to define dst as "do not care"
352 // in td files to allow disassembler accept any dst value.
353 // However, dst is encoded as EXEC for compatibility with SP3.
354 if (AMDGPU::isGFX10Plus(STI) && isVCMPX64(Desc)) {
355 assert((Encoding & 0xFF) == 0);
356 Encoding |= MRI.getEncodingValue(AMDGPU::EXEC_LO);
357 }
358
359 for (unsigned i = 0; i < bytes; i++) {
360 CB.push_back((uint8_t)Encoding.extractBitsAsZExtValue(8, 8 * i));
361 }
362
363 // NSA encoding.
364 if (AMDGPU::isGFX10Plus(STI) && Desc.TSFlags & SIInstrFlags::MIMG) {
365 int vaddr0 = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
366 AMDGPU::OpName::vaddr0);
367 int srsrc = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
368 AMDGPU::OpName::srsrc);
369 assert(vaddr0 >= 0 && srsrc > vaddr0);
370 unsigned NumExtraAddrs = srsrc - vaddr0 - 1;
371 unsigned NumPadding = (-NumExtraAddrs) & 3;
372
373 for (unsigned i = 0; i < NumExtraAddrs; ++i) {
374 getMachineOpValue(MI, MI.getOperand(vaddr0 + 1 + i), Encoding, Fixups,
375 STI);
376 CB.push_back((uint8_t)Encoding.getLimitedValue());
377 }
378 CB.append(NumPadding, 0);
379 }
380
381 if ((bytes > 8 && STI.hasFeature(AMDGPU::FeatureVOP3Literal)) ||
382 (bytes > 4 && !STI.hasFeature(AMDGPU::FeatureVOP3Literal)))
383 return;
384
385 // Do not print literals from SISrc Operands for insts with mandatory literals
386 if (AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::imm))
387 return;
388
389 // Check for additional literals
390 for (unsigned i = 0, e = Desc.getNumOperands(); i < e; ++i) {
391
392 // Check if this operand should be encoded as [SV]Src
394 continue;
395
396 // Is this operand a literal immediate?
397 const MCOperand &Op = MI.getOperand(i);
398 auto Enc = getLitEncoding(Op, Desc.operands()[i], STI);
399 if (!Enc || *Enc != 255)
400 continue;
401
402 // Yes! Encode it
403 int64_t Imm = 0;
404
405 if (Op.isImm())
406 Imm = Op.getImm();
407 else if (Op.isExpr()) {
408 if (const auto *C = dyn_cast<MCConstantExpr>(Op.getExpr()))
409 Imm = C->getValue();
410
411 } else if (!Op.isExpr()) // Exprs will be replaced with a fixup value.
412 llvm_unreachable("Must be immediate or expr");
413
414 support::endian::write<uint32_t>(CB, Imm, support::endianness::little);
415
416 // Only one literal value allowed
417 break;
418 }
419}
420
421void AMDGPUMCCodeEmitter::getSOPPBrEncoding(const MCInst &MI, unsigned OpNo,
422 APInt &Op,
424 const MCSubtargetInfo &STI) const {
425 const MCOperand &MO = MI.getOperand(OpNo);
426
427 if (MO.isExpr()) {
428 const MCExpr *Expr = MO.getExpr();
430 Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));
431 Op = APInt::getZero(96);
432 } else {
433 getMachineOpValue(MI, MO, Op, Fixups, STI);
434 }
435}
436
437void AMDGPUMCCodeEmitter::getSMEMOffsetEncoding(
438 const MCInst &MI, unsigned OpNo, APInt &Op,
439 SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
440 auto Offset = MI.getOperand(OpNo).getImm();
441 // VI only supports 20-bit unsigned offsets.
442 assert(!AMDGPU::isVI(STI) || isUInt<20>(Offset));
443 Op = Offset;
444}
445
446void AMDGPUMCCodeEmitter::getSDWASrcEncoding(const MCInst &MI, unsigned OpNo,
447 APInt &Op,
449 const MCSubtargetInfo &STI) const {
450 using namespace AMDGPU::SDWA;
451
452 uint64_t RegEnc = 0;
453
454 const MCOperand &MO = MI.getOperand(OpNo);
455
456 if (MO.isReg()) {
457 unsigned Reg = MO.getReg();
458 RegEnc |= MRI.getEncodingValue(Reg);
459 RegEnc &= SDWA9EncValues::SRC_VGPR_MASK;
461 RegEnc |= SDWA9EncValues::SRC_SGPR_MASK;
462 }
463 Op = RegEnc;
464 return;
465 } else {
466 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
467 auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI);
468 if (Enc && *Enc != 255) {
469 Op = *Enc | SDWA9EncValues::SRC_SGPR_MASK;
470 return;
471 }
472 }
473
474 llvm_unreachable("Unsupported operand kind");
475}
476
477void AMDGPUMCCodeEmitter::getSDWAVopcDstEncoding(
478 const MCInst &MI, unsigned OpNo, APInt &Op,
479 SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
480 using namespace AMDGPU::SDWA;
481
482 uint64_t RegEnc = 0;
483
484 const MCOperand &MO = MI.getOperand(OpNo);
485
486 unsigned Reg = MO.getReg();
487 if (Reg != AMDGPU::VCC && Reg != AMDGPU::VCC_LO) {
488 RegEnc |= MRI.getEncodingValue(Reg);
489 RegEnc &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
490 RegEnc |= SDWA9EncValues::VOPC_DST_VCC_MASK;
491 }
492 Op = RegEnc;
493}
494
495void AMDGPUMCCodeEmitter::getAVOperandEncoding(
496 const MCInst &MI, unsigned OpNo, APInt &Op,
497 SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
498 unsigned Reg = MI.getOperand(OpNo).getReg();
499 uint64_t Enc = MRI.getEncodingValue(Reg);
500
501 // VGPR and AGPR have the same encoding, but SrcA and SrcB operands of mfma
502 // instructions use acc[0:1] modifier bits to distinguish. These bits are
503 // encoded as a virtual 9th bit of the register for these operands.
504 if (MRI.getRegClass(AMDGPU::AGPR_32RegClassID).contains(Reg) ||
505 MRI.getRegClass(AMDGPU::AReg_64RegClassID).contains(Reg) ||
506 MRI.getRegClass(AMDGPU::AReg_96RegClassID).contains(Reg) ||
507 MRI.getRegClass(AMDGPU::AReg_128RegClassID).contains(Reg) ||
508 MRI.getRegClass(AMDGPU::AReg_160RegClassID).contains(Reg) ||
509 MRI.getRegClass(AMDGPU::AReg_192RegClassID).contains(Reg) ||
510 MRI.getRegClass(AMDGPU::AReg_224RegClassID).contains(Reg) ||
511 MRI.getRegClass(AMDGPU::AReg_256RegClassID).contains(Reg) ||
512 MRI.getRegClass(AMDGPU::AReg_288RegClassID).contains(Reg) ||
513 MRI.getRegClass(AMDGPU::AReg_320RegClassID).contains(Reg) ||
514 MRI.getRegClass(AMDGPU::AReg_352RegClassID).contains(Reg) ||
515 MRI.getRegClass(AMDGPU::AReg_384RegClassID).contains(Reg) ||
516 MRI.getRegClass(AMDGPU::AReg_512RegClassID).contains(Reg) ||
517 MRI.getRegClass(AMDGPU::AGPR_LO16RegClassID).contains(Reg))
518 Enc |= 512;
519
520 Op = Enc;
521}
522
523static bool needsPCRel(const MCExpr *Expr) {
524 switch (Expr->getKind()) {
525 case MCExpr::SymbolRef: {
526 auto *SE = cast<MCSymbolRefExpr>(Expr);
527 MCSymbolRefExpr::VariantKind Kind = SE->getKind();
530 }
531 case MCExpr::Binary: {
532 auto *BE = cast<MCBinaryExpr>(Expr);
533 if (BE->getOpcode() == MCBinaryExpr::Sub)
534 return false;
535 return needsPCRel(BE->getLHS()) || needsPCRel(BE->getRHS());
536 }
537 case MCExpr::Unary:
538 return needsPCRel(cast<MCUnaryExpr>(Expr)->getSubExpr());
539 case MCExpr::Target:
540 case MCExpr::Constant:
541 return false;
542 }
543 llvm_unreachable("invalid kind");
544}
545
546void AMDGPUMCCodeEmitter::getMachineOpValue(const MCInst &MI,
547 const MCOperand &MO, APInt &Op,
549 const MCSubtargetInfo &STI) const {
550 if (MO.isReg()){
551 Op = MRI.getEncodingValue(MO.getReg());
552 return;
553 }
554 unsigned OpNo = &MO - MI.begin();
555 getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
556}
557
558void AMDGPUMCCodeEmitter::getMachineOpValueT16(
559 const MCInst &MI, unsigned OpNo, APInt &Op,
560 SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
561 llvm_unreachable("TODO: Implement getMachineOpValueT16().");
562}
563
564void AMDGPUMCCodeEmitter::getMachineOpValueT16Lo128(
565 const MCInst &MI, unsigned OpNo, APInt &Op,
566 SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
567 const MCOperand &MO = MI.getOperand(OpNo);
568 if (MO.isReg()) {
569 uint16_t Encoding = MRI.getEncodingValue(MO.getReg());
570 unsigned RegIdx = Encoding & AMDGPU::EncValues::REG_IDX_MASK;
571 bool IsHi = Encoding & AMDGPU::EncValues::IS_HI;
572 bool IsVGPR = Encoding & AMDGPU::EncValues::IS_VGPR;
573 assert((!IsVGPR || isUInt<7>(RegIdx)) && "VGPR0-VGPR127 expected!");
574 Op = (IsVGPR ? 0x100 : 0) | (IsHi ? 0x80 : 0) | RegIdx;
575 return;
576 }
577 getMachineOpValueCommon(MI, MO, OpNo, Op, Fixups, STI);
578}
579
580void AMDGPUMCCodeEmitter::getMachineOpValueCommon(
581 const MCInst &MI, const MCOperand &MO, unsigned OpNo, APInt &Op,
582 SmallVectorImpl<MCFixup> &Fixups, const MCSubtargetInfo &STI) const {
583
584 if (MO.isExpr() && MO.getExpr()->getKind() != MCExpr::Constant) {
585 // FIXME: If this is expression is PCRel or not should not depend on what
586 // the expression looks like. Given that this is just a general expression,
587 // it should probably be FK_Data_4 and whatever is producing
588 //
589 // s_add_u32 s2, s2, (extern_const_addrspace+16
590 //
591 // And expecting a PCRel should instead produce
592 //
593 // .Ltmp1:
594 // s_add_u32 s2, s2, (extern_const_addrspace+16)-.Ltmp1
596 if (needsPCRel(MO.getExpr()))
598 else
599 Kind = FK_Data_4;
600
601 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
602 uint32_t Offset = Desc.getSize();
603 assert(Offset == 4 || Offset == 8);
604
605 Fixups.push_back(MCFixup::create(Offset, MO.getExpr(), Kind, MI.getLoc()));
606 }
607
608 const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
609 if (AMDGPU::isSISrcOperand(Desc, OpNo)) {
610 if (auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI)) {
611 Op = *Enc;
612 return;
613 }
614 } else if (MO.isImm()) {
615 Op = MO.getImm();
616 return;
617 }
618
619 llvm_unreachable("Encoding of this operand type is not supported yet.");
620}
621
622#include "AMDGPUGenMCCodeEmitter.inc"
unsigned const MachineRegisterInfo * MRI
static uint32_t getLit16IntEncoding(uint16_t Val, const MCSubtargetInfo &STI)
static uint32_t getLit64Encoding(uint64_t Val, const MCSubtargetInfo &STI)
static bool isVCMPX64(const MCInstrDesc &Desc)
static uint32_t getLit16Encoding(uint16_t Val, const MCSubtargetInfo &STI)
static uint32_t getIntInlineImmEncoding(IntTy Imm)
static bool needsPCRel(const MCExpr *Expr)
static uint32_t getLit32Encoding(uint32_t Val, const MCSubtargetInfo &STI)
Provides AMDGPU specific target descriptions.
This file implements a class to represent arbitrary precision integral constant values and operations...
IRTranslator LLVM IR MI
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
Class for arbitrary precision integers.
Definition: APInt.h:76
uint64_t extractBitsAsZExtValue(unsigned numBits, unsigned bitPosition) const
Definition: APInt.cpp:489
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition: APInt.h:453
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition: APInt.h:178
This class represents an Operation in the Expression.
@ Sub
Subtraction.
Definition: MCExpr.h:509
MCCodeEmitter - Generic instruction encoding interface.
Definition: MCCodeEmitter.h:21
virtual void encodeInstruction(const MCInst &Inst, SmallVectorImpl< char > &CB, SmallVectorImpl< MCFixup > &Fixups, const MCSubtargetInfo &STI) const =0
Encode the given Inst to bytes and append to CB.
Context object for machine code objects.
Definition: MCContext.h:76
const MCRegisterInfo * getRegisterInfo() const
Definition: MCContext.h:448
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:35
@ Unary
Unary expressions.
Definition: MCExpr.h:41
@ Constant
Constant expressions.
Definition: MCExpr.h:39
@ SymbolRef
References to labels and assigned expressions.
Definition: MCExpr.h:40
@ Target
Target specific expression.
Definition: MCExpr.h:42
@ Binary
Binary expressions.
Definition: MCExpr.h:38
ExprKind getKind() const
Definition: MCExpr.h:81
static MCFixup create(uint32_t Offset, const MCExpr *Value, MCFixupKind Kind, SMLoc Loc=SMLoc())
Definition: MCFixup.h:86
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:184
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:26
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition: MCInstrDesc.h:85
uint8_t OperandType
Information about the type of the operand.
Definition: MCInstrDesc.h:97
Instances of this class represent operands of the MCInst class.
Definition: MCInst.h:36
int64_t getImm() const
Definition: MCInst.h:80
bool isImm() const
Definition: MCInst.h:62
unsigned getReg() const
Returns the register number.
Definition: MCInst.h:69
bool isReg() const
Definition: MCInst.h:61
bool isDFPImm() const
Definition: MCInst.h:64
const MCExpr * getExpr() const
Definition: MCInst.h:114
bool isExpr() const
Definition: MCInst.h:65
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:687
void push_back(const T &Elt)
Definition: SmallVector.h:416
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_READONLY int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIdx)
unsigned mc2PseudoReg(unsigned Reg)
Convert hardware register Reg to a pseudo register.
@ fixup_si_sopp_br
16-bit PC relative fixup for SOPP branch instructions.
bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo)
Is this an AMDGPU specific source operand? These include registers, inline constants,...
LLVM_READONLY bool hasNamedOperand(uint64_t Opcode, uint64_t NamedIdx)
bool isSGPR(unsigned Reg, const MCRegisterInfo *TRI)
Is Reg - scalar register.
bool isGFX10Plus(const MCSubtargetInfo &STI)
@ OPERAND_KIMM32
Operand with 32-bit immediate that uses the constant bus.
Definition: SIDefines.h:214
@ OPERAND_REG_IMM_INT64
Definition: SIDefines.h:189
@ OPERAND_REG_IMM_V2FP16
Definition: SIDefines.h:196
@ OPERAND_REG_INLINE_C_V2INT32
Definition: SIDefines.h:210
@ OPERAND_REG_INLINE_C_FP64
Definition: SIDefines.h:207
@ OPERAND_REG_IMM_V2INT16
Definition: SIDefines.h:197
@ OPERAND_REG_INLINE_AC_V2FP16
Definition: SIDefines.h:224
@ OPERAND_REG_IMM_INT32
Operands with register or 32-bit immediate.
Definition: SIDefines.h:188
@ OPERAND_REG_IMM_FP16
Definition: SIDefines.h:193
@ OPERAND_REG_INLINE_C_INT64
Definition: SIDefines.h:204
@ OPERAND_REG_INLINE_C_INT16
Operands with register or inline constant.
Definition: SIDefines.h:202
@ OPERAND_REG_INLINE_AC_INT16
Operands with an AccVGPR register or inline constant.
Definition: SIDefines.h:218
@ OPERAND_REG_IMM_FP64
Definition: SIDefines.h:192
@ OPERAND_REG_INLINE_C_V2FP16
Definition: SIDefines.h:209
@ OPERAND_REG_INLINE_AC_V2INT16
Definition: SIDefines.h:223
@ OPERAND_REG_INLINE_AC_FP16
Definition: SIDefines.h:220
@ OPERAND_REG_INLINE_AC_INT32
Definition: SIDefines.h:219
@ OPERAND_REG_INLINE_AC_FP32
Definition: SIDefines.h:221
@ OPERAND_REG_IMM_V2INT32
Definition: SIDefines.h:198
@ OPERAND_REG_IMM_FP32
Definition: SIDefines.h:191
@ OPERAND_REG_INLINE_C_FP32
Definition: SIDefines.h:206
@ OPERAND_REG_INLINE_C_INT32
Definition: SIDefines.h:203
@ OPERAND_REG_INLINE_C_V2INT16
Definition: SIDefines.h:208
@ OPERAND_REG_IMM_V2FP32
Definition: SIDefines.h:199
@ OPERAND_REG_INLINE_AC_FP64
Definition: SIDefines.h:222
@ OPERAND_REG_INLINE_C_FP16
Definition: SIDefines.h:205
@ OPERAND_REG_IMM_INT16
Definition: SIDefines.h:190
@ OPERAND_REG_INLINE_C_V2FP32
Definition: SIDefines.h:211
@ OPERAND_REG_IMM_FP32_DEFERRED
Definition: SIDefines.h:195
@ OPERAND_REG_IMM_FP16_DEFERRED
Definition: SIDefines.h:194
bool isVI(const MCSubtargetInfo &STI)
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
Reg
All possible values of the reg field in the ModR/M byte.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:440
MCFixupKind
Extensible enumeration to represent the type of a fixup.
Definition: MCFixup.h:21
@ FK_PCRel_4
A four-byte pc relative fixup.
Definition: MCFixup.h:29
@ FK_Data_4
A four-byte fixup.
Definition: MCFixup.h:25
MCCodeEmitter * createAMDGPUMCCodeEmitter(const MCInstrInfo &MCII, MCContext &Ctx)
Description of the encoding of one expression Op.