//===- SPIRVInstructionSelector.cpp -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

namespace {

struct SyncScopeIDs {
  llvm::SyncScope::ID Work_ItemSSID;
  llvm::SyncScope::ID WorkGroupSSID;
  llvm::SyncScope::ID DeviceSSID;
  llvm::SyncScope::ID AllSVMDevicesSSID;
  llvm::SyncScope::ID SubGroupSSID;

  SyncScopeIDs() {}
  SyncScopeIDs(llvm::LLVMContext &Context) {
    Work_ItemSSID = Context.getOrInsertSyncScopeID("work_item");
    WorkGroupSSID = Context.getOrInsertSyncScopeID("workgroup");
    DeviceSSID = Context.getOrInsertSyncScopeID("device");
    AllSVMDevicesSSID = Context.getOrInsertSyncScopeID("all_svm_devices");
    SubGroupSSID = Context.getOrInsertSyncScopeID("sub_group");
  }
};
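
// For illustration: OpenCL-flavored LLVM IR tags atomics and fences with named
// sync scopes, e.g.
//   fence syncscope("workgroup") acq_rel
// LLVMContext::getOrInsertSyncScopeID returns a stable ID for each such name,
// so the getScope() helper below can compare an instruction's SyncScope::ID
// against the members cached here.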

} // namespace

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;
  SyncScopeIDs SSIDs;
  MachineFunction *HasVRegsReset = nullptr;

  /// We need to keep track of the number we give to anonymous global values so
  /// that we can generate the same name every time it is needed.
  mutable DenseMap<const GlobalValue *, unsigned> UnnamedGlobalIDs;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  void resetVRegsType(MachineFunction &MF);

  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // It is basically a large switch delegating to all the other select methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectBitcast(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectStackSave(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectStackRestore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode,
                       unsigned NegateOpcode = 0) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectAnyOrAll(Register ResVReg, const SPIRVType *ResType,
                      MachineInstr &I, unsigned OpType) const;

  bool selectAll(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectAny(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectBuildVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;
  bool selectSplatVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  bool selectFmix(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  bool selectLength(Register ResVReg, const SPIRVType *ResType,
                    MachineInstr &I) const;

  bool selectFrac(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  bool selectRsqrt(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm64(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *intTy, const SPIRVType *boolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectFreeze(Register ResVReg, const SPIRVType *ResType,
                    MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectAllocaArray(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  bool selectLog10(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectNormalize(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;

  bool selectSaturate(Register ResVReg, const SPIRVType *ResType,
                      MachineInstr &I) const;

  bool selectSpvThreadId(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectUnmergeValues(MachineInstr &I) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildZerosValF(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;
  Register buildOnesValF(const SPIRVType *ResType, MachineInstr &I) const;

  bool wrapIntoSpecConstantOp(MachineInstr &I,
                              SmallVector<Register> &CompositeArgs) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  SSIDs = SyncScopeIDs(MF.getFunction().getContext());
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

// Ensure that register classes correspond to pattern matching rules.
void SPIRVInstructionSelector::resetVRegsType(MachineFunction &MF) {
  if (HasVRegsReset == &MF)
    return;
  HasVRegsReset = &MF;

  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    LLT RegType = MRI.getType(Reg);
    if (RegType.isScalar())
      MRI.setType(Reg, LLT::scalar(64));
    else if (RegType.isPointer())
      MRI.setType(Reg, LLT::pointer(0, 64));
    else if (RegType.isVector())
      MRI.setType(Reg, LLT::fixed_vector(2, LLT::scalar(64)));
  }
  for (const auto &MBB : MF) {
    for (const auto &MI : MBB) {
      if (MI.getOpcode() != SPIRV::ASSIGN_TYPE)
        continue;
      Register DstReg = MI.getOperand(0).getReg();
      LLT DstType = MRI.getType(DstReg);
      Register SrcReg = MI.getOperand(1).getReg();
      LLT SrcType = MRI.getType(SrcReg);
      if (DstType != SrcType)
        MRI.setType(DstReg, MRI.getType(SrcReg));

      const TargetRegisterClass *DstRC = MRI.getRegClassOrNull(DstReg);
      const TargetRegisterClass *SrcRC = MRI.getRegClassOrNull(SrcReg);
      if (DstRC != SrcRC && SrcRC)
        MRI.setRegClass(DstReg, SrcRC);
    }
  }
}
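
// For illustration, assuming a function whose vregs carry s32, p0, and
// <4 x s32> types: after resetVRegsType they become s64, 64-bit p0, and
// <2 x s64>, i.e. every vreg is normalized to the 64-bit shapes the
// tblgen-erated patterns expect, regardless of its original LLT.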

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  resetVRegsType(*I.getParent()->getParent());

  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed anymore.
      Register DstReg = I.getOperand(0).getReg();
      Register SrcReg = I.getOperand(1).getReg();
      auto *Def = MRI->getVRegDef(SrcReg);
      if (isTypeFoldingSupported(Def->getOpcode())) {
        bool Res = selectImpl(I, *CoverageInfo);
        LLVM_DEBUG({
          if (!Res && Def->getOpcode() != TargetOpcode::G_CONSTANT) {
            dbgs() << "Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
            I.print(dbgs());
          }
        });
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->setRegClass(SrcReg, MRI->getRegClass(DstReg));
      MRI->replaceRegWith(SrcReg, DstReg);
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 64 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(64));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 64 bits (for SPIR-V IDs).
      for (unsigned i = 0; i < I.getNumDefs(); ++i)
        MRI->setType(I.getOperand(i).getReg(), LLT::scalar(64));
    I.removeFromParent();
    return true;
  }
  return false;
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (isTypeFoldingSupported(Opcode) && Opcode != TargetOpcode::G_CONSTANT)
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectBuildVector(ResVReg, ResType, I);
  case TargetOpcode::G_SPLAT_VECTOR:
    return selectSplatVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::NMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::NMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);
  case TargetOpcode::G_FTAN:
    return selectExtInst(ResVReg, ResType, I, CL::tan, GL::Tan);
  case TargetOpcode::G_FACOS:
    return selectExtInst(ResVReg, ResType, I, CL::acos, GL::Acos);
  case TargetOpcode::G_FASIN:
    return selectExtInst(ResVReg, ResType, I, CL::asin, GL::Asin);
  case TargetOpcode::G_FATAN:
    return selectExtInst(ResVReg, ResType, I, CL::atan, GL::Atan);
  case TargetOpcode::G_FCOSH:
    return selectExtInst(ResVReg, ResType, I, CL::cosh, GL::Cosh);
  case TargetOpcode::G_FSINH:
    return selectExtInst(ResVReg, ResType, I, CL::sinh, GL::Sinh);
  case TargetOpcode::G_FTANH:
    return selectExtInst(ResVReg, ResType, I, CL::tanh, GL::Tanh);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_add_sat);
  case TargetOpcode::G_UADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_add_sat);
  case TargetOpcode::G_SSUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_sub_sat);
  case TargetOpcode::G_USUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_sub_sat);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectBitcast(ResVReg, ResType, I);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating
    // global variables, initialized with constant expressions like GV + Const
    // (see test opencl/basic/progvar_prog_scope_init.ll).
    // TODO: extend the handler once we have other cases.
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    (void)II;
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with negated value operand.
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate);
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I);

  default:
    return false;
  }
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      for (unsigned i = 1; i < NumOps; ++i)
        MIB.add(I.getOperand(i));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  if (STI.isOpenCLEnv() && I.getOperand(1).isReg()) {
    Register SrcReg = I.getOperand(1).getReg();
    bool IsGV = false;
    for (MachineRegisterInfo::def_instr_iterator DefIt =
             MRI->def_instr_begin(SrcReg);
         DefIt != MRI->def_instr_end(); DefIt = std::next(DefIt)) {
      if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
        IsGV = true;
        break;
      }
    }
    if (IsGV) {
      uint32_t SpecOpcode = 0;
      switch (Opcode) {
      case SPIRV::OpConvertPtrToU:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertPtrToU);
        break;
      case SPIRV::OpConvertUToPtr:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertUToPtr);
        break;
      }
      if (SpecOpcode)
        return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(SpecOpcode)
            .addUse(SrcReg)
            .constrainAllUses(TII, TRI, RBI);
    }
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}
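
// Illustrative result of the special case above: when the source of a
// ptr<->int conversion is a global value in an OpenCL environment, the cast
// is emitted as a constant instruction rather than a runtime one, e.g.
//   %res = OpSpecConstantOp %u64 ConvertPtrToU %global
// (the names here are made up for the example).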

bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  Register OpReg = I.getOperand(1).getReg();
  SPIRVType *OpType =
      OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
  if (!GR.isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

static SPIRV::Scope::Scope getScope(SyncScope::ID Ord,
                                    const SyncScopeIDs &SSIDs) {
  if (Ord == SyncScope::SingleThread || Ord == SSIDs.Work_ItemSSID)
    return SPIRV::Scope::Invocation;
  else if (Ord == SyncScope::System || Ord == SSIDs.DeviceSSID)
    return SPIRV::Scope::Device;
  else if (Ord == SSIDs.WorkGroupSSID)
    return SPIRV::Scope::Workgroup;
  else if (Ord == SSIDs.AllSVMDevicesSSID)
    return SPIRV::Scope::CrossDevice;
  else if (Ord == SSIDs.SubGroupSSID)
    return SPIRV::Scope::Subgroup;
  else
    // The OpenCL approach is: "The functions that do not have memory_scope
    // argument have the same semantics as the corresponding functions with
    // the memory_scope argument set to memory_scope_device." See:
    // https://registry.khronos.org/OpenCL/specs/3.0-unified/html/OpenCL_C.html#atomic-functions
    // In our case, if the scope is unknown, we assume the SPIR-V code is to be
    // consumed in an OpenCL environment, take the same approach, and set the
    // scope to memory_scope_device.
    return SPIRV::Scope::Device;
}
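
// Summary of the mapping implemented above (for illustration):
//   SingleThread / "work_item" -> Invocation
//   "workgroup"                -> Workgroup
//   System / "device"          -> Device
//   "all_svm_devices"          -> CrossDevice
//   "sub_group"                -> Subgroup
//   anything else              -> Device (the OpenCL default)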

static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}
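
// Example of the encoding produced by the two helpers above (illustrative):
// a volatile load with a 4-byte alignment gets the memory-operand mask
// Volatile|Aligned followed by the alignment literal, e.g.
//   OpLoad %ty %res %ptr Volatile|Aligned 4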

bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConstIntArray(Val, Num, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we have such a GV, add init, use buildGlobalVariable.
    Function &CurFunction = GR.CurMF->getFunction();
    Type *LLVMArrTy =
        ArrayType::get(IntegerType::get(CurFunction.getContext(), 8), Num);
    // The module takes ownership of the global var.
    GlobalVariable *GV = new GlobalVariable(*CurFunction.getParent(), LLVMArrTy,
                                            true, GlobalValue::InternalLinkage,
                                            Constant::getNullValue(LLVMArrTy));
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}
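
// Illustrative shape of the G_MEMSET lowering above, assuming Val = 0 and
// Num = 16: a [16 x i8] zero-initialized array constant is emitted as a
// UniformConstant OpVariable decorated Constant, bitcast to an i8 pointer,
// and the fill becomes a single OpCopyMemorySized of 16 bytes.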

bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope =
      static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), SSIDs));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  bool Result = false;
  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    // Translation with a negated value operand was requested.
    Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    Result |= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
    ValueReg = TmpReg;
  }

  Result |= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(Ptr)
                .addUse(ScopeReg)
                .addUse(MemSemReg)
                .addUse(ValueReg)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType =
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");

  SPIRVType *ScalarType =
      GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
  MachineBasicBlock &BB = *I.getParent();
  bool Res = false;
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
    if (!ResType) {
      // There were no "assign type" actions; let's fix this now.
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
      MRI->setType(ResVReg,
                   LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    }
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(SrcReg)
            .addImm(static_cast<int64_t>(i));
    Res |= MIB.constrainAllUses(TII, TRI, RBI);
  }
  return Res;
}
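
// Illustrative expansion: unmerging a two-element vector %v yields one
// OpCompositeExtract per definition, e.g.
//   %d0 = OpCompositeExtract %scalarTy %v 0
//   %d1 = OpCompositeExtract %scalarTy %v 1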

bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord, SSIDs));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope =
        static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), SSIDs));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}
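
// Illustrative shape of the expansion above: SPIR-V's OpAtomicCompareExchange
// returns only the original value, so the {value, success} pair LLVM's
// cmpxchg expects is rebuilt by hand:
//   %old  = OpAtomicCompareExchange %valTy %ptr %scope %semEq %semNeq %val %cmp
//   %succ = OpIEqual %bool %old %cmp
//   %tmp  = OpCompositeInsert %resTy %old %undef 0
//   %res  = OpCompositeInsert %resTy %succ %tmp 1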

static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::DeviceOnlyINTEL:
  case SPIRV::StorageClass::HostOnlyINTEL:
    return true;
  default:
    return false;
  }
}

// In SPIR-V, address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast has a single user and it is an OpConstantComposite,
  // an OpVariable, or an spv_init_global intrinsic, we should select
  // OpSpecConstantOp.
  auto UIs = MRI->use_instructions(ResVReg);
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
            .addUse(NewReg)
            .constrainAllUses(TII, TRI, RBI);
    return Result;
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Don't generate a cast between identical storage classes.
  if (SrcSC == DstSC)
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(SrcPtr)
        .constrainAllUses(TII, TRI, RBI);

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }

  // Check if instructions from the SPV_INTEL_usm_storage_classes extension may
  // be applied.
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpPtrCastToCrossWorkgroupINTEL);
  if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpCrossWorkgroupCastToPtrINTEL);
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::Generic)
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  if (SrcSC == SPIRV::StorageClass::Generic && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);

  // A bitcast between pointers requires that the address spaces match.
  return false;
}
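
// Illustrative two-step cast following the rule above (Workgroup -> Function
// via Generic as the intermediary):
//   %gen = OpPtrCastToGeneric %genPtrTy %src    ; Workgroup -> Generic
//   %dst = OpGenericCastToPtr %dstPtrTy %gen    ; Generic -> Function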

static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

static APFloat getZeroFP(const Type *LLVMFloatTy) {
  if (!LLVMFloatTy)
    return APFloat::getZero(APFloat::IEEEsingle());
  switch (LLVMFloatTy->getScalarType()->getTypeID()) {
  case Type::HalfTyID:
    return APFloat::getZero(APFloat::IEEEhalf());
  default:
  case Type::FloatTyID:
    return APFloat::getZero(APFloat::IEEEsingle());
  case Type::DoubleTyID:
    return APFloat::getZero(APFloat::IEEEdouble());
  }
}

static APFloat getOneFP(const Type *LLVMFloatTy) {
  if (!LLVMFloatTy)
    return APFloat::getOne(APFloat::IEEEsingle());
  switch (LLVMFloatTy->getScalarType()->getTypeID()) {
  case Type::HalfTyID:
    return APFloat::getOne(APFloat::IEEEhalf());
  default:
  case Type::FloatTyID:
    return APFloat::getOne(APFloat::IEEEsingle());
  case Type::DoubleTyID:
    return APFloat::getOne(APFloat::IEEEdouble());
  }
}

bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I,
                                              unsigned OpAnyOrAll) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);

  if (!InputType)
    report_fatal_error("Input Type could not be determined.");

  bool IsBoolTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeBool);
  bool IsVectorTy = InputType->getOpcode() == SPIRV::OpTypeVector;
  if (IsBoolTy && !IsVectorTy) {
    assert(ResVReg == I.getOperand(0).getReg());
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(InputRegister)
        .constrainAllUses(TII, TRI, RBI);
  }

  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
  unsigned SpirvNotEqualId =
      IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
  SPIRVType *SpvBoolScalarTy = GR.getOrCreateSPIRVBoolType(I, TII);
  SPIRVType *SpvBoolTy = SpvBoolScalarTy;
  Register NotEqualReg = ResVReg;

  if (IsVectorTy) {
    NotEqualReg = IsBoolTy ? InputRegister
                           : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    const unsigned NumElts = InputType->getOperand(2).getImm();
    SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts, I, TII);
  }

  if (!IsBoolTy) {
    Register ConstZeroReg =
        IsFloatTy ? buildZerosValF(InputType, I) : buildZerosVal(InputType, I);

    BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId))
        .addDef(NotEqualReg)
        .addUse(GR.getSPIRVTypeID(SpvBoolTy))
        .addUse(InputRegister)
        .addUse(ConstZeroReg)
        .constrainAllUses(TII, TRI, RBI);
  }

  if (!IsVectorTy)
    return true;

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
      .addUse(NotEqualReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectAll(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAll);
}

bool SPIRVInstructionSelector::selectAny(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAny);
}

bool SPIRVInstructionSelector::selectFmix(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  assert(I.getNumOperands() == 5);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  assert(I.getOperand(4).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::FMix)
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectLength(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::Length)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFrac(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::Fract)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectNormalize(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::Normalize)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectRsqrt(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::InverseSqrt)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

/// Transform saturate(x) to clamp(x, 0.0f, 1.0f), as SPIR-V
/// does not have a saturate builtin.
bool SPIRVInstructionSelector::selectSaturate(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register VZero = buildZerosValF(ResType, I);
  Register VOne = buildOnesValF(ResType, I);

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::FClamp)
      .addUse(I.getOperand(2).getReg())
      .addUse(VZero)
      .addUse(VOne)
      .constrainAllUses(TII, TRI, RBI);
}
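
// Illustrative result of the expansion above for a float operand %x:
//   %res = OpExtInst %f32 %glsl_std_450 FClamp %x %zero %one
// i.e. saturate(x) becomes clamp(x, 0.0, 1.0) via the GLSL.std.450 set.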

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
1519
1520bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
1521 const SPIRVType *ResType,
1522 MachineInstr &I) const {
1523 // There is no way to implement `freeze` correctly without support on SPIR-V
1524 // standard side, but we may at least address a simple (static) case when
1525 undef/poison value presence is obvious. The main benefit of even
1526 incomplete `freeze` support is preventing translation from crashing due
1527 to lack of support on the legalization and instruction selection steps.
1528 if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
1529 return false;
1530 Register OpReg = I.getOperand(1).getReg();
1531 if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
1532 Register Reg;
1533 switch (Def->getOpcode()) {
1534 case SPIRV::ASSIGN_TYPE:
1535 if (MachineInstr *AssignToDef =
1536 MRI->getVRegDef(Def->getOperand(1).getReg())) {
1537 if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1538 Reg = Def->getOperand(2).getReg();
1539 }
1540 break;
1541 case SPIRV::OpUndef:
1542 Reg = Def->getOperand(1).getReg();
1543 break;
1544 }
1545 unsigned DestOpCode;
1546 if (Reg.isValid()) {
1547 DestOpCode = SPIRV::OpConstantNull;
1548 } else {
1549 DestOpCode = TargetOpcode::COPY;
1550 Reg = OpReg;
1551 }
1552 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
1553 .addDef(I.getOperand(0).getReg())
1554 .addUse(Reg)
1555 .constrainAllUses(TII, TRI, RBI);
1556 }
1557 return false;
1558}
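// Illustrative sketch, not part of the source: the two cases above behave
// roughly as follows (%ty is the operand's SPIR-V type):
//   %r = G_FREEZE %undef  ->  %r = OpConstantNull %ty  ; obviously undef input
//   %r = G_FREEZE %x      ->  %r = COPY %x             ; anything else
// i.e. a statically visible undef/poison value is frozen to a concrete zero;
// all other values are passed through unchanged.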
1559
1560static unsigned getArrayComponentCount(MachineRegisterInfo *MRI,
1561 const SPIRVType *ResType) {
1562 Register OpReg = ResType->getOperand(2).getReg();
1563 SPIRVType *OpDef = MRI->getVRegDef(OpReg);
1564 if (!OpDef)
1565 return 0;
1566 if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
1567 OpDef->getOperand(1).isReg()) {
1568 if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
1569 OpDef = RefDef;
1570 }
1571 unsigned N = OpDef->getOpcode() == TargetOpcode::G_CONSTANT
1572 ? OpDef->getOperand(1).getCImm()->getValue().getZExtValue()
1573 : 0;
1574 return N;
1575}
1576
1577// Return true if the type represents a constant register
1578static bool isConstReg(MachineRegisterInfo *MRI, SPIRVType *OpDef,
1579 SmallPtrSet<SPIRVType *, 4> &Visited) {
1580 if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
1581 OpDef->getOperand(1).isReg()) {
1582 if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
1583 OpDef = RefDef;
1584 }
1585
1586 if (Visited.contains(OpDef))
1587 return true;
1588 Visited.insert(OpDef);
1589
1590 unsigned Opcode = OpDef->getOpcode();
1591 switch (Opcode) {
1592 case TargetOpcode::G_CONSTANT:
1593 case TargetOpcode::G_FCONSTANT:
1594 return true;
1595 case TargetOpcode::G_INTRINSIC:
1596 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1597 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
1598 return cast<GIntrinsic>(*OpDef).getIntrinsicID() ==
1599 Intrinsic::spv_const_composite;
1600 case TargetOpcode::G_BUILD_VECTOR:
1601 case TargetOpcode::G_SPLAT_VECTOR: {
1602 for (unsigned i = OpDef->getNumExplicitDefs(); i < OpDef->getNumOperands();
1603 i++) {
1604 SPIRVType *OpNestedDef =
1605 OpDef->getOperand(i).isReg()
1606 ? MRI->getVRegDef(OpDef->getOperand(i).getReg())
1607 : nullptr;
1608 if (OpNestedDef && !isConstReg(MRI, OpNestedDef, Visited))
1609 return false;
1610 }
1611 return true;
1612 }
1613 }
1614 return false;
1615}
1616
1617// Return true if the virtual register represents a constant
1618static bool isConstReg(MachineRegisterInfo *MRI, Register OpReg) {
1619 SmallPtrSet<SPIRVType *, 4> Visited;
1620 if (SPIRVType *OpDef = MRI->getVRegDef(OpReg))
1621 return isConstReg(MRI, OpDef, Visited);
1622 return false;
1623}
1624
1625bool SPIRVInstructionSelector::selectBuildVector(Register ResVReg,
1626 const SPIRVType *ResType,
1627 MachineInstr &I) const {
1628 unsigned N = 0;
1629 if (ResType->getOpcode() == SPIRV::OpTypeVector)
1630 N = GR.getScalarOrVectorComponentCount(ResType);
1631 else if (ResType->getOpcode() == SPIRV::OpTypeArray)
1632 N = getArrayComponentCount(MRI, ResType);
1633 else
1634 report_fatal_error("Cannot select G_BUILD_VECTOR with a non-vector result");
1635 if (I.getNumExplicitOperands() - I.getNumExplicitDefs() != N)
1636 report_fatal_error("G_BUILD_VECTOR and the result type are inconsistent");
1637
1638 // check if we may construct a constant vector
1639 bool IsConst = true;
1640 for (unsigned i = I.getNumExplicitDefs();
1641 i < I.getNumExplicitOperands() && IsConst; ++i)
1642 if (!isConstReg(MRI, I.getOperand(i).getReg()))
1643 IsConst = false;
1644
1645 if (!IsConst && N < 2)
1647 "There must be at least two constituent operands in a vector");
1648
1649 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
1650 TII.get(IsConst ? SPIRV::OpConstantComposite
1651 : SPIRV::OpCompositeConstruct))
1652 .addDef(ResVReg)
1653 .addUse(GR.getSPIRVTypeID(ResType));
1654 for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
1655 MIB.addUse(I.getOperand(i).getReg());
1656 return MIB.constrainAllUses(TII, TRI, RBI);
1657}
1658
1659bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
1660 const SPIRVType *ResType,
1661 MachineInstr &I) const {
1662 unsigned N = 0;
1663 if (ResType->getOpcode() == SPIRV::OpTypeVector)
1664 N = GR.getScalarOrVectorComponentCount(ResType);
1665 else if (ResType->getOpcode() == SPIRV::OpTypeArray)
1666 N = getArrayComponentCount(MRI, ResType);
1667 else
1668 report_fatal_error("Cannot select G_SPLAT_VECTOR with a non-vector result");
1669
1670 unsigned OpIdx = I.getNumExplicitDefs();
1671 if (!I.getOperand(OpIdx).isReg())
1672 report_fatal_error("Unexpected argument in G_SPLAT_VECTOR");
1673
1674 // check if we may construct a constant vector
1675 Register OpReg = I.getOperand(OpIdx).getReg();
1676 bool IsConst = isConstReg(MRI, OpReg);
1677
1678 if (!IsConst && N < 2)
1680 "There must be at least two constituent operands in a vector");
1681
1682 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
1683 TII.get(IsConst ? SPIRV::OpConstantComposite
1684 : SPIRV::OpCompositeConstruct))
1685 .addDef(ResVReg)
1686 .addUse(GR.getSPIRVTypeID(ResType));
1687 for (unsigned i = 0; i < N; ++i)
1688 MIB.addUse(OpReg);
1689 return MIB.constrainAllUses(TII, TRI, RBI);
1690}
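// Illustrative sketch, not part of the source: for a 4-element splat of %c
// the builders above choose between
//   all-constant operands -> %v = OpConstantComposite %v4float %c %c %c %c
//   otherwise             -> %v = OpCompositeConstruct %v4float %c %c %c %c
// OpConstantComposite is a module-level constant, while OpCompositeConstruct
// computes the value at runtime inside the function.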
1691
1692bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
1693 const SPIRVType *ResType,
1694 unsigned CmpOpc,
1695 MachineInstr &I) const {
1696 Register Cmp0 = I.getOperand(2).getReg();
1697 Register Cmp1 = I.getOperand(3).getReg();
1698 assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
1699 GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
1700 "CMP operands should have the same type");
1701 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
1702 .addDef(ResVReg)
1703 .addUse(GR.getSPIRVTypeID(ResType))
1704 .addUse(Cmp0)
1705 .addUse(Cmp1)
1706 .constrainAllUses(TII, TRI, RBI);
1707}
1708
1709bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
1710 const SPIRVType *ResType,
1711 MachineInstr &I) const {
1712 auto Pred = I.getOperand(1).getPredicate();
1713 unsigned CmpOpc;
1714
1715 Register CmpOperand = I.getOperand(2).getReg();
1716 if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
1717 CmpOpc = getPtrCmpOpcode(Pred);
1718 else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
1719 CmpOpc = getBoolCmpOpcode(Pred);
1720 else
1721 CmpOpc = getICmpOpcode(Pred);
1722 return selectCmp(ResVReg, ResType, CmpOpc, I);
1723}
1724
1725void SPIRVInstructionSelector::renderFImm64(MachineInstrBuilder &MIB,
1726 const MachineInstr &I,
1727 int OpIdx) const {
1728 assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
1729 "Expected G_FCONSTANT");
1730 const ConstantFP *FPImm = I.getOperand(1).getFPImm();
1731 addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
1732}
1733
1734void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
1735 const MachineInstr &I,
1736 int OpIdx) const {
1737 assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1738 "Expected G_CONSTANT");
1739 addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
1740}
1741
1742Register
1743SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
1744 const SPIRVType *ResType) const {
1745 Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
1746 const SPIRVType *SpvI32Ty =
1747 ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
1748 // Find a constant in DT or build a new one.
1749 auto ConstInt = ConstantInt::get(LLVMTy, Val);
1750 Register NewReg = GR.find(ConstInt, GR.CurMF);
1751 if (!NewReg.isValid()) {
1752 NewReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
1753 GR.add(ConstInt, GR.CurMF, NewReg);
1754 MachineInstr *MI;
1755 MachineBasicBlock &BB = *I.getParent();
1756 if (Val == 0) {
1757 MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1758 .addDef(NewReg)
1759 .addUse(GR.getSPIRVTypeID(SpvI32Ty));
1760 } else {
1761 MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1762 .addDef(NewReg)
1763 .addUse(GR.getSPIRVTypeID(SpvI32Ty))
1764 .addImm(APInt(32, Val).getZExtValue());
1765 }
1766 constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
1767 }
1768 return NewReg;
1769}
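// Illustrative sketch, not part of the source: buildI32Constant deduplicates
// through the registry, so repeated requests reuse one register (Ty stands
// for any i32 SPIRVType here):
//   Register A = buildI32Constant(42, I, Ty); // emits %a = OpConstantI %i32 42
//   Register B = buildI32Constant(42, I, Ty); // GR.find hit: B == A, no new MI
//   Register Z = buildI32Constant(0, I, Ty);  // emits %z = OpConstantNull %i32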
1770
1771bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
1772 const SPIRVType *ResType,
1773 MachineInstr &I) const {
1774 unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
1775 return selectCmp(ResVReg, ResType, CmpOp, I);
1776}
1777
1778Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
1779 MachineInstr &I) const {
1780 // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
1781 bool ZeroAsNull = STI.isOpenCLEnv();
1782 if (ResType->getOpcode() == SPIRV::OpTypeVector)
1783 return GR.getOrCreateConstVector(0UL, I, ResType, TII, ZeroAsNull);
1784 return GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
1785}
1786
1787Register SPIRVInstructionSelector::buildZerosValF(const SPIRVType *ResType,
1788 MachineInstr &I) const {
1789 // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
1790 bool ZeroAsNull = STI.isOpenCLEnv();
1791 APFloat VZero = getZeroFP(GR.getTypeForSPIRVType(ResType));
1792 if (ResType->getOpcode() == SPIRV::OpTypeVector)
1793 return GR.getOrCreateConstVector(VZero, I, ResType, TII, ZeroAsNull);
1794 return GR.getOrCreateConstFP(VZero, I, ResType, TII, ZeroAsNull);
1795}
1796
1797Register SPIRVInstructionSelector::buildOnesValF(const SPIRVType *ResType,
1798 MachineInstr &I) const {
1799 // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
1800 bool ZeroAsNull = STI.isOpenCLEnv();
1801 APFloat VOne = getOneFP(GR.getTypeForSPIRVType(ResType));
1802 if (ResType->getOpcode() == SPIRV::OpTypeVector)
1803 return GR.getOrCreateConstVector(VOne, I, ResType, TII, ZeroAsNull);
1804 return GR.getOrCreateConstFP(VOne, I, ResType, TII, ZeroAsNull);
1805}
1806
1807Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
1808 const SPIRVType *ResType,
1809 MachineInstr &I) const {
1810 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1811 APInt One =
1812 AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
1813 if (ResType->getOpcode() == SPIRV::OpTypeVector)
1814 return GR.getOrCreateConstVector(One.getZExtValue(), I, ResType, TII);
1815 return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
1816}
1817
1818bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
1819 const SPIRVType *ResType,
1820 MachineInstr &I,
1821 bool IsSigned) const {
1822 // To extend a bool, we need to use OpSelect between constants.
1823 Register ZeroReg = buildZerosVal(ResType, I);
1824 Register OneReg = buildOnesVal(IsSigned, ResType, I);
1825 bool IsScalarBool =
1826 GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
1827 unsigned Opcode =
1828 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
1829 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1830 .addDef(ResVReg)
1831 .addUse(GR.getSPIRVTypeID(ResType))
1832 .addUse(I.getOperand(1).getReg())
1833 .addUse(OneReg)
1834 .addUse(ZeroReg)
1835 .constrainAllUses(TII, TRI, RBI);
1836}
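// Illustrative sketch, not part of the source: extending an i1 %cond to i32
// with the OpSelect pattern above comes out roughly as:
//   zext: %r = OpSelect %i32 %cond %one %zero   ; %one  = 1
//   sext: %r = OpSelect %i32 %cond %ones %zero  ; %ones = 0xFFFFFFFF
// with OpSelectSIVCond in place of OpSelectSISCond for vector conditions.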
1837
1838bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
1839 const SPIRVType *ResType,
1840 MachineInstr &I, bool IsSigned,
1841 unsigned Opcode) const {
1842 Register SrcReg = I.getOperand(1).getReg();
1843 // We can convert a bool value directly to a float type without OpConvert*ToF;
1844 // however, the translator generates OpSelect+OpConvert*ToF, so we do the same.
1845 if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
1846 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1847 SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
1848 if (ResType->getOpcode() == SPIRV::OpTypeVector) {
1849 const unsigned NumElts = ResType->getOperand(2).getImm();
1850 TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
1851 }
1852 SrcReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1853 selectSelect(SrcReg, TmpType, I, false);
1854 }
1855 return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
1856}
1857
1858bool SPIRVInstructionSelector::selectExt(Register ResVReg,
1859 const SPIRVType *ResType,
1860 MachineInstr &I, bool IsSigned) const {
1861 Register SrcReg = I.getOperand(1).getReg();
1862 if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool))
1863 return selectSelect(ResVReg, ResType, I, IsSigned);
1864
1865 SPIRVType *SrcType = GR.getSPIRVTypeForVReg(SrcReg);
1866 if (SrcType == ResType) {
1867 const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(ResVReg);
1868 const TargetRegisterClass *SrcRC = MRI->getRegClassOrNull(SrcReg);
1869 if (DstRC != SrcRC && SrcRC)
1870 MRI->setRegClass(ResVReg, SrcRC);
1871 return BuildMI(*I.getParent(), I, I.getDebugLoc(),
1872 TII.get(TargetOpcode::COPY))
1873 .addDef(ResVReg)
1874 .addUse(SrcReg)
1875 .constrainAllUses(TII, TRI, RBI);
1876 }
1877
1878 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1879 return selectUnOp(ResVReg, ResType, I, Opcode);
1880}
1881
1882bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
1883 Register ResVReg,
1884 MachineInstr &I,
1885 const SPIRVType *IntTy,
1886 const SPIRVType *BoolTy) const {
1887 // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
1888 Register BitIntReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1889 bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
1890 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
1891 Register Zero = buildZerosVal(IntTy, I);
1892 Register One = buildOnesVal(false, IntTy, I);
1893 MachineBasicBlock &BB = *I.getParent();
1894 BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1895 .addDef(BitIntReg)
1896 .addUse(GR.getSPIRVTypeID(IntTy))
1897 .addUse(IntReg)
1898 .addUse(One)
1899 .constrainAllUses(TII, TRI, RBI);
1900 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
1901 .addDef(ResVReg)
1902 .addUse(GR.getSPIRVTypeID(BoolTy))
1903 .addUse(BitIntReg)
1904 .addUse(Zero)
1905 .constrainAllUses(TII, TRI, RBI);
1906}
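// Illustrative sketch, not part of the source: truncating i32 %x to a bool via
// the mask-and-compare pattern above:
//   %bit = OpBitwiseAnd %i32 %x %one     ; keep only the low bit
//   %res = OpINotEqual %bool %bit %zero  ; true iff that bit is set
// The S/V opcode split merely distinguishes scalar from vector operands.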
1907
1908bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
1909 const SPIRVType *ResType,
1910 MachineInstr &I) const {
1911 Register IntReg = I.getOperand(1).getReg();
1912 const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
1913 if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool))
1914 return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
1915 if (ArgType == ResType) {
1916 const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(ResVReg);
1917 const TargetRegisterClass *SrcRC = MRI->getRegClassOrNull(IntReg);
1918 if (DstRC != SrcRC && SrcRC)
1919 MRI->setRegClass(ResVReg, SrcRC);
1920 return BuildMI(*I.getParent(), I, I.getDebugLoc(),
1921 TII.get(TargetOpcode::COPY))
1922 .addDef(ResVReg)
1923 .addUse(IntReg)
1924 .constrainAllUses(TII, TRI, RBI);
1925 }
1926 bool IsSigned = GR.isScalarOrVectorSigned(ResType);
1927 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1928 return selectUnOp(ResVReg, ResType, I, Opcode);
1929}
1930
1931bool SPIRVInstructionSelector::selectConst(Register ResVReg,
1932 const SPIRVType *ResType,
1933 const APInt &Imm,
1934 MachineInstr &I) const {
1935 unsigned TyOpcode = ResType->getOpcode();
1936 assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
1937 MachineBasicBlock &BB = *I.getParent();
1938 if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
1939 Imm.isZero())
1940 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1941 .addDef(ResVReg)
1942 .addUse(GR.getSPIRVTypeID(ResType))
1943 .constrainAllUses(TII, TRI, RBI);
1944 if (TyOpcode == SPIRV::OpTypeInt) {
1945 assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
1946 Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
1947 if (Reg == ResVReg)
1948 return true;
1949 return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1950 .addDef(ResVReg)
1951 .addUse(Reg)
1952 .constrainAllUses(TII, TRI, RBI);
1953 }
1954 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1955 .addDef(ResVReg)
1956 .addUse(GR.getSPIRVTypeID(ResType));
1957 // <=32-bit integers should be caught by the sdag pattern.
1958 assert(Imm.getBitWidth() > 32);
1959 addNumImm(Imm, MIB);
1960 return MIB.constrainAllUses(TII, TRI, RBI);
1961}
1962
1963bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
1964 const SPIRVType *ResType,
1965 MachineInstr &I) const {
1966 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1967 .addDef(ResVReg)
1968 .addUse(GR.getSPIRVTypeID(ResType))
1969 .constrainAllUses(TII, TRI, RBI);
1970}
1971
1972static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1973 assert(MO.isReg());
1974 const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1975 if (TypeInst->getOpcode() == SPIRV::ASSIGN_TYPE) {
1976 assert(TypeInst->getOperand(1).isReg());
1977 MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1978 return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
1979 }
1980 return TypeInst->getOpcode() == SPIRV::OpConstantI;
1981}
1982
1983static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1984 const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1985 if (TypeInst->getOpcode() == SPIRV::OpConstantI)
1986 return TypeInst->getOperand(2).getImm();
1987 MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1988 assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
1989 return ImmInst->getOperand(1).getCImm()->getZExtValue();
1990}
1991
1992bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
1993 const SPIRVType *ResType,
1994 MachineInstr &I) const {
1995 MachineBasicBlock &BB = *I.getParent();
1996 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
1997 .addDef(ResVReg)
1998 .addUse(GR.getSPIRVTypeID(ResType))
1999 // object to insert
2000 .addUse(I.getOperand(3).getReg())
2001 // composite to insert into
2002 .addUse(I.getOperand(2).getReg());
2003 for (unsigned i = 4; i < I.getNumOperands(); i++)
2004 MIB.addImm(foldImm(I.getOperand(i), MRI));
2005 return MIB.constrainAllUses(TII, TRI, RBI);
2006}
2007
2008bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
2009 const SPIRVType *ResType,
2010 MachineInstr &I) const {
2011 MachineBasicBlock &BB = *I.getParent();
2012 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
2013 .addDef(ResVReg)
2014 .addUse(GR.getSPIRVTypeID(ResType))
2015 .addUse(I.getOperand(2).getReg());
2016 for (unsigned i = 3; i < I.getNumOperands(); i++)
2017 MIB.addImm(foldImm(I.getOperand(i), MRI));
2018 return MIB.constrainAllUses(TII, TRI, RBI);
2019}
2020
2021bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
2022 const SPIRVType *ResType,
2023 MachineInstr &I) const {
2024 if (isImm(I.getOperand(4), MRI))
2025 return selectInsertVal(ResVReg, ResType, I);
2026 MachineBasicBlock &BB = *I.getParent();
2027 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
2028 .addDef(ResVReg)
2029 .addUse(GR.getSPIRVTypeID(ResType))
2030 .addUse(I.getOperand(2).getReg())
2031 .addUse(I.getOperand(3).getReg())
2032 .addUse(I.getOperand(4).getReg())
2033 .constrainAllUses(TII, TRI, RBI);
2034}
2035
2036bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
2037 const SPIRVType *ResType,
2038 MachineInstr &I) const {
2039 if (isImm(I.getOperand(3), MRI))
2040 return selectExtractVal(ResVReg, ResType, I);
2041 MachineBasicBlock &BB = *I.getParent();
2042 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
2043 .addDef(ResVReg)
2044 .addUse(GR.getSPIRVTypeID(ResType))
2045 .addUse(I.getOperand(2).getReg())
2046 .addUse(I.getOperand(3).getReg())
2047 .constrainAllUses(TII, TRI, RBI);
2048}
2049
2050bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
2051 const SPIRVType *ResType,
2052 MachineInstr &I) const {
2053 const bool IsGEPInBounds = I.getOperand(2).getImm();
2054
2055 // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator only
2056 // relies on PtrAccessChain, so we'll try not to deviate. For Vulkan however,
2057 // we have to use Op[InBounds]AccessChain.
2058 const unsigned Opcode = STI.isVulkanEnv()
2059 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
2060 : SPIRV::OpAccessChain)
2061 : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
2062 : SPIRV::OpPtrAccessChain);
2063
2064 auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
2065 .addDef(ResVReg)
2066 .addUse(GR.getSPIRVTypeID(ResType))
2067 // Object to get a pointer to.
2068 .addUse(I.getOperand(3).getReg());
2069 // Adding indices.
2070 const unsigned StartingIndex =
2071 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
2072 ? 5
2073 : 4;
2074 for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
2075 Res.addUse(I.getOperand(i).getReg());
2076 return Res.constrainAllUses(TII, TRI, RBI);
2077}
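// Illustrative sketch, not part of the source: for a spv_gep with base %p and
// indices %i0, %i1 the selection above yields roughly:
//   Vulkan: %q = OpAccessChain %ptr %p %i1        ; indices start at operand 5
//   OpenCL: %q = OpPtrAccessChain %ptr %p %i0 %i1 ; operand 4 is the Element
// with the InBounds variants substituted when the GEP is inbounds.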
2078
2079// Maybe wrap a value into OpSpecConstantOp
2080bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
2081 MachineInstr &I, SmallVector<Register> &CompositeArgs) const {
2082 bool Result = true;
2083 unsigned Lim = I.getNumExplicitOperands();
2084 for (unsigned i = I.getNumExplicitDefs() + 1; i < Lim; ++i) {
2085 Register OpReg = I.getOperand(i).getReg();
2086 SPIRVType *OpDefine = MRI->getVRegDef(OpReg);
2087 SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
2088 SmallPtrSet<SPIRVType *, 4> Visited;
2089 if (!OpDefine || !OpType || isConstReg(MRI, OpDefine, Visited) ||
2090 OpDefine->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
2091 GR.isAggregateType(OpType)) {
2092 // The case of G_ADDRSPACE_CAST inside spv_const_composite() is processed
2093 // by selectAddrSpaceCast()
2094 CompositeArgs.push_back(OpReg);
2095 continue;
2096 }
2097 MachineFunction *MF = I.getMF();
2098 Register WrapReg = GR.find(OpDefine, MF);
2099 if (WrapReg.isValid()) {
2100 CompositeArgs.push_back(WrapReg);
2101 continue;
2102 }
2103 // Create a new register for the wrapper
2104 WrapReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2105 GR.add(OpDefine, MF, WrapReg);
2106 CompositeArgs.push_back(WrapReg);
2107 // Decorate the wrapper register and generate a new instruction
2108 MRI->setType(WrapReg, LLT::pointer(0, 64));
2109 GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
2110 MachineBasicBlock &BB = *I.getParent();
2111 Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
2112 .addDef(WrapReg)
2113 .addUse(GR.getSPIRVTypeID(OpType))
2114 .addImm(static_cast<uint32_t>(SPIRV::Opcode::Bitcast))
2115 .addUse(OpReg)
2116 .constrainAllUses(TII, TRI, RBI);
2117 if (!Result)
2118 break;
2119 }
2120 return Result;
2121}
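// Illustrative sketch, not part of the source: a non-constant pointer operand
// of spv_const_composite is wrapped so the composite can stay constant:
//   %wrap = OpSpecConstantOp %ptr_ty Bitcast %op
// Operands that are already constant, addrspace casts, and aggregates are
// pushed to CompositeArgs unchanged.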
2122
2123bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
2124 const SPIRVType *ResType,
2125 MachineInstr &I) const {
2126 MachineBasicBlock &BB = *I.getParent();
2127 Intrinsic::ID IID = cast<GIntrinsic>(I).getIntrinsicID();
2128 switch (IID) {
2129 case Intrinsic::spv_load:
2130 return selectLoad(ResVReg, ResType, I);
2131 case Intrinsic::spv_store:
2132 return selectStore(I);
2133 case Intrinsic::spv_extractv:
2134 return selectExtractVal(ResVReg, ResType, I);
2135 case Intrinsic::spv_insertv:
2136 return selectInsertVal(ResVReg, ResType, I);
2137 case Intrinsic::spv_extractelt:
2138 return selectExtractElt(ResVReg, ResType, I);
2139 case Intrinsic::spv_insertelt:
2140 return selectInsertElt(ResVReg, ResType, I);
2141 case Intrinsic::spv_gep:
2142 return selectGEP(ResVReg, ResType, I);
2143 case Intrinsic::spv_unref_global:
2144 case Intrinsic::spv_init_global: {
2145 MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
2146 MachineInstr *Init = I.getNumExplicitOperands() > 2
2147 ? MRI->getVRegDef(I.getOperand(2).getReg())
2148 : nullptr;
2149 assert(MI);
2150 return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
2151 }
2152 case Intrinsic::spv_undef: {
2153 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
2154 .addDef(ResVReg)
2155 .addUse(GR.getSPIRVTypeID(ResType));
2156 return MIB.constrainAllUses(TII, TRI, RBI);
2157 }
2158 case Intrinsic::spv_const_composite: {
2159 // If no values are attached, the composite is a null constant.
2160 bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
2161 // Select a proper instruction.
2162 unsigned Opcode = SPIRV::OpConstantNull;
2163 SmallVector<Register> CompositeArgs;
2164 if (!IsNull) {
2165 Opcode = SPIRV::OpConstantComposite;
2166 if (!wrapIntoSpecConstantOp(I, CompositeArgs))
2167 return false;
2168 }
2169 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
2170 .addDef(ResVReg)
2171 .addUse(GR.getSPIRVTypeID(ResType));
2172 // Skip the type MD node we already used when generating assign.type for this.
2173 if (!IsNull) {
2174 for (Register OpReg : CompositeArgs)
2175 MIB.addUse(OpReg);
2176 }
2177 return MIB.constrainAllUses(TII, TRI, RBI);
2178 }
2179 case Intrinsic::spv_assign_name: {
2180 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
2181 MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
2182 for (unsigned i = I.getNumExplicitDefs() + 2;
2183 i < I.getNumExplicitOperands(); ++i) {
2184 MIB.addImm(I.getOperand(i).getImm());
2185 }
2186 return MIB.constrainAllUses(TII, TRI, RBI);
2187 }
2188 case Intrinsic::spv_switch: {
2189 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
2190 for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
2191 if (I.getOperand(i).isReg())
2192 MIB.addReg(I.getOperand(i).getReg());
2193 else if (I.getOperand(i).isCImm())
2194 addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
2195 else if (I.getOperand(i).isMBB())
2196 MIB.addMBB(I.getOperand(i).getMBB());
2197 else
2198 llvm_unreachable("Unexpected OpSwitch operand");
2199 }
2200 return MIB.constrainAllUses(TII, TRI, RBI);
2201 }
2202 case Intrinsic::spv_cmpxchg:
2203 return selectAtomicCmpXchg(ResVReg, ResType, I);
2204 case Intrinsic::spv_unreachable:
2205 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
2206 break;
2207 case Intrinsic::spv_alloca:
2208 return selectFrameIndex(ResVReg, ResType, I);
2209 case Intrinsic::spv_alloca_array:
2210 return selectAllocaArray(ResVReg, ResType, I);
2211 case Intrinsic::spv_assume:
2212 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
2213 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
2214 .addUse(I.getOperand(1).getReg());
2215 break;
2216 case Intrinsic::spv_expect:
2217 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
2218 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
2219 .addDef(ResVReg)
2220 .addUse(GR.getSPIRVTypeID(ResType))
2221 .addUse(I.getOperand(2).getReg())
2222 .addUse(I.getOperand(3).getReg());
2223 break;
2224 case Intrinsic::spv_thread_id:
2225 return selectSpvThreadId(ResVReg, ResType, I);
2226 case Intrinsic::spv_all:
2227 return selectAll(ResVReg, ResType, I);
2228 case Intrinsic::spv_any:
2229 return selectAny(ResVReg, ResType, I);
2230 case Intrinsic::spv_lerp:
2231 return selectFmix(ResVReg, ResType, I);
2232 case Intrinsic::spv_length:
2233 return selectLength(ResVReg, ResType, I);
2234 case Intrinsic::spv_frac:
2235 return selectFrac(ResVReg, ResType, I);
2236 case Intrinsic::spv_normalize:
2237 return selectNormalize(ResVReg, ResType, I);
2238 case Intrinsic::spv_rsqrt:
2239 return selectRsqrt(ResVReg, ResType, I);
2240 case Intrinsic::spv_lifetime_start:
2241 case Intrinsic::spv_lifetime_end: {
2242 unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
2243 : SPIRV::OpLifetimeStop;
2244 int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
2245 Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
2246 if (Size == -1)
2247 Size = 0;
2248 BuildMI(BB, I, I.getDebugLoc(), TII.get(Op)).addUse(PtrReg).addImm(Size);
2249 } break;
2250 case Intrinsic::spv_saturate:
2251 return selectSaturate(ResVReg, ResType, I);
2252 default: {
2253 std::string DiagMsg;
2254 raw_string_ostream OS(DiagMsg);
2255 I.print(OS);
2256 DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
2257 report_fatal_error(DiagMsg.c_str(), false);
2258 }
2259 }
2260 return true;
2261}
2262
2263bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
2264 const SPIRVType *ResType,
2265 MachineInstr &I) const {
2266 // There was an allocation size parameter to the allocation instruction
2267 // that is not 1 (i.e. a variable-length array).
2268 MachineBasicBlock &BB = *I.getParent();
2269 return BuildMI(BB, I, I.getDebugLoc(),
2270 TII.get(SPIRV::OpVariableLengthArrayINTEL))
2271 .addDef(ResVReg)
2272 .addUse(GR.getSPIRVTypeID(ResType))
2273 .addUse(I.getOperand(2).getReg())
2274 .constrainAllUses(TII, TRI, RBI);
2275}
2276
2277bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
2278 const SPIRVType *ResType,
2279 MachineInstr &I) const {
2280 // Change order of instructions if needed: all OpVariable instructions in a
2281 // function must be the first instructions in the first block
2282 MachineFunction *MF = I.getParent()->getParent();
2283 MachineBasicBlock *MBB = &MF->front();
2284 auto It = MBB->SkipPHIsAndLabels(MBB->begin()), E = MBB->end();
2285 bool IsHeader = false;
2286 unsigned Opcode;
2287 for (; It != E && It != I; ++It) {
2288 Opcode = It->getOpcode();
2289 if (Opcode == SPIRV::OpFunction || Opcode == SPIRV::OpFunctionParameter) {
2290 IsHeader = true;
2291 } else if (IsHeader &&
2292 !(Opcode == SPIRV::ASSIGN_TYPE || Opcode == SPIRV::OpLabel)) {
2293 ++It;
2294 break;
2295 }
2296 }
2297 return BuildMI(*MBB, It, It->getDebugLoc(), TII.get(SPIRV::OpVariable))
2298 .addDef(ResVReg)
2299 .addUse(GR.getSPIRVTypeID(ResType))
2300 .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
2301 .constrainAllUses(TII, TRI, RBI);
2302}
2303
2304bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
2305 // InstructionSelector walks backwards through the instructions. We can use
2306 // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
2307 // first, so we can generate an OpBranchConditional here. If there is no
2308 // G_BRCOND, we just use OpBranch for a regular unconditional branch.
2309 const MachineInstr *PrevI = I.getPrevNode();
2310 MachineBasicBlock &MBB = *I.getParent();
2311 if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
2312 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
2313 .addUse(PrevI->getOperand(0).getReg())
2314 .addMBB(PrevI->getOperand(1).getMBB())
2315 .addMBB(I.getOperand(0).getMBB())
2316 .constrainAllUses(TII, TRI, RBI);
2317 }
2318 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
2319 .addMBB(I.getOperand(0).getMBB())
2320 .constrainAllUses(TII, TRI, RBI);
2321}
2322
2323bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
2324 // InstructionSelector walks backwards through the instructions. For an
2325 // explicit conditional branch with no fallthrough, we use both a G_BR and a
2326 // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
2327 // generate the OpBranchConditional in selectBranch above.
2328 //
2329 // If an OpBranchConditional has been generated, we simply return, as the work
2330 // is already done. If there is no OpBranchConditional, LLVM must be relying on
2331 // implicit fallthrough to the next basic block, so we need to create an
2332 // OpBranchConditional with an explicit "false" argument pointing to the next
2333 // basic block that LLVM would fall through to.
2334 const MachineInstr *NextI = I.getNextNode();
2335 // Check if this has already been successfully selected.
2336 if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
2337 return true;
2338 // Must be relying on implicit block fallthrough, so generate an
2339 // OpBranchConditional with the "next" basic block as the "false" target.
2340 MachineBasicBlock &MBB = *I.getParent();
2341 unsigned NextMBBNum = MBB.getNextNode()->getNumber();
2342 MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
2343 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
2344 .addUse(I.getOperand(0).getReg())
2345 .addMBB(I.getOperand(1).getMBB())
2346 .addMBB(NextMBB)
2347 .constrainAllUses(TII, TRI, RBI);
2348}
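// Illustrative sketch, not part of the source: a conditional branch relying on
// MIR fallthrough gains an explicit false edge here. Roughly:
//   bb.0:  G_BRCOND %c, %bb.then   ; falls through to bb.1
// becomes
//   OpBranchConditional %c %bb.then %bb.1
// since every SPIR-V block must end in an explicit terminator.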
2349
2350bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
2351 const SPIRVType *ResType,
2352 MachineInstr &I) const {
2353 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
2354 .addDef(ResVReg)
2355 .addUse(GR.getSPIRVTypeID(ResType));
2356 const unsigned NumOps = I.getNumOperands();
2357 for (unsigned i = 1; i < NumOps; i += 2) {
2358 MIB.addUse(I.getOperand(i + 0).getReg());
2359 MIB.addMBB(I.getOperand(i + 1).getMBB());
2360 }
2361 return MIB.constrainAllUses(TII, TRI, RBI);
2362}
2363
2364bool SPIRVInstructionSelector::selectGlobalValue(
2365 Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
2366 // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
2367 MachineIRBuilder MIRBuilder(I);
2368 const GlobalValue *GV = I.getOperand(1).getGlobal();
2369 Type *GVType = toTypedPointer(GR.getDeducedGlobalValueType(GV));
2370 SPIRVType *PointerBaseType;
2371 if (GVType->isArrayTy()) {
2372 SPIRVType *ArrayElementType =
2373 GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
2374 SPIRV::AccessQualifier::ReadWrite, false);
2375 PointerBaseType = GR.getOrCreateSPIRVArrayType(
2376 ArrayElementType, GVType->getArrayNumElements(), I, TII);
2377 } else {
2378 PointerBaseType = GR.getOrCreateSPIRVType(
2379 GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
2380 }
2381 SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
2382 PointerBaseType, I, TII,
2383 addressSpaceToStorageClass(GV->getAddressSpace(), STI));
2384
2385 std::string GlobalIdent;
2386 if (!GV->hasName()) {
2387 unsigned &ID = UnnamedGlobalIDs[GV];
2388 if (ID == 0)
2389 ID = UnnamedGlobalIDs.size();
2390 GlobalIdent = "__unnamed_" + Twine(ID).str();
2391 } else {
2392 GlobalIdent = GV->getGlobalIdentifier();
2393 }
2394
2395 // Behaviour of functions as operands depends on availability of the
2396 // corresponding extension (SPV_INTEL_function_pointers):
2397 // - If there is an extension to operate with functions as operands:
2398 // We create a proper constant operand and evaluate a correct type for a
2399 // function pointer.
2400 // - Without the required extension:
2401 // We have functions as operands in tests with blocks of instructions, e.g. in
2402 // transcoding/global_block.ll. These operands are not used and should be
2403 // substituted by zero constants. Their type is expected to be always
2404 // OpTypePointer Function %uchar.
2405 if (isa<Function>(GV)) {
2406 const Constant *ConstVal = GV;
2407 MachineBasicBlock &BB = *I.getParent();
2408 Register NewReg = GR.find(ConstVal, GR.CurMF);
2409 if (!NewReg.isValid()) {
2410 Register NewReg = ResVReg;
2411 GR.add(ConstVal, GR.CurMF, NewReg);
2412 const Function *GVFun =
2413 STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
2414 ? dyn_cast<Function>(GV)
2415 : nullptr;
2416 if (GVFun) {
2417 // References to a function via function pointers generate virtual
2418 // registers without a definition. We will resolve it later, during
2419 // module analysis stage.
2420 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2421 Register FuncVReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
2422 MRI->setRegClass(FuncVReg, &SPIRV::iIDRegClass);
2423 MachineInstrBuilder MB =
2424 BuildMI(BB, I, I.getDebugLoc(),
2425 TII.get(SPIRV::OpConstantFunctionPointerINTEL))
2426 .addDef(NewReg)
2427 .addUse(GR.getSPIRVTypeID(ResType))
2428 .addUse(FuncVReg);
2429 // mapping the function pointer to the used Function
2430 GR.recordFunctionPointer(&MB.getInstr()->getOperand(2), GVFun);
2431 return MB.constrainAllUses(TII, TRI, RBI);
2432 }
2433 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
2434 .addDef(NewReg)
2435 .addUse(GR.getSPIRVTypeID(ResType))
2436 .constrainAllUses(TII, TRI, RBI);
2437 }
2438 assert(NewReg != ResVReg);
2439 return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
2440 .addDef(ResVReg)
2441 .addUse(NewReg)
2442 .constrainAllUses(TII, TRI, RBI);
2443 }
2444 auto GlobalVar = cast<GlobalVariable>(GV);
2445 assert(GlobalVar->getName() != "llvm.global.annotations");
2446
2447 bool HasInit = GlobalVar->hasInitializer() &&
2448 !isa<UndefValue>(GlobalVar->getInitializer());
2449 // Skip the empty declaration for GVs with initializers until we get the
2450 // declaration with the initializer passed in.
2451 if (HasInit && !Init)
2452 return true;
2453
2454 unsigned AddrSpace = GV->getAddressSpace();
2455 SPIRV::StorageClass::StorageClass Storage =
2456 addressSpaceToStorageClass(AddrSpace, STI);
2457 bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
2458 Storage != SPIRV::StorageClass::Function;
2459 SPIRV::LinkageType::LinkageType LnkType =
2460 (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
2461 ? SPIRV::LinkageType::Import
2462 : (GV->getLinkage() == GlobalValue::LinkOnceODRLinkage &&
2463 STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
2464 ? SPIRV::LinkageType::LinkOnceODR
2465 : SPIRV::LinkageType::Export);
2466
2467 Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
2468 Storage, Init, GlobalVar->isConstant(),
2469 HasLnkTy, LnkType, MIRBuilder, true);
2470 return Reg.isValid();
2471}
2472
2473bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
2474 const SPIRVType *ResType,
2475 MachineInstr &I) const {
2476 if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
2477 return selectExtInst(ResVReg, ResType, I, CL::log10);
2478 }
2479
2480 // There is no log10 instruction in the GLSL Extended Instruction set, so it
2481 // is implemented as:
2482 // log10(x) = log2(x) * (1 / log2(10))
2483 // = log2(x) * 0.30103
2484
2485 MachineIRBuilder MIRBuilder(I);
2486 MachineBasicBlock &BB = *I.getParent();
2487
2488 // Build log2(x).
2489 Register VarReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
2490 bool Result =
2491 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
2492 .addDef(VarReg)
2493 .addUse(GR.getSPIRVTypeID(ResType))
2494 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
2495 .addImm(GL::Log2)
2496 .add(I.getOperand(1))
2497 .constrainAllUses(TII, TRI, RBI);
2498
2499 // Build 0.30103.
2500 assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
2501 ResType->getOpcode() == SPIRV::OpTypeFloat);
2502 // TODO: Add matrix implementation once supported by the HLSL frontend.
2503 const SPIRVType *SpirvScalarType =
2504 ResType->getOpcode() == SPIRV::OpTypeVector
2505 ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
2506 : ResType;
2507 Register ScaleReg =
2508 GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);
2509
2510 // Multiply log2(x) by 0.30103 to get log10(x) result.
2511 auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
2512 ? SPIRV::OpVectorTimesScalar
2513 : SPIRV::OpFMulS;
2514 Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
2515 .addDef(ResVReg)
2516 .addUse(GR.getSPIRVTypeID(ResType))
2517 .addUse(VarReg)
2518 .addUse(ScaleReg)
2519 .constrainAllUses(TII, TRI, RBI);
2520
2521 return Result;
2522}
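// Illustrative sketch, not part of the source: with no log10 in GLSL.std.450,
// the expansion above emits roughly the following for a scalar float:
//   %l2  = OpExtInst %float %ext Log2 %x
//   %res = OpFMul %float %l2 %scale   ; %scale = OpConstant %float 0.30103
// using OpVectorTimesScalar instead of the multiply for vector results.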
2523
2524bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg,
2525 const SPIRVType *ResType,
2526 MachineInstr &I) const {
2527 // DX intrinsic: @llvm.dx.thread.id(i32)
2528 // ID Name Description
2529 // 93 ThreadId reads the thread ID
2530
2531 MachineIRBuilder MIRBuilder(I);
2532 const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
2533 const SPIRVType *Vec3Ty =
2534 GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
2535 const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
2536 Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
2537
2538 // Create new register for GlobalInvocationID builtin variable.
2539 Register NewRegister =
2540 MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::iIDRegClass);
2541 MIRBuilder.getMRI()->setType(NewRegister, LLT::pointer(0, 64));
2542 GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
2543
2544 // Build GlobalInvocationID global variable with the necessary decorations.
2545 Register Variable = GR.buildGlobalVariable(
2546 NewRegister, PtrType,
2547 getLinkStringForBuiltIn(SPIRV::BuiltIn::GlobalInvocationId), nullptr,
2548 SPIRV::StorageClass::Input, nullptr, true, true,
2549 SPIRV::LinkageType::Import, MIRBuilder, false);
2550
2551 // Create new register for loading value.
2552 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2553 Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2554 MIRBuilder.getMRI()->setType(LoadedRegister, LLT::pointer(0, 64));
2555 GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());
2556
2557 // Load v3uint value from the global variable.
2558 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
2559 .addDef(LoadedRegister)
2560 .addUse(GR.getSPIRVTypeID(Vec3Ty))
2561 .addUse(Variable);
2562
2563 // Get Thread ID index. Expecting operand is a constant immediate value,
2564 // wrapped in a type assignment.
2565 assert(I.getOperand(2).isReg());
2566 Register ThreadIdReg = I.getOperand(2).getReg();
2567 SPIRVType *ConstTy = this->MRI->getVRegDef(ThreadIdReg);
2568 assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
2569 ConstTy->getOperand(1).isReg());
2570 Register ConstReg = ConstTy->getOperand(1).getReg();
2571 const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
2572 assert(Const && Const->getOpcode() == TargetOpcode::G_CONSTANT);
2573 const llvm::APInt &Val = Const->getOperand(1).getCImm()->getValue();
2574 const uint32_t ThreadId = Val.getZExtValue();
2575
2576 // Extract the thread ID from the loaded vector value.
2577 MachineBasicBlock &BB = *I.getParent();
2578 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
2579 .addDef(ResVReg)
2580 .addUse(GR.getSPIRVTypeID(ResType))
2581 .addUse(LoadedRegister)
2582 .addImm(ThreadId);
2583 return MIB.constrainAllUses(TII, TRI, RBI);
2584}
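// Illustrative sketch, not part of the source: the lowering above materializes
// the builtin variable and extracts the requested component:
//   %gid = OpVariable %_ptr_Input_v3uint Input  ; BuiltIn GlobalInvocationId
//   %vec = OpLoad %v3uint %gid
//   %tid = OpCompositeExtract %uint %vec 0      ; index taken from operand 2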
2585
2586namespace llvm {
2587InstructionSelector *
2588createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
2589 const SPIRVSubtarget &Subtarget,
2590 const RegisterBankInfo &RBI) {
2591 return new SPIRVInstructionSelector(TM, Subtarget, RBI);
2592}
2593} // namespace llvm