//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "MCTargetDesc/SPIRVMCTargetDesc.h"
#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

namespace {

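// Cache the IDs of the named sync scopes: the string lookups go through
// LLVMContext::getOrInsertSyncScopeID once per function (in setupMF) rather
// than once per atomic instruction.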
struct SyncScopeIDs {
  llvm::SyncScope::ID Work_ItemSSID;
  llvm::SyncScope::ID WorkGroupSSID;
  llvm::SyncScope::ID DeviceSSID;
  llvm::SyncScope::ID AllSVMDevicesSSID;
  llvm::SyncScope::ID SubGroupSSID;

  SyncScopeIDs() {}
  SyncScopeIDs(llvm::LLVMContext &Context) {
    Work_ItemSSID = Context.getOrInsertSyncScopeID("work_item");
    WorkGroupSSID = Context.getOrInsertSyncScopeID("workgroup");
    DeviceSSID = Context.getOrInsertSyncScopeID("device");
    AllSVMDevicesSSID = Context.getOrInsertSyncScopeID("all_svm_devices");
    SubGroupSSID = Context.getOrInsertSyncScopeID("sub_group");
  }
};

} // namespace

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;
  SyncScopeIDs SSIDs;

  /// We need to keep track of the number we give to anonymous global values to
  /// generate the same name every time this is needed.
  mutable DenseMap<const GlobalValue *, unsigned> UnnamedGlobalIDs;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // It is basically a large switch/case delegating to all the other select
  // methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectBitcast(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectStackSave(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectStackRestore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode,
                       unsigned NegateOpcode = 0) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectAnyOrAll(Register ResVReg, const SPIRVType *ResType,
                      MachineInstr &I, unsigned OpType) const;

  bool selectAll(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectAny(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;
  bool selectSplatVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  bool selectFmix(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  bool selectFrac(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  bool selectRsqrt(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *intTy, const SPIRVType *boolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectFreeze(Register ResVReg, const SPIRVType *ResType,
                    MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectAllocaArray(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  bool selectLog10(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectSpvThreadId(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectUnmergeValues(MachineInstr &I) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildZerosValF(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool wrapIntoSpecConstantOp(MachineInstr &I,
                              SmallVector<Register> &CompositeArgs) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  SSIDs = SyncScopeIDs(MF.getFunction().getContext());
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
      Register DstReg = I.getOperand(0).getReg();
      Register SrcReg = I.getOperand(1).getReg();
      auto *Def = MRI->getVRegDef(SrcReg);
      if (isTypeFoldingSupported(Def->getOpcode())) {
        if (MRI->getType(DstReg).isPointer())
          MRI->setType(DstReg, LLT::scalar(32));
        bool Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->replaceRegWith(SrcReg, DstReg);
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      for (unsigned i = 0; i < I.getNumDefs(); ++i)
        MRI->setType(I.getOperand(i).getReg(), LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (isTypeFoldingSupported(Opcode) && Opcode != TargetOpcode::G_CONSTANT)
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);
  case TargetOpcode::G_SPLAT_VECTOR:
    return selectSplatVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::NMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::NMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);
  case TargetOpcode::G_FTAN:
    return selectExtInst(ResVReg, ResType, I, CL::tan, GL::Tan);
  case TargetOpcode::G_FACOS:
    return selectExtInst(ResVReg, ResType, I, CL::acos, GL::Acos);
  case TargetOpcode::G_FASIN:
    return selectExtInst(ResVReg, ResType, I, CL::asin, GL::Asin);
  case TargetOpcode::G_FATAN:
    return selectExtInst(ResVReg, ResType, I, CL::atan, GL::Atan);
  case TargetOpcode::G_FCOSH:
    return selectExtInst(ResVReg, ResType, I, CL::cosh, GL::Cosh);
  case TargetOpcode::G_FSINH:
    return selectExtInst(ResVReg, ResType, I, CL::sinh, GL::Sinh);
  case TargetOpcode::G_FTANH:
    return selectExtInst(ResVReg, ResType, I, CL::tanh, GL::Tanh);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_add_sat);
  case TargetOpcode::G_UADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_add_sat);
  case TargetOpcode::G_SSUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_sub_sat);
  case TargetOpcode::G_USUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_sub_sat);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectBitcast(ResVReg, ResType, I);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating
    // global variables, initialized with constant expressions like GV + Const
    // (see test opencl/basic/progvar_prog_scope_init.ll).
    // TODO: extend the handler once we have other cases.
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    (void)II;
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with negative value operand
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate);
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I);

  default:
    return false;
  }
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      for (unsigned i = 1; i < NumOps; ++i)
        MIB.add(I.getOperand(i));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  if (STI.isOpenCLEnv() && I.getOperand(1).isReg()) {
    Register SrcReg = I.getOperand(1).getReg();
    bool IsGV = false;
    for (MachineRegisterInfo::def_instr_iterator DefIt =
             MRI->def_instr_begin(SrcReg);
         DefIt != MRI->def_instr_end(); DefIt = std::next(DefIt)) {
      if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
        IsGV = true;
        break;
      }
    }
    if (IsGV) {
      uint32_t SpecOpcode = 0;
      switch (Opcode) {
      case SPIRV::OpConvertPtrToU:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertPtrToU);
        break;
      case SPIRV::OpConvertUToPtr:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertUToPtr);
        break;
      }
      if (SpecOpcode)
        return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(SpecOpcode)
            .addUse(SrcReg)
            .constrainAllUses(TII, TRI, RBI);
    }
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  Register OpReg = I.getOperand(1).getReg();
  SPIRVType *OpType = OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
  if (!GR.isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

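// Map an LLVM sync scope ID onto the corresponding SPIR-V memory scope.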
static SPIRV::Scope::Scope getScope(SyncScope::ID Ord,
                                    const SyncScopeIDs &SSIDs) {
  if (Ord == SyncScope::SingleThread || Ord == SSIDs.Work_ItemSSID)
    return SPIRV::Scope::Invocation;
  else if (Ord == SyncScope::System || Ord == SSIDs.DeviceSSID)
    return SPIRV::Scope::Device;
  else if (Ord == SSIDs.WorkGroupSSID)
    return SPIRV::Scope::Workgroup;
  else if (Ord == SSIDs.AllSVMDevicesSSID)
    return SPIRV::Scope::CrossDevice;
  else if (Ord == SSIDs.SubGroupSSID)
    return SPIRV::Scope::Subgroup;
  else
    // OpenCL approach is: "The functions that do not have memory_scope
    // argument have the same semantics as the corresponding functions with
    // the memory_scope argument set to memory_scope_device." See ref.:
    // https://registry.khronos.org/OpenCL/specs/3.0-unified/html/OpenCL_C.html#atomic-functions
    // In our case if the scope is unknown, assuming that SPIR-V code is to be
    // consumed in an OpenCL environment, we use the same approach and set the
    // scope to memory_scope_device.
    return SPIRV::Scope::Device;
}

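// Encode a MachineMemOperand as a SPIR-V MemoryOperand bitmask on the
// instruction being built, adding the alignment literal when the Aligned
// bit is set.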
static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

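// Same encoding, but starting from raw memory-operand flags; used when a
// load/store intrinsic carries its flags as an immediate operand instead of
// a MachineMemOperand.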
static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}

bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

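// Lower G_MEMCPY/G_MEMMOVE/G_MEMSET to OpCopyMemorySized. For G_MEMSET the
// source is first materialized as a UniformConstant global array filled with
// the splat value, then bitcast to a byte pointer.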
bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConstIntArray(Val, Num, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we have such GV, add init, use buildGlobalVariable.
    Function &CurFunction = GR.CurMF->getFunction();
    Type *LLVMArrTy =
        ArrayType::get(IntegerType::get(CurFunction.getContext(), 8), Num);
    // Module takes ownership of the global var.
    GlobalVariable *GV = new GlobalVariable(*CurFunction.getParent(), LLVMArrTy,
                                            true, GlobalValue::InternalLinkage,
                                            Constant::getNullValue(LLVMArrTy));
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}

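// Common lowering for G_ATOMICRMW_*: scope and memory-semantics constants
// are materialized first; a nonzero NegateOpcode is applied to the value
// operand beforehand (used to express FSub via OpAtomicFAddEXT).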
bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope =
      static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), SSIDs));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  bool Result = false;
  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    // Translation with negative value operand is requested.
    Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result |= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
    ValueReg = TmpReg;
  }

  Result |= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(Ptr)
                .addUse(ScopeReg)
                .addUse(MemSemReg)
                .addUse(ValueReg)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

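// Lower G_UNMERGE_VALUES of a vector into one OpCompositeExtract per scalar
// result, assigning the scalar type to any result that never got one.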
bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType =
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");

  SPIRVType *ScalarType =
      GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
  MachineBasicBlock &BB = *I.getParent();
  bool Res = false;
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
    if (!ResType) {
      // There were no "assign type" actions; let's fix this now.
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, &SPIRV::IDRegClass);
      MRI->setType(ResVReg, LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    }
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(SrcReg)
            .addImm(static_cast<int64_t>(i));
    Res |= MIB.constrainAllUses(TII, TRI, RBI);
  }
  return Res;
}

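// G_FENCE becomes OpMemoryBarrier with the scope and semantics operands
// built as i32 constants.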
bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord, SSIDs));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}

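// G_ATOMIC_CMPXCHG expands to OpAtomicCompareExchange plus an OpIEqual that
// computes the success flag; the two values are then packed into the result
// struct with a pair of OpCompositeInserts.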
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope =
        static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), SSIDs));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::DeviceOnlyINTEL:
  case SPIRV::StorageClass::HostOnlyINTEL:
    return true;
  default:
    return false;
  }
}

// In SPIR-V address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast user is single and in OpConstantComposite or
  // OpVariable, we should select OpSpecConstantOp.
  auto UIs = MRI->use_instructions(ResVReg);
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
            .addUse(NewReg)
            .constrainAllUses(TII, TRI, RBI);
    return Result;
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Don't generate a cast between identical storage classes.
  if (SrcSC == DstSC)
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(SrcPtr)
        .constrainAllUses(TII, TRI, RBI);

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }

  // Check if instructions from the SPV_INTEL_usm_storage_classes extension may
  // be applied.
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpPtrCastToCrossWorkgroupINTEL);
  if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpCrossWorkgroupCastToPtrINTEL);
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::Generic)
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  if (SrcSC == SPIRV::StorageClass::Generic && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);

  // Bitcast for pointers requires that the address spaces match.
  return false;
}

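// Map a comparison predicate onto the matching SPIR-V opcode; separate
// tables follow for float, integer, pointer, and boolean operands.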
static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

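// Shared lowering for the "any"/"all" intrinsics: scalar bools are copied
// through as-is, other inputs are compared against zero (OpFOrdNotEqual or
// OpINotEqual), and vector results are reduced with OpAny/OpAll.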
bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I,
                                              unsigned OpAnyOrAll) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);

  if (!InputType)
    report_fatal_error("Input Type could not be determined.");

  bool IsBoolTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeBool);
  bool IsVectorTy = InputType->getOpcode() == SPIRV::OpTypeVector;
  if (IsBoolTy && !IsVectorTy) {
    assert(ResVReg == I.getOperand(0).getReg());
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(InputRegister)
        .constrainAllUses(TII, TRI, RBI);
  }

  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
  unsigned SpirvNotEqualId =
      IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
  SPIRVType *SpvBoolScalarTy = GR.getOrCreateSPIRVBoolType(I, TII);
  SPIRVType *SpvBoolTy = SpvBoolScalarTy;
  Register NotEqualReg = ResVReg;

  if (IsVectorTy) {
    NotEqualReg = IsBoolTy ? InputRegister
                           : MRI->createVirtualRegister(&SPIRV::IDRegClass);
    const unsigned NumElts = InputType->getOperand(2).getImm();
    SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts, I, TII);
  }

  if (!IsBoolTy) {
    Register ConstZeroReg =
        IsFloatTy ? buildZerosValF(InputType, I) : buildZerosVal(InputType, I);

    BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId))
        .addDef(NotEqualReg)
        .addUse(GR.getSPIRVTypeID(SpvBoolTy))
        .addUse(InputRegister)
        .addUse(ConstZeroReg)
        .constrainAllUses(TII, TRI, RBI);
  }

  if (!IsVectorTy)
    return true;

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
      .addUse(NotEqualReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectAll(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAll);
}

bool SPIRVInstructionSelector::selectAny(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAny);
}

bool SPIRVInstructionSelector::selectFmix(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  assert(I.getNumOperands() == 5);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  assert(I.getOperand(4).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::FMix)
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFrac(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::Fract)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectRsqrt(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::InverseSqrt)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I) const {
  // There is no way to implement `freeze` correctly without support on the
  // SPIR-V standard side, but we may at least address a simple (static) case
  // when undef/poison value presence is obvious. The main benefit of even
  // incomplete `freeze` support is preventing the translation from crashing
  // due to lack of support at the legalization and instruction selection
  // steps.
  if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
    return false;
  Register OpReg = I.getOperand(1).getReg();
  if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
    Register Reg;
    switch (Def->getOpcode()) {
    case SPIRV::ASSIGN_TYPE:
      if (MachineInstr *AssignToDef =
              MRI->getVRegDef(Def->getOperand(1).getReg())) {
        if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
          Reg = Def->getOperand(2).getReg();
      }
      break;
    case SPIRV::OpUndef:
      Reg = Def->getOperand(1).getReg();
      break;
    }
    unsigned DestOpCode;
    if (Reg.isValid()) {
      DestOpCode = SPIRV::OpConstantNull;
    } else {
      DestOpCode = TargetOpcode::COPY;
      Reg = OpReg;
    }
    return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
        .addDef(I.getOperand(0).getReg())
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  return false;
}

bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only const case is supported for now.
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}

static unsigned getArrayComponentCount(MachineRegisterInfo *MRI,
                                       const SPIRVType *ResType) {
  Register OpReg = ResType->getOperand(2).getReg();
  SPIRVType *OpDef = MRI->getVRegDef(OpReg);
  if (!OpDef)
    return 0;
  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
      OpDef->getOperand(1).isReg()) {
    if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
      OpDef = RefDef;
  }
  unsigned N = OpDef->getOpcode() == TargetOpcode::G_CONSTANT
                   ? OpDef->getOperand(1).getCImm()->getValue().getZExtValue()
                   : 0;
  return N;
}

// Return true if the type represents a constant register
static bool isConstReg(MachineRegisterInfo *MRI, SPIRVType *OpDef,
                       SmallPtrSet<SPIRVType *, 4> &Visited) {
  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
      OpDef->getOperand(1).isReg()) {
    if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
      OpDef = RefDef;
  }

  if (Visited.contains(OpDef))
    return true;
  Visited.insert(OpDef);

  unsigned Opcode = OpDef->getOpcode();
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return true;
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return cast<GIntrinsic>(*OpDef).getIntrinsicID() ==
           Intrinsic::spv_const_composite;
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_SPLAT_VECTOR: {
    for (unsigned i = OpDef->getNumExplicitDefs(); i < OpDef->getNumOperands();
         i++) {
      SPIRVType *OpNestedDef =
          OpDef->getOperand(i).isReg()
              ? MRI->getVRegDef(OpDef->getOperand(i).getReg())
              : nullptr;
      if (OpNestedDef && !isConstReg(MRI, OpNestedDef, Visited))
        return false;
    }
    return true;
  }
  }
  return false;
}

// Return true if the virtual register represents a constant
static bool isConstReg(MachineRegisterInfo *MRI, Register OpReg) {
  SmallPtrSet<SPIRVType *, 4> Visited;
  if (SPIRVType *OpDef = MRI->getVRegDef(OpReg))
    return isConstReg(MRI, OpDef, Visited);
  return false;
}
1513
1514bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
1515 const SPIRVType *ResType,
1516 MachineInstr &I) const {
1517 unsigned N = 0;
1518 if (ResType->getOpcode() == SPIRV::OpTypeVector)
1519 N = GR.getScalarOrVectorComponentCount(ResType);
1520 else if (ResType->getOpcode() == SPIRV::OpTypeArray)
1521 N = getArrayComponentCount(MRI, ResType);
1522 else
1523 report_fatal_error("Cannot select G_SPLAT_VECTOR with a non-vector result");
1524
1525 unsigned OpIdx = I.getNumExplicitDefs();
1526 if (!I.getOperand(OpIdx).isReg())
1527 report_fatal_error("Unexpected argument in G_SPLAT_VECTOR");
1528
1529 // Check whether we may construct a constant vector.
1530 Register OpReg = I.getOperand(OpIdx).getReg();
1531 bool IsConst = isConstReg(MRI, OpReg);
1532
1533 if (!IsConst && N < 2)
1534 report_fatal_error(
1535 "There must be at least two constituent operands in a vector");
1536
1537 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
1538 TII.get(IsConst ? SPIRV::OpConstantComposite
1539 : SPIRV::OpCompositeConstruct))
1540 .addDef(ResVReg)
1541 .addUse(GR.getSPIRVTypeID(ResType));
1542 for (unsigned i = 0; i < N; ++i)
1543 MIB.addUse(OpReg);
1544 return MIB.constrainAllUses(TII, TRI, RBI);
1545}
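// For illustration, splatting a constant scalar %c into a 3-element vector
// yields roughly "%res = OpConstantComposite %v3uint %c %c %c", while a
// non-constant scalar %x yields "%res = OpCompositeConstruct %v3float %x %x %x".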
1546
1547bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
1548 const SPIRVType *ResType,
1549 unsigned CmpOpc,
1550 MachineInstr &I) const {
1551 Register Cmp0 = I.getOperand(2).getReg();
1552 Register Cmp1 = I.getOperand(3).getReg();
1553 assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
1554 GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
1555 "CMP operands should have the same type");
1556 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
1557 .addDef(ResVReg)
1558 .addUse(GR.getSPIRVTypeID(ResType))
1559 .addUse(Cmp0)
1560 .addUse(Cmp1)
1561 .constrainAllUses(TII, TRI, RBI);
1562}
1563
1564bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
1565 const SPIRVType *ResType,
1566 MachineInstr &I) const {
1567 auto Pred = I.getOperand(1).getPredicate();
1568 unsigned CmpOpc;
1569
1570 Register CmpOperand = I.getOperand(2).getReg();
1571 if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
1572 CmpOpc = getPtrCmpOpcode(Pred);
1573 else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
1574 CmpOpc = getBoolCmpOpcode(Pred);
1575 else
1576 CmpOpc = getICmpOpcode(Pred);
1577 return selectCmp(ResVReg, ResType, CmpOpc, I);
1578}
1579
1580void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
1581 const MachineInstr &I,
1582 int OpIdx) const {
1583 assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
1584 "Expected G_FCONSTANT");
1585 const ConstantFP *FPImm = I.getOperand(1).getFPImm();
1586 addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
1587}
1588
1589void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
1590 const MachineInstr &I,
1591 int OpIdx) const {
1592 assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1593 "Expected G_CONSTANT");
1594 addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
1595}
1596
1597Register
1598SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
1599 const SPIRVType *ResType) const {
1600 Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
1601 const SPIRVType *SpvI32Ty =
1602 ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
1603 // Find a constant in DT or build a new one.
1604 auto ConstInt = ConstantInt::get(LLVMTy, Val);
1605 Register NewReg = GR.find(ConstInt, GR.CurMF);
1606 if (!NewReg.isValid()) {
1607 NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
1608 GR.add(ConstInt, GR.CurMF, NewReg);
1609 MachineInstr *MI;
1610 MachineBasicBlock &BB = *I.getParent();
1611 if (Val == 0) {
1612 MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1613 .addDef(NewReg)
1614 .addUse(GR.getSPIRVTypeID(SpvI32Ty));
1615 } else {
1616 MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1617 .addDef(NewReg)
1618 .addUse(GR.getSPIRVTypeID(SpvI32Ty))
1619 .addImm(APInt(32, Val).getZExtValue());
1620 }
1621 constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
1622 }
1623 return NewReg;
1624}
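// For illustration, buildI32Constant(0, I) reuses or emits roughly
// "%c = OpConstantNull %uint", while any non-zero value emits
// "%c = OpConstantI %uint <value>"; the GR.find/GR.add pair deduplicates the
// constant within the current machine function.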
1625
1626bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
1627 const SPIRVType *ResType,
1628 MachineInstr &I) const {
1629 unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
1630 return selectCmp(ResVReg, ResType, CmpOp, I);
1631}
1632
1633Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
1634 MachineInstr &I) const {
1635 // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
1636 bool ZeroAsNull = STI.isOpenCLEnv();
1637 if (ResType->getOpcode() == SPIRV::OpTypeVector)
1638 return GR.getOrCreateConstVector(0UL, I, ResType, TII, ZeroAsNull);
1639 return GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
1640}
1641
1642static APFloat getZeroFP(const Type *LLVMFloatTy) {
1643 if (!LLVMFloatTy)
1644 return APFloat::getZero(APFloat::IEEEsingle());
1645 switch (LLVMFloatTy->getScalarType()->getTypeID()) {
1646 case Type::HalfTyID:
1647 return APFloat::getZero(APFloat::IEEEhalf());
1648 default:
1649 case Type::FloatTyID:
1650 return APFloat::getZero(APFloat::IEEEsingle());
1651 case Type::DoubleTyID:
1652 return APFloat::getZero(APFloat::IEEEdouble());
1653 }
1654}
1655
1656Register SPIRVInstructionSelector::buildZerosValF(const SPIRVType *ResType,
1657 MachineInstr &I) const {
1658 // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
1659 bool ZeroAsNull = STI.isOpenCLEnv();
1660 APFloat VZero = getZeroFP(GR.getTypeForSPIRVType(ResType));
1661 if (ResType->getOpcode() == SPIRV::OpTypeVector)
1662 return GR.getOrCreateConstVector(VZero, I, ResType, TII, ZeroAsNull);
1663 return GR.getOrCreateConstFP(VZero, I, ResType, TII, ZeroAsNull);
1664}
1665
1666Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
1667 const SPIRVType *ResType,
1668 MachineInstr &I) const {
1669 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1670 APInt One =
1671 AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
1672 if (ResType->getOpcode() == SPIRV::OpTypeVector)
1673 return GR.getOrCreateConstVector(One.getZExtValue(), I, ResType, TII);
1674 return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
1675}
1676
1677bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
1678 const SPIRVType *ResType,
1679 MachineInstr &I,
1680 bool IsSigned) const {
1681 // To extend a bool, we need to use OpSelect between constants.
1682 Register ZeroReg = buildZerosVal(ResType, I);
1683 Register OneReg = buildOnesVal(IsSigned, ResType, I);
1684 bool IsScalarBool =
1685 GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
1686 unsigned Opcode =
1687 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
1688 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1689 .addDef(ResVReg)
1690 .addUse(GR.getSPIRVTypeID(ResType))
1691 .addUse(I.getOperand(1).getReg())
1692 .addUse(OneReg)
1693 .addUse(ZeroReg)
1694 .constrainAllUses(TII, TRI, RBI);
1695}
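// For illustration, zero-extending a scalar bool %b to i32 yields roughly
// "%res = OpSelect %uint %b %uint_1 %uint_0"; for a sign extension the "one"
// operand is an all-ones constant instead.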
1696
1697bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
1698 const SPIRVType *ResType,
1699 MachineInstr &I, bool IsSigned,
1700 unsigned Opcode) const {
1701 Register SrcReg = I.getOperand(1).getReg();
1702 // We can convert a bool value directly to a float type without OpConvert*ToF;
1703 // however, the translator generates OpSelect+OpConvert*ToF, so we do the same.
1704 if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
1705 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1706 SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
1707 if (ResType->getOpcode() == SPIRV::OpTypeVector) {
1708 const unsigned NumElts = ResType->getOperand(2).getImm();
1709 TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
1710 }
1711 SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1712 selectSelect(SrcReg, TmpType, I, false);
1713 }
1714 return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
1715}
1716
1717bool SPIRVInstructionSelector::selectExt(Register ResVReg,
1718 const SPIRVType *ResType,
1719 MachineInstr &I, bool IsSigned) const {
1720 Register SrcReg = I.getOperand(1).getReg();
1721 if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool))
1722 return selectSelect(ResVReg, ResType, I, IsSigned);
1723
1724 SPIRVType *SrcType = GR.getSPIRVTypeForVReg(SrcReg);
1725 if (SrcType == ResType)
1726 return BuildMI(*I.getParent(), I, I.getDebugLoc(),
1727 TII.get(TargetOpcode::COPY))
1728 .addDef(ResVReg)
1729 .addUse(SrcReg)
1730 .constrainAllUses(TII, TRI, RBI);
1731
1732 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1733 return selectUnOp(ResVReg, ResType, I, Opcode);
1734}
1735
1736bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
1737 Register ResVReg,
1738 MachineInstr &I,
1739 const SPIRVType *IntTy,
1740 const SPIRVType *BoolTy) const {
1741 // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
1742 Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1743 bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
1744 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
1745 Register Zero = buildZerosVal(IntTy, I);
1746 Register One = buildOnesVal(false, IntTy, I);
1747 MachineBasicBlock &BB = *I.getParent();
1748 BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
1749 .addDef(BitIntReg)
1750 .addUse(GR.getSPIRVTypeID(IntTy))
1751 .addUse(IntReg)
1752 .addUse(One)
1753 .constrainAllUses(TII, TRI, RBI);
1754 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
1755 .addDef(ResVReg)
1756 .addUse(GR.getSPIRVTypeID(BoolTy))
1757 .addUse(BitIntReg)
1758 .addUse(Zero)
1759 .constrainAllUses(TII, TRI, RBI);
1760}
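// For illustration, truncating %x of type i32 to i1 yields roughly:
//   %bit = OpBitwiseAnd %uint %x %uint_1
//   %res = OpINotEqual %bool %bit %uint_0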
1761
1762bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
1763 const SPIRVType *ResType,
1764 MachineInstr &I) const {
1765 Register IntReg = I.getOperand(1).getReg();
1766 const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
1767 if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool))
1768 return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
1769 if (ArgType == ResType)
1770 return BuildMI(*I.getParent(), I, I.getDebugLoc(),
1771 TII.get(TargetOpcode::COPY))
1772 .addDef(ResVReg)
1773 .addUse(IntReg)
1774 .constrainAllUses(TII, TRI, RBI);
1775 bool IsSigned = GR.isScalarOrVectorSigned(ResType);
1776 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1777 return selectUnOp(ResVReg, ResType, I, Opcode);
1778}
1779
1780bool SPIRVInstructionSelector::selectConst(Register ResVReg,
1781 const SPIRVType *ResType,
1782 const APInt &Imm,
1783 MachineInstr &I) const {
1784 unsigned TyOpcode = ResType->getOpcode();
1785 assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
1786 MachineBasicBlock &BB = *I.getParent();
1787 if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
1788 Imm.isZero())
1789 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
1790 .addDef(ResVReg)
1791 .addUse(GR.getSPIRVTypeID(ResType))
1792 .constrainAllUses(TII, TRI, RBI);
1793 if (TyOpcode == SPIRV::OpTypeInt) {
1794 assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
1795 Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
1796 if (Reg == ResVReg)
1797 return true;
1798 return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
1799 .addDef(ResVReg)
1800 .addUse(Reg)
1801 .constrainAllUses(TII, TRI, RBI);
1802 }
1803 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
1804 .addDef(ResVReg)
1805 .addUse(GR.getSPIRVTypeID(ResType));
1806 // <=32-bit integers should be caught by the sdag pattern.
1807 assert(Imm.getBitWidth() > 32);
1808 addNumImm(Imm, MIB);
1809 return MIB.constrainAllUses(TII, TRI, RBI);
1810}
1811
1812bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
1813 const SPIRVType *ResType,
1814 MachineInstr &I) const {
1815 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
1816 .addDef(ResVReg)
1817 .addUse(GR.getSPIRVTypeID(ResType))
1818 .constrainAllUses(TII, TRI, RBI);
1819}
1820
1821static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1822 assert(MO.isReg());
1823 const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1824 if (TypeInst->getOpcode() == SPIRV::ASSIGN_TYPE) {
1825 assert(TypeInst->getOperand(1).isReg());
1826 MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1827 return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
1828 }
1829 return TypeInst->getOpcode() == SPIRV::OpConstantI;
1830}
1831
1832static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
1833 const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
1834 if (TypeInst->getOpcode() == SPIRV::OpConstantI)
1835 return TypeInst->getOperand(2).getImm();
1836 MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
1837 assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
1838 return ImmInst->getOperand(1).getCImm()->getZExtValue();
1839}
1840
1841bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
1842 const SPIRVType *ResType,
1843 MachineInstr &I) const {
1844 MachineBasicBlock &BB = *I.getParent();
1845 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
1846 .addDef(ResVReg)
1847 .addUse(GR.getSPIRVTypeID(ResType))
1848 // object to insert
1849 .addUse(I.getOperand(3).getReg())
1850 // composite to insert into
1851 .addUse(I.getOperand(2).getReg());
1852 for (unsigned i = 4; i < I.getNumOperands(); i++)
1853 MIB.addImm(foldImm(I.getOperand(i), MRI));
1854 return MIB.constrainAllUses(TII, TRI, RBI);
1855}
1856
1857bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
1858 const SPIRVType *ResType,
1859 MachineInstr &I) const {
1860 MachineBasicBlock &BB = *I.getParent();
1861 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1862 .addDef(ResVReg)
1863 .addUse(GR.getSPIRVTypeID(ResType))
1864 .addUse(I.getOperand(2).getReg());
1865 for (unsigned i = 3; i < I.getNumOperands(); i++)
1866 MIB.addImm(foldImm(I.getOperand(i), MRI));
1867 return MIB.constrainAllUses(TII, TRI, RBI);
1868}
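// For illustration, an insertvalue/extractvalue with constant indices folds
// the indices into immediates, e.g. roughly
// "%res = OpCompositeExtract %float %composite 1 0"; insertions mirror this
// with OpCompositeInsert, where the object to insert precedes the composite.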
1869
1870bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
1871 const SPIRVType *ResType,
1872 MachineInstr &I) const {
1873 if (isImm(I.getOperand(4), MRI))
1874 return selectInsertVal(ResVReg, ResType, I);
1875 MachineBasicBlock &BB = *I.getParent();
1876 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
1877 .addDef(ResVReg)
1878 .addUse(GR.getSPIRVTypeID(ResType))
1879 .addUse(I.getOperand(2).getReg())
1880 .addUse(I.getOperand(3).getReg())
1881 .addUse(I.getOperand(4).getReg())
1882 .constrainAllUses(TII, TRI, RBI);
1883}
1884
1885bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
1886 const SPIRVType *ResType,
1887 MachineInstr &I) const {
1888 if (isImm(I.getOperand(3), MRI))
1889 return selectExtractVal(ResVReg, ResType, I);
1890 MachineBasicBlock &BB = *I.getParent();
1891 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
1892 .addDef(ResVReg)
1893 .addUse(GR.getSPIRVTypeID(ResType))
1894 .addUse(I.getOperand(2).getReg())
1895 .addUse(I.getOperand(3).getReg())
1896 .constrainAllUses(TII, TRI, RBI);
1897}
1898
1899bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
1900 const SPIRVType *ResType,
1901 MachineInstr &I) const {
1902 const bool IsGEPInBounds = I.getOperand(2).getImm();
1903
1904 // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator only
1905 // relies on PtrAccessChain, so we'll try not to deviate. For Vulkan however,
1906 // we have to use Op[InBounds]AccessChain.
1907 const unsigned Opcode = STI.isVulkanEnv()
1908 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
1909 : SPIRV::OpAccessChain)
1910 : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
1911 : SPIRV::OpPtrAccessChain);
1912
1913 auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
1914 .addDef(ResVReg)
1915 .addUse(GR.getSPIRVTypeID(ResType))
1916 // Object to get a pointer to.
1917 .addUse(I.getOperand(3).getReg());
1918 // Adding indices.
1919 const unsigned StartingIndex =
1920 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
1921 ? 5
1922 : 4;
1923 for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
1924 Res.addUse(I.getOperand(i).getReg());
1925 return Res.constrainAllUses(TII, TRI, RBI);
1926}
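// For illustration, on the OpenCL path a spv_gep over %ptr with indices %i and
// %j yields roughly "%res = OpPtrAccessChain %ptr_ty %ptr %i %j"; the Vulkan
// Op[InBounds]AccessChain form skips the leading element index, which is why
// StartingIndex above is 5 instead of 4.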
1927
1928// Maybe wrap a value into OpSpecConstantOp
1929bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
1930 MachineInstr &I, SmallVector<Register> &CompositeArgs) const {
1931 bool Result = true;
1932 unsigned Lim = I.getNumExplicitOperands();
1933 for (unsigned i = I.getNumExplicitDefs() + 1; i < Lim; ++i) {
1934 Register OpReg = I.getOperand(i).getReg();
1935 SPIRVType *OpDefine = MRI->getVRegDef(OpReg);
1936 SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
1937 SmallPtrSet<SPIRVType *, 4> Visited;
1938 if (!OpDefine || !OpType || isConstReg(MRI, OpDefine, Visited) ||
1939 OpDefine->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
1940 GR.isAggregateType(OpType)) {
1941 // The case of G_ADDRSPACE_CAST inside spv_const_composite() is processed
1942 // by selectAddrSpaceCast()
1943 CompositeArgs.push_back(OpReg);
1944 continue;
1945 }
1946 MachineFunction *MF = I.getMF();
1947 Register WrapReg = GR.find(OpDefine, MF);
1948 if (WrapReg.isValid()) {
1949 CompositeArgs.push_back(WrapReg);
1950 continue;
1951 }
1952 // Create a new register for the wrapper
1953 WrapReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1954 GR.add(OpDefine, MF, WrapReg);
1955 CompositeArgs.push_back(WrapReg);
1956 // Decorate the wrapper register and generate a new instruction
1957 MRI->setType(WrapReg, LLT::pointer(0, 32));
1958 GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
1959 MachineBasicBlock &BB = *I.getParent();
1960 Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
1961 .addDef(WrapReg)
1962 .addUse(GR.getSPIRVTypeID(OpType))
1963 .addImm(static_cast<uint32_t>(SPIRV::Opcode::Bitcast))
1964 .addUse(OpReg)
1965 .constrainAllUses(TII, TRI, RBI);
1966 if (!Result)
1967 break;
1968 }
1969 return Result;
1970}
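// For illustration, a non-constant pointer operand %p of spv_const_composite
// is wrapped (once per machine function) as roughly
// "%w = OpSpecConstantOp %ty Bitcast %p", so that the enclosing
// OpConstantComposite still receives a constant operand.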
1971
1972bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
1973 const SPIRVType *ResType,
1974 MachineInstr &I) const {
1975 MachineBasicBlock &BB = *I.getParent();
1976 Intrinsic::ID IID = cast<GIntrinsic>(I).getIntrinsicID();
1977 switch (IID) {
1978 case Intrinsic::spv_load:
1979 return selectLoad(ResVReg, ResType, I);
1980 case Intrinsic::spv_store:
1981 return selectStore(I);
1982 case Intrinsic::spv_extractv:
1983 return selectExtractVal(ResVReg, ResType, I);
1984 case Intrinsic::spv_insertv:
1985 return selectInsertVal(ResVReg, ResType, I);
1986 case Intrinsic::spv_extractelt:
1987 return selectExtractElt(ResVReg, ResType, I);
1988 case Intrinsic::spv_insertelt:
1989 return selectInsertElt(ResVReg, ResType, I);
1990 case Intrinsic::spv_gep:
1991 return selectGEP(ResVReg, ResType, I);
1992 case Intrinsic::spv_unref_global:
1993 case Intrinsic::spv_init_global: {
1994 MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
1995 MachineInstr *Init = I.getNumExplicitOperands() > 2
1996 ? MRI->getVRegDef(I.getOperand(2).getReg())
1997 : nullptr;
1998 assert(MI);
1999 return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
2000 }
2001 case Intrinsic::spv_undef: {
2002 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
2003 .addDef(ResVReg)
2004 .addUse(GR.getSPIRVTypeID(ResType));
2005 return MIB.constrainAllUses(TII, TRI, RBI);
2006 }
2007 case Intrinsic::spv_const_composite: {
2008 // If no values are attached, the composite is a null constant.
2009 bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
2010 // Select a proper instruction.
2011 unsigned Opcode = SPIRV::OpConstantNull;
2012 SmallVector<Register> CompositeArgs;
2013 if (!IsNull) {
2014 Opcode = SPIRV::OpConstantComposite;
2015 if (!wrapIntoSpecConstantOp(I, CompositeArgs))
2016 return false;
2017 }
2018 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
2019 .addDef(ResVReg)
2020 .addUse(GR.getSPIRVTypeID(ResType));
2021 // Skip the type MD node that we already used when generating assign.type for this.
2022 if (!IsNull) {
2023 for (Register OpReg : CompositeArgs)
2024 MIB.addUse(OpReg);
2025 }
2026 return MIB.constrainAllUses(TII, TRI, RBI);
2027 }
2028 case Intrinsic::spv_assign_name: {
2029 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
2030 MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
2031 for (unsigned i = I.getNumExplicitDefs() + 2;
2032 i < I.getNumExplicitOperands(); ++i) {
2033 MIB.addImm(I.getOperand(i).getImm());
2034 }
2035 return MIB.constrainAllUses(TII, TRI, RBI);
2036 }
2037 case Intrinsic::spv_switch: {
2038 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
2039 for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
2040 if (I.getOperand(i).isReg())
2041 MIB.addReg(I.getOperand(i).getReg());
2042 else if (I.getOperand(i).isCImm())
2043 addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
2044 else if (I.getOperand(i).isMBB())
2045 MIB.addMBB(I.getOperand(i).getMBB());
2046 else
2047 llvm_unreachable("Unexpected OpSwitch operand");
2048 }
2049 return MIB.constrainAllUses(TII, TRI, RBI);
2050 }
2051 case Intrinsic::spv_cmpxchg:
2052 return selectAtomicCmpXchg(ResVReg, ResType, I);
2053 case Intrinsic::spv_unreachable:
2054 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
2055 break;
2056 case Intrinsic::spv_alloca:
2057 return selectFrameIndex(ResVReg, ResType, I);
2058 case Intrinsic::spv_alloca_array:
2059 return selectAllocaArray(ResVReg, ResType, I);
2060 case Intrinsic::spv_assume:
2061 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
2062 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
2063 .addUse(I.getOperand(1).getReg());
2064 break;
2065 case Intrinsic::spv_expect:
2066 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
2067 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
2068 .addDef(ResVReg)
2069 .addUse(GR.getSPIRVTypeID(ResType))
2070 .addUse(I.getOperand(2).getReg())
2071 .addUse(I.getOperand(3).getReg());
2072 break;
2073 case Intrinsic::spv_thread_id:
2074 return selectSpvThreadId(ResVReg, ResType, I);
2075 case Intrinsic::spv_all:
2076 return selectAll(ResVReg, ResType, I);
2077 case Intrinsic::spv_any:
2078 return selectAny(ResVReg, ResType, I);
2079 case Intrinsic::spv_lerp:
2080 return selectFmix(ResVReg, ResType, I);
2081 case Intrinsic::spv_frac:
2082 return selectFrac(ResVReg, ResType, I);
2083 case Intrinsic::spv_rsqrt:
2084 return selectRsqrt(ResVReg, ResType, I);
2085 case Intrinsic::spv_lifetime_start:
2086 case Intrinsic::spv_lifetime_end: {
2087 unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
2088 : SPIRV::OpLifetimeStop;
2089 int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
2090 Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
2091 unsigned PointeeOpType = GR.getPointeeTypeOp(PtrReg);
2092 bool IsNonvoidPtr = PointeeOpType != 0 && PointeeOpType != SPIRV::OpTypeVoid;
2093 if (Size == -1 || IsNonvoidPtr)
2094 Size = 0;
2095 BuildMI(BB, I, I.getDebugLoc(), TII.get(Op)).addUse(PtrReg).addImm(Size);
2096 } break;
2097 default: {
2098 std::string DiagMsg;
2099 raw_string_ostream OS(DiagMsg);
2100 I.print(OS);
2101 DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
2102 report_fatal_error(DiagMsg.c_str(), false);
2103 }
2104 }
2105 return true;
2106}
2107
2108bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
2109 const SPIRVType *ResType,
2110 MachineInstr &I) const {
2111 // There was an allocation size parameter to the allocation instruction
2112 // that is not 1.
2113 MachineBasicBlock &BB = *I.getParent();
2114 return BuildMI(BB, I, I.getDebugLoc(),
2115 TII.get(SPIRV::OpVariableLengthArrayINTEL))
2116 .addDef(ResVReg)
2117 .addUse(GR.getSPIRVTypeID(ResType))
2118 .addUse(I.getOperand(2).getReg())
2119 .constrainAllUses(TII, TRI, RBI);
2120}
2121
2122bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
2123 const SPIRVType *ResType,
2124 MachineInstr &I) const {
2125 // Change order of instructions if needed: all OpVariable instructions in a
2126 // function must be the first instructions in the first block
2127 MachineFunction *MF = I.getParent()->getParent();
2128 MachineBasicBlock *MBB = &MF->front();
2129 auto It = MBB->SkipPHIsAndLabels(MBB->begin()), E = MBB->end();
2130 bool IsHeader = false;
2131 unsigned Opcode;
2132 for (; It != E && It != I; ++It) {
2133 Opcode = It->getOpcode();
2134 if (Opcode == SPIRV::OpFunction || Opcode == SPIRV::OpFunctionParameter) {
2135 IsHeader = true;
2136 } else if (IsHeader &&
2137 !(Opcode == SPIRV::ASSIGN_TYPE || Opcode == SPIRV::OpLabel)) {
2138 ++It;
2139 break;
2140 }
2141 }
2142 return BuildMI(*MBB, It, It->getDebugLoc(), TII.get(SPIRV::OpVariable))
2143 .addDef(ResVReg)
2144 .addUse(GR.getSPIRVTypeID(ResType))
2145 .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
2146 .constrainAllUses(TII, TRI, RBI);
2147}
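// For illustration, a frame index selects to roughly
// "%res = OpVariable %ptr_Function_ty Function", hoisted past the
// OpFunction/OpFunctionParameter/OpLabel header into the entry block, as the
// loop above requires.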
2148
2149bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
2150 // InstructionSelector walks backwards through the instructions. We can use
2151 // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
2152 // first, so can generate an OpBranchConditional here. If there is no
2153 // G_BRCOND, we just use OpBranch for a regular unconditional branch.
2154 const MachineInstr *PrevI = I.getPrevNode();
2155 MachineBasicBlock &MBB = *I.getParent();
2156 if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
2157 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
2158 .addUse(PrevI->getOperand(0).getReg())
2159 .addMBB(PrevI->getOperand(1).getMBB())
2160 .addMBB(I.getOperand(0).getMBB())
2161 .constrainAllUses(TII, TRI, RBI);
2162 }
2163 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
2164 .addMBB(I.getOperand(0).getMBB())
2165 .constrainAllUses(TII, TRI, RBI);
2166}
2167
2168bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
2169 // InstructionSelector walks backwards through the instructions. For an
2170 // explicit conditional branch with no fallthrough, we use both a G_BR and a
2171 // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
2172 // generate the OpBranchConditional in selectBranch above.
2173 //
2174 // If an OpBranchConditional has been generated, we simply return, as the work
2175 // is already done. If there is no OpBranchConditional, LLVM must be relying on
2176 // implicit fallthrough to the next basic block, so we need to create an
2177 // OpBranchConditional with an explicit "false" argument pointing to the next
2178 // basic block that LLVM would fall through to.
2179 const MachineInstr *NextI = I.getNextNode();
2180 // Check if this has already been successfully selected.
2181 if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
2182 return true;
2183 // Must be relying on implicit block fallthrough, so generate an
2184 // OpBranchConditional with the "next" basic block as the "false" target.
2185 MachineBasicBlock &MBB = *I.getParent();
2186 unsigned NextMBBNum = MBB.getNextNode()->getNumber();
2187 MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
2188 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
2189 .addUse(I.getOperand(0).getReg())
2190 .addMBB(I.getOperand(1).getMBB())
2191 .addMBB(NextMBB)
2192 .constrainAllUses(TII, TRI, RBI);
2193}
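// For illustration, a "G_BRCOND %cond, %bb.then" that falls through to
// %bb.next selects to roughly "OpBranchConditional %cond %bb.then %bb.next".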
2194
2195bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
2196 const SPIRVType *ResType,
2197 MachineInstr &I) const {
2198 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
2199 .addDef(ResVReg)
2200 .addUse(GR.getSPIRVTypeID(ResType));
2201 const unsigned NumOps = I.getNumOperands();
2202 for (unsigned i = 1; i < NumOps; i += 2) {
2203 MIB.addUse(I.getOperand(i + 0).getReg());
2204 MIB.addMBB(I.getOperand(i + 1).getMBB());
2205 }
2206 return MIB.constrainAllUses(TII, TRI, RBI);
2207}
2208
2209bool SPIRVInstructionSelector::selectGlobalValue(
2210 Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
2211 // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
2212 MachineIRBuilder MIRBuilder(I);
2213 const GlobalValue *GV = I.getOperand(1).getGlobal();
2214 Type *GVType = toTypedPointer(GR.getDeducedGlobalValueType(GV));
2215 SPIRVType *PointerBaseType;
2216 if (GVType->isArrayTy()) {
2217 SPIRVType *ArrayElementType =
2218 GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
2219 SPIRV::AccessQualifier::ReadWrite, false);
2220 PointerBaseType = GR.getOrCreateSPIRVArrayType(
2221 ArrayElementType, GVType->getArrayNumElements(), I, TII);
2222 } else {
2223 PointerBaseType = GR.getOrCreateSPIRVType(
2224 GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
2225 }
2226 SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
2227 PointerBaseType, I, TII,
2228 addressSpaceToStorageClass(GV->getAddressSpace(), STI));
2229
2230 std::string GlobalIdent;
2231 if (!GV->hasName()) {
2232 unsigned &ID = UnnamedGlobalIDs[GV];
2233 if (ID == 0)
2234 ID = UnnamedGlobalIDs.size();
2235 GlobalIdent = "__unnamed_" + Twine(ID).str();
2236 } else {
2237 GlobalIdent = GV->getGlobalIdentifier();
2238 }
2239
2240 // Behaviour of functions as operands depends on availability of the
2241 // corresponding extension (SPV_INTEL_function_pointers):
2242 // - If there is an extension to operate with functions as operands:
2243 // We create a proper constant operand and evaluate a correct type for a
2244 // function pointer.
2245 // - Without the required extension:
2246 // We have functions as operands in tests with blocks of instructions, e.g. in
2247 // transcoding/global_block.ll. These operands are not used and should be
2248 // substituted by zero constants. Their type is expected to be always
2249 // OpTypePointer Function %uchar.
2250 if (isa<Function>(GV)) {
2251 const Constant *ConstVal = GV;
2252 MachineBasicBlock &BB = *I.getParent();
2253 Register NewReg = GR.find(ConstVal, GR.CurMF);
2254 if (!NewReg.isValid()) {
2255 Register NewReg = ResVReg;
2256 GR.add(ConstVal, GR.CurMF, NewReg);
2257 const Function *GVFun =
2258 STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
2259 ? dyn_cast<Function>(GV)
2260 : nullptr;
2261 if (GVFun) {
2262 // References to a function via function pointers generate virtual
2263 // registers without a definition. We will resolve them later, during the
2264 // module analysis stage.
2265 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2266 Register FuncVReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
2267 MRI->setRegClass(FuncVReg, &SPIRV::IDRegClass);
2268 MachineInstrBuilder MB =
2269 BuildMI(BB, I, I.getDebugLoc(),
2270 TII.get(SPIRV::OpConstantFunctionPointerINTEL))
2271 .addDef(NewReg)
2272 .addUse(GR.getSPIRVTypeID(ResType))
2273 .addUse(FuncVReg);
2274 // Record the mapping from the function pointer operand to the used Function.
2275 GR.recordFunctionPointer(&MB.getInstr()->getOperand(2), GVFun);
2276 return MB.constrainAllUses(TII, TRI, RBI);
2277 }
2278 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
2279 .addDef(NewReg)
2280 .addUse(GR.getSPIRVTypeID(ResType))
2281 .constrainAllUses(TII, TRI, RBI);
2282 }
2283 assert(NewReg != ResVReg);
2284 return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
2285 .addDef(ResVReg)
2286 .addUse(NewReg)
2287 .constrainAllUses(TII, TRI, RBI);
2288 }
2289 auto GlobalVar = cast<GlobalVariable>(GV);
2290 assert(GlobalVar->getName() != "llvm.global.annotations");
2291
2292 bool HasInit = GlobalVar->hasInitializer() &&
2293 !isa<UndefValue>(GlobalVar->getInitializer());
2294 // Skip the empty declaration for GVs with initializers until we get the
2295 // declaration with the initializer passed in.
2296 if (HasInit && !Init)
2297 return true;
2298
2299 unsigned AddrSpace = GV->getAddressSpace();
2300 SPIRV::StorageClass::StorageClass Storage =
2301 addressSpaceToStorageClass(AddrSpace, STI);
2302 bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
2303 Storage != SPIRV::StorageClass::Function;
2304 SPIRV::LinkageType::LinkageType LnkType =
2305 (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
2306 ? SPIRV::LinkageType::Import
2307 : (GV->getLinkage() == GlobalValue::LinkOnceODRLinkage &&
2308 STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
2309 ? SPIRV::LinkageType::LinkOnceODR
2310 : SPIRV::LinkageType::Export);
2311
2312 Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
2313 Storage, Init, GlobalVar->isConstant(),
2314 HasLnkTy, LnkType, MIRBuilder, true);
2315 return Reg.isValid();
2316}
2317
2318bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
2319 const SPIRVType *ResType,
2320 MachineInstr &I) const {
2321 if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
2322 return selectExtInst(ResVReg, ResType, I, CL::log10);
2323 }
2324
2325 // There is no log10 instruction in the GLSL Extended Instruction set, so it
2326 // is implemented as:
2327 // log10(x) = log2(x) * (1 / log2(10))
2328 // = log2(x) * 0.30103
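  // As an illustrative numeric check: 1 / log2(10) = 1 / 3.32193... ~ 0.30103,
  // so log10(100) ~ log2(100) * 0.30103 ~ 6.64386 * 0.30103 ~ 2.0.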
2329
2330 MachineIRBuilder MIRBuilder(I);
2331 MachineBasicBlock &BB = *I.getParent();
2332
2333 // Build log2(x).
2334 Register VarReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
2335 bool Result =
2336 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
2337 .addDef(VarReg)
2338 .addUse(GR.getSPIRVTypeID(ResType))
2339 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
2340 .addImm(GL::Log2)
2341 .add(I.getOperand(1))
2342 .constrainAllUses(TII, TRI, RBI);
2343
2344 // Build 0.30103.
2345 assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
2346 ResType->getOpcode() == SPIRV::OpTypeFloat);
2347 // TODO: Add matrix implementation once supported by the HLSL frontend.
2348 const SPIRVType *SpirvScalarType =
2349 ResType->getOpcode() == SPIRV::OpTypeVector
2350 ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
2351 : ResType;
2352 Register ScaleReg =
2353 GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);
2354
2355 // Multiply log2(x) by 0.30103 to get log10(x) result.
2356 auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
2357 ? SPIRV::OpVectorTimesScalar
2358 : SPIRV::OpFMulS;
2359 Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
2360 .addDef(ResVReg)
2361 .addUse(GR.getSPIRVTypeID(ResType))
2362 .addUse(VarReg)
2363 .addUse(ScaleReg)
2364 .constrainAllUses(TII, TRI, RBI);
2365
2366 return Result;
2367}
2368
2369bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg,
2370 const SPIRVType *ResType,
2371 MachineInstr &I) const {
2372 // DX intrinsic: @llvm.dx.thread.id(i32)
2373 // ID Name Description
2374 // 93 ThreadId reads the thread ID
2375
2376 MachineIRBuilder MIRBuilder(I);
2377 const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
2378 const SPIRVType *Vec3Ty =
2379 GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
2380 const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
2381 Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
2382
2383 // Create new register for GlobalInvocationID builtin variable.
2384 Register NewRegister =
2385 MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
2386 MIRBuilder.getMRI()->setType(NewRegister, LLT::pointer(0, 32));
2387 GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
2388
2389 // Build GlobalInvocationID global variable with the necessary decorations.
2390 Register Variable = GR.buildGlobalVariable(
2391 NewRegister, PtrType,
2392 getLinkStringForBuiltIn(SPIRV::BuiltIn::GlobalInvocationId), nullptr,
2393 SPIRV::StorageClass::Input, nullptr, true, true,
2394 SPIRV::LinkageType::Import, MIRBuilder, false);
2395
2396 // Create new register for loading value.
2397 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2398 Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::IDRegClass);
2399 MIRBuilder.getMRI()->setType(LoadedRegister, LLT::pointer(0, 32));
2400 GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());
2401
2402 // Load v3uint value from the global variable.
2403 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
2404 .addDef(LoadedRegister)
2405 .addUse(GR.getSPIRVTypeID(Vec3Ty))
2406 .addUse(Variable);
2407
2408 // Get the thread ID index. The operand is expected to be a constant
2409 // immediate value, wrapped in a type assignment.
2410 assert(I.getOperand(2).isReg());
2411 Register ThreadIdReg = I.getOperand(2).getReg();
2412 SPIRVType *ConstTy = this->MRI->getVRegDef(ThreadIdReg);
2413 assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
2414 ConstTy->getOperand(1).isReg());
2415 Register ConstReg = ConstTy->getOperand(1).getReg();
2416 const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
2417 assert(Const && Const->getOpcode() == TargetOpcode::G_CONSTANT);
2418 const llvm::APInt &Val = Const->getOperand(1).getCImm()->getValue();
2419 const uint32_t ThreadId = Val.getZExtValue();
2420
2421 // Extract the thread ID from the loaded vector value.
2422 MachineBasicBlock &BB = *I.getParent();
2423 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
2424 .addDef(ResVReg)
2425 .addUse(GR.getSPIRVTypeID(ResType))
2426 .addUse(LoadedRegister)
2427 .addImm(ThreadId);
2428 return MIB.constrainAllUses(TII, TRI, RBI);
2429}
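// For illustration, @llvm.dx.thread.id(i32 0) selects to roughly:
//   %gid = OpLoad %v3uint %GlobalInvocationId
//   %res = OpCompositeExtract %uint %gid 0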
2430
2431namespace llvm {
2432InstructionSelector *
2433createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
2434 const SPIRVSubtarget &Subtarget,
2435 const RegisterBankInfo &RBI) {
2436 return new SPIRVInstructionSelector(TM, Subtarget, RBI);
2437}
2438} // namespace llvm