LLVM 20.0.0git
SPIRVInstructionSelector.cpp
1//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the targeting of the InstructionSelector class for
10// SPIRV.
11// TODO: This should be generated by TableGen.
12//
13//===----------------------------------------------------------------------===//
14
15#include "MCTargetDesc/SPIRVBaseInfo.h"
16#include "MCTargetDesc/SPIRVMCTargetDesc.h"
17#include "SPIRV.h"
18#include "SPIRVGlobalRegistry.h"
19#include "SPIRVInstrInfo.h"
20#include "SPIRVRegisterBankInfo.h"
21#include "SPIRVRegisterInfo.h"
22#include "SPIRVTargetMachine.h"
23#include "SPIRVUtils.h"
24#include "llvm/ADT/APFloat.h"
34#include "llvm/IR/IntrinsicsSPIRV.h"
35#include "llvm/Support/Debug.h"
37
38#define DEBUG_TYPE "spirv-isel"
39
40using namespace llvm;
41namespace CL = SPIRV::OpenCLExtInst;
42namespace GL = SPIRV::GLSLExtInst;
43
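// An ExtInstList pairs each candidate extended-instruction set with the
// opcode to emit for it; selectExtInst() below walks the list and uses the
// first set that the subtarget supports.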
44using ExtInstList =
45 std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;
46
47namespace {
48
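// Maps an immediate operand to the corresponding SPIR-V SelectionControl
// value: 0 -> None, 1 -> DontFlatten, 2 -> Flatten; any other value is
// invalid.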
49llvm::SPIRV::SelectionControl::SelectionControl
50getSelectionOperandForImm(int Imm) {
51 if (Imm == 2)
52 return SPIRV::SelectionControl::Flatten;
53 if (Imm == 1)
54 return SPIRV::SelectionControl::DontFlatten;
55 if (Imm == 0)
56 return SPIRV::SelectionControl::None;
57 llvm_unreachable("Invalid immediate");
58}
59
60#define GET_GLOBALISEL_PREDICATE_BITSET
61#include "SPIRVGenGlobalISel.inc"
62#undef GET_GLOBALISEL_PREDICATE_BITSET
63
64class SPIRVInstructionSelector : public InstructionSelector {
65 const SPIRVSubtarget &STI;
66 const SPIRVInstrInfo &TII;
67 const TargetRegisterInfo &TRI;
68 const RegisterBankInfo &RBI;
69 SPIRVGlobalRegistry &GR;
70 MachineRegisterInfo *MRI;
71 MachineFunction *HasVRegsReset = nullptr;
72
73 /// We need to keep track of the number we give to anonymous global values to
74 /// generate the same name every time it is needed.
75 mutable DenseMap<const GlobalValue *, unsigned> UnnamedGlobalIDs;
76 SmallPtrSet<MachineInstr *, 8> DeadMIs;
77
78public:
79 SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
80 const SPIRVSubtarget &ST,
81 const RegisterBankInfo &RBI);
82 void setupMF(MachineFunction &MF, GISelKnownBits *KB,
83 CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
84 BlockFrequencyInfo *BFI) override;
85 // Common selection code. Instruction-specific selection occurs in spvSelect.
86 bool select(MachineInstr &I) override;
87 static const char *getName() { return DEBUG_TYPE; }
88
89#define GET_GLOBALISEL_PREDICATES_DECL
90#include "SPIRVGenGlobalISel.inc"
91#undef GET_GLOBALISEL_PREDICATES_DECL
92
93#define GET_GLOBALISEL_TEMPORARIES_DECL
94#include "SPIRVGenGlobalISel.inc"
95#undef GET_GLOBALISEL_TEMPORARIES_DECL
96
97private:
98 void resetVRegsType(MachineFunction &MF);
99
100 // tblgen-erated 'select' implementation, used as the initial selector for
101 // the patterns that don't require complex C++.
102 bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
103
104 // All instruction-specific selection that didn't happen in "select()".
105 // Basically a large switch/case delegating to all the other select* methods.
106 bool spvSelect(Register ResVReg, const SPIRVType *ResType,
107 MachineInstr &I) const;
108
109 bool selectFirstBitHigh(Register ResVReg, const SPIRVType *ResType,
110 MachineInstr &I, bool IsSigned) const;
111
112 bool selectFirstBitLow(Register ResVReg, const SPIRVType *ResType,
113 MachineInstr &I) const;
114
115 bool selectFirstBitSet16(Register ResVReg, const SPIRVType *ResType,
116 MachineInstr &I, unsigned ExtendOpcode,
117 unsigned BitSetOpcode) const;
118
119 bool selectFirstBitSet32(Register ResVReg, const SPIRVType *ResType,
120 MachineInstr &I, Register SrcReg,
121 unsigned BitSetOpcode) const;
122
123 bool selectFirstBitSet64(Register ResVReg, const SPIRVType *ResType,
124 MachineInstr &I, Register SrcReg,
125 unsigned BitSetOpcode, bool SwapPrimarySide) const;
126
127 bool selectFirstBitSet64Overflow(Register ResVReg, const SPIRVType *ResType,
128 MachineInstr &I, Register SrcReg,
129 unsigned BitSetOpcode,
130 bool SwapPrimarySide) const;
131
132 bool selectGlobalValue(Register ResVReg, MachineInstr &I,
133 const MachineInstr *Init = nullptr) const;
134
135 bool selectOpWithSrcs(Register ResVReg, const SPIRVType *ResType,
136 MachineInstr &I, std::vector<Register> SrcRegs,
137 unsigned Opcode) const;
138
139 bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
140 unsigned Opcode) const;
141
142 bool selectBitcast(Register ResVReg, const SPIRVType *ResType,
143 MachineInstr &I) const;
144
145 bool selectLoad(Register ResVReg, const SPIRVType *ResType,
146 MachineInstr &I) const;
147 bool selectStore(MachineInstr &I) const;
148
149 bool selectStackSave(Register ResVReg, const SPIRVType *ResType,
150 MachineInstr &I) const;
151 bool selectStackRestore(MachineInstr &I) const;
152
153 bool selectMemOperation(Register ResVReg, MachineInstr &I) const;
154
155 bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
156 MachineInstr &I, unsigned NewOpcode,
157 unsigned NegateOpcode = 0) const;
158
159 bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
160 MachineInstr &I) const;
161
162 bool selectFence(MachineInstr &I) const;
163
164 bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
165 MachineInstr &I) const;
166
167 bool selectAnyOrAll(Register ResVReg, const SPIRVType *ResType,
168 MachineInstr &I, unsigned OpType) const;
169
170 bool selectAll(Register ResVReg, const SPIRVType *ResType,
171 MachineInstr &I) const;
172
173 bool selectAny(Register ResVReg, const SPIRVType *ResType,
174 MachineInstr &I) const;
175
176 bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
177 MachineInstr &I) const;
178
179 bool selectBuildVector(Register ResVReg, const SPIRVType *ResType,
180 MachineInstr &I) const;
181 bool selectSplatVector(Register ResVReg, const SPIRVType *ResType,
182 MachineInstr &I) const;
183
184 bool selectCmp(Register ResVReg, const SPIRVType *ResType,
185 unsigned comparisonOpcode, MachineInstr &I) const;
186 bool selectCross(Register ResVReg, const SPIRVType *ResType,
187 MachineInstr &I) const;
188 bool selectDiscard(Register ResVReg, const SPIRVType *ResType,
189 MachineInstr &I) const;
190
191 bool selectICmp(Register ResVReg, const SPIRVType *ResType,
192 MachineInstr &I) const;
193 bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
194 MachineInstr &I) const;
195
196 bool selectSign(Register ResVReg, const SPIRVType *ResType,
197 MachineInstr &I) const;
198
199 bool selectFloatDot(Register ResVReg, const SPIRVType *ResType,
200 MachineInstr &I) const;
201
202 bool selectOverflowArith(Register ResVReg, const SPIRVType *ResType,
203 MachineInstr &I, unsigned Opcode) const;
204
205 bool selectIntegerDot(Register ResVReg, const SPIRVType *ResType,
206 MachineInstr &I, bool Signed) const;
207
208 bool selectIntegerDotExpansion(Register ResVReg, const SPIRVType *ResType,
209 MachineInstr &I) const;
210
211 template <bool Signed>
212 bool selectDot4AddPacked(Register ResVReg, const SPIRVType *ResType,
213 MachineInstr &I) const;
214 template <bool Signed>
215 bool selectDot4AddPackedExpansion(Register ResVReg, const SPIRVType *ResType,
216 MachineInstr &I) const;
217
218 bool selectWaveReduceSum(Register ResVReg, const SPIRVType *ResType,
219 MachineInstr &I) const;
220
221 void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
222 int OpIdx) const;
223 void renderFImm64(MachineInstrBuilder &MIB, const MachineInstr &I,
224 int OpIdx) const;
225
226 bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
227 MachineInstr &I) const;
228
229 bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
230 bool IsSigned) const;
231 bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
232 bool IsSigned, unsigned Opcode) const;
233 bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
234 bool IsSigned) const;
235
236 bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
237 MachineInstr &I) const;
238
239 bool selectSUCmp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
240 bool IsSigned) const;
241
242 bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
243 const SPIRVType *intTy, const SPIRVType *boolTy) const;
244
245 bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
246 MachineInstr &I) const;
247 bool selectFreeze(Register ResVReg, const SPIRVType *ResType,
248 MachineInstr &I) const;
249 bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
250 MachineInstr &I) const;
251 bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
252 MachineInstr &I) const;
253 bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
254 MachineInstr &I) const;
255 bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
256 MachineInstr &I) const;
257 bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
258 MachineInstr &I) const;
259 bool selectGEP(Register ResVReg, const SPIRVType *ResType,
260 MachineInstr &I) const;
261
262 bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
263 MachineInstr &I) const;
264 bool selectAllocaArray(Register ResVReg, const SPIRVType *ResType,
265 MachineInstr &I) const;
266
267 bool selectBranch(MachineInstr &I) const;
268 bool selectBranchCond(MachineInstr &I) const;
269
270 bool selectPhi(Register ResVReg, const SPIRVType *ResType,
271 MachineInstr &I) const;
272
273 [[maybe_unused]] bool selectExtInst(Register ResVReg,
274 const SPIRVType *ResType,
275 MachineInstr &I,
276 GL::GLSLExtInst GLInst) const;
277 bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
278 MachineInstr &I, CL::OpenCLExtInst CLInst) const;
279 bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
280 MachineInstr &I, CL::OpenCLExtInst CLInst,
281 GL::GLSLExtInst GLInst) const;
282 bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
283 MachineInstr &I, const ExtInstList &ExtInsts) const;
284
285 bool selectLog10(Register ResVReg, const SPIRVType *ResType,
286 MachineInstr &I) const;
287
288 bool selectSaturate(Register ResVReg, const SPIRVType *ResType,
289 MachineInstr &I) const;
290
291 bool selectWaveOpInst(Register ResVReg, const SPIRVType *ResType,
292 MachineInstr &I, unsigned Opcode) const;
293
294 bool selectWaveActiveCountBits(Register ResVReg, const SPIRVType *ResType,
295 MachineInstr &I) const;
296
297 bool selectUnmergeValues(MachineInstr &I) const;
298
299 bool selectHandleFromBinding(Register &ResVReg, const SPIRVType *ResType,
300 MachineInstr &I) const;
301
302 bool selectReadImageIntrinsic(Register &ResVReg, const SPIRVType *ResType,
303 MachineInstr &I) const;
304
305 bool selectImageWriteIntrinsic(MachineInstr &I) const;
306
307 // Utilities
308 std::pair<Register, bool>
309 buildI32Constant(uint32_t Val, MachineInstr &I,
310 const SPIRVType *ResType = nullptr) const;
311
312 Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
313 Register buildZerosValF(const SPIRVType *ResType, MachineInstr &I) const;
314 Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
315 MachineInstr &I) const;
316 Register buildOnesValF(const SPIRVType *ResType, MachineInstr &I) const;
317
318 bool wrapIntoSpecConstantOp(MachineInstr &I,
319 SmallVector<Register> &CompositeArgs) const;
320
321 Register getUcharPtrTypeReg(MachineInstr &I,
322 SPIRV::StorageClass::StorageClass SC) const;
323 MachineInstrBuilder buildSpecConstantOp(MachineInstr &I, Register Dest,
324 Register Src, Register DestType,
325 uint32_t Opcode) const;
326 MachineInstrBuilder buildConstGenericPtr(MachineInstr &I, Register SrcPtr,
327 SPIRVType *SrcPtrTy) const;
328 Register buildPointerToResource(const SPIRVType *ResType, uint32_t Set,
329 uint32_t Binding, uint32_t ArraySize,
330 Register IndexReg, bool IsNonUniform,
331 MachineIRBuilder MIRBuilder) const;
332 SPIRVType *widenTypeToVec4(const SPIRVType *Type, MachineInstr &I) const;
333 bool extractSubvector(Register &ResVReg, const SPIRVType *ResType,
334 Register &ReadReg, MachineInstr &InsertionPoint) const;
335 bool BuildCOPY(Register DestReg, Register SrcReg, MachineInstr &I) const;
336 bool loadVec3BuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
337 Register ResVReg, const SPIRVType *ResType,
338 MachineInstr &I) const;
339};
340
341} // end anonymous namespace
342
343#define GET_GLOBALISEL_IMPL
344#include "SPIRVGenGlobalISel.inc"
345#undef GET_GLOBALISEL_IMPL
346
347SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
348 const SPIRVSubtarget &ST,
349 const RegisterBankInfo &RBI)
350 : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
351 TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
352#define GET_GLOBALISEL_PREDICATES_INIT
353#include "SPIRVGenGlobalISel.inc"
354#undef GET_GLOBALISEL_PREDICATES_INIT
355#define GET_GLOBALISEL_TEMPORARIES_INIT
356#include "SPIRVGenGlobalISel.inc"
357#undef GET_GLOBALISEL_TEMPORARIES_INIT
358{
359}
360
361void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
362 CodeGenCoverage *CoverageInfo,
363 ProfileSummaryInfo *PSI,
364 BlockFrequencyInfo *BFI) {
365 MRI = &MF.getRegInfo();
366 GR.setCurrentFunc(MF);
367 InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
368}
369
370// Ensure that register classes correspond to pattern matching rules.
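// Every scalar, pointer, and vector vreg is given a canonical 64-bit LLT, and
// both sides of each ASSIGN_TYPE are forced to agree on type and register
// class, so that the tblgen-erated patterns keyed on these canonical types
// can match.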
371void SPIRVInstructionSelector::resetVRegsType(MachineFunction &MF) {
372 if (HasVRegsReset == &MF)
373 return;
374 HasVRegsReset = &MF;
375
376 MachineRegisterInfo &MRI = MF.getRegInfo();
377 for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
378 Register Reg = Register::index2VirtReg(I);
379 LLT RegType = MRI.getType(Reg);
380 if (RegType.isScalar())
381 MRI.setType(Reg, LLT::scalar(64));
382 else if (RegType.isPointer())
383 MRI.setType(Reg, LLT::pointer(0, 64));
384 else if (RegType.isVector())
385 MRI.setType(Reg, LLT::fixed_vector(2, LLT::scalar(64)));
386 }
387 for (const auto &MBB : MF) {
388 for (const auto &MI : MBB) {
389 if (MI.getOpcode() != SPIRV::ASSIGN_TYPE)
390 continue;
391 Register DstReg = MI.getOperand(0).getReg();
392 LLT DstType = MRI.getType(DstReg);
393 Register SrcReg = MI.getOperand(1).getReg();
394 LLT SrcType = MRI.getType(SrcReg);
395 if (DstType != SrcType)
396 MRI.setType(DstReg, MRI.getType(SrcReg));
397
398 const TargetRegisterClass *DstRC = MRI.getRegClassOrNull(DstReg);
399 const TargetRegisterClass *SrcRC = MRI.getRegClassOrNull(SrcReg);
400 if (DstRC != SrcRC && SrcRC)
401 MRI.setRegClass(DstReg, SrcRC);
402 }
403 }
404}
405
406static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);
407
408// Defined in SPIRVLegalizerInfo.cpp.
409extern bool isTypeFoldingSupported(unsigned Opcode);
410
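// Conservatively decides whether MI may be erased: all defs must be unused
// (ignoring debug uses), and MI must carry no side effect that has to be
// preserved (stores, calls, ordered loads, positions, terminators, etc.).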
411static bool isDead(const MachineInstr &MI, const MachineRegisterInfo &MRI) {
412 for (const auto &MO : MI.all_defs()) {
413 Register Reg = MO.getReg();
414 if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg))
415 return false;
416 }
417 if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE || MI.isFakeUse() ||
418 MI.isLifetimeMarker())
419 return false;
420 if (MI.isPHI())
421 return true;
422 if (MI.mayStore() || MI.isCall() ||
423 (MI.mayLoad() && MI.hasOrderedMemoryRef()) || MI.isPosition() ||
424 MI.isDebugInstr() || MI.isTerminator() || MI.isJumpTableDebugInfo())
425 return false;
426 return true;
427}
428
429bool SPIRVInstructionSelector::select(MachineInstr &I) {
430 resetVRegsType(*I.getParent()->getParent());
431
432 assert(I.getParent() && "Instruction should be in a basic block!");
433 assert(I.getParent()->getParent() && "Instruction should be in a function!");
434
435 unsigned Opcode = I.getOpcode();
436 // If it's not a GMIR instruction, we've selected it already.
437 if (!isPreISelGenericOpcode(Opcode)) {
438 if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
439 Register DstReg = I.getOperand(0).getReg();
440 Register SrcReg = I.getOperand(1).getReg();
441 auto *Def = MRI->getVRegDef(SrcReg);
442 if (isTypeFoldingSupported(Def->getOpcode())) {
443 bool Res = selectImpl(I, *CoverageInfo);
444 LLVM_DEBUG({
445 if (!Res && Def->getOpcode() != TargetOpcode::G_CONSTANT) {
446 dbgs() << "Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
447 I.print(dbgs());
448 }
449 });
450 assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
451 if (Res) {
452 if (!isTriviallyDead(*Def, *MRI) && isDead(*Def, *MRI))
453 DeadMIs.insert(Def);
454 return Res;
455 }
456 }
457 MRI->setRegClass(SrcReg, MRI->getRegClass(DstReg));
458 MRI->replaceRegWith(SrcReg, DstReg);
459 GR.invalidateMachineInstr(&I);
460 I.removeFromParent();
461 return true;
462 } else if (I.getNumDefs() == 1) {
463 // Make all vregs 64 bits (for SPIR-V IDs).
464 MRI->setType(I.getOperand(0).getReg(), LLT::scalar(64));
465 }
466 return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
467 }
468
469 if (DeadMIs.contains(&I)) {
470 // If the instruction has already been made dead by folding it away,
471 // erase it.
472 LLVM_DEBUG(dbgs() << "Instruction is folded and dead.\n");
473 salvageDebugInfo(*MRI, I);
474 GR.invalidateMachineInstr(&I);
475 I.eraseFromParent();
476 return true;
477 }
478
479 if (I.getNumOperands() != I.getNumExplicitOperands()) {
480 LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
481 return false;
482 }
483
484 // Common code for getting return reg+type, and removing selected instr
485 // from parent occurs here. Instr-specific selection happens in spvSelect().
486 bool HasDefs = I.getNumDefs() > 0;
487 Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
488 SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
489 assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
490 if (spvSelect(ResVReg, ResType, I)) {
491 if (HasDefs) // Make all vregs 64 bits (for SPIR-V IDs).
492 for (unsigned i = 0; i < I.getNumDefs(); ++i)
493 MRI->setType(I.getOperand(i).getReg(), LLT::scalar(64));
494 GR.invalidateMachineInstr(&I);
495 I.removeFromParent();
496 return true;
497 }
498 return false;
499}
500
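// Decides whether the tblgen-erated generic selector may be tried first for
// Opcode: G_CONSTANT is always selected manually, the overflow opcodes below
// always take the generic path, and anything else goes through it only if
// type folding is supported for that opcode.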
501static bool mayApplyGenericSelection(unsigned Opcode) {
502 switch (Opcode) {
503 case TargetOpcode::G_CONSTANT:
504 return false;
505 case TargetOpcode::G_SADDO:
506 case TargetOpcode::G_SSUBO:
507 return true;
508 }
509 return isTypeFoldingSupported(Opcode);
510}
511
512bool SPIRVInstructionSelector::BuildCOPY(Register DestReg, Register SrcReg,
513 MachineInstr &I) const {
514 const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(DestReg);
515 const TargetRegisterClass *SrcRC = MRI->getRegClassOrNull(SrcReg);
516 if (DstRC != SrcRC && SrcRC)
517 MRI->setRegClass(DestReg, SrcRC);
518 return BuildMI(*I.getParent(), I, I.getDebugLoc(),
519 TII.get(TargetOpcode::COPY))
520 .addDef(DestReg)
521 .addUse(SrcReg)
522 .constrainAllUses(TII, TRI, RBI);
523}
524
525bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
526 const SPIRVType *ResType,
527 MachineInstr &I) const {
528 const unsigned Opcode = I.getOpcode();
529 if (mayApplyGenericSelection(Opcode))
530 return selectImpl(I, *CoverageInfo);
531 switch (Opcode) {
532 case TargetOpcode::G_CONSTANT:
533 return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
534 I);
535 case TargetOpcode::G_GLOBAL_VALUE:
536 return selectGlobalValue(ResVReg, I);
537 case TargetOpcode::G_IMPLICIT_DEF:
538 return selectOpUndef(ResVReg, ResType, I);
539 case TargetOpcode::G_FREEZE:
540 return selectFreeze(ResVReg, ResType, I);
541
542 case TargetOpcode::G_INTRINSIC:
543 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
544 case TargetOpcode::G_INTRINSIC_CONVERGENT:
545 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
546 return selectIntrinsic(ResVReg, ResType, I);
547 case TargetOpcode::G_BITREVERSE:
548 return selectBitreverse(ResVReg, ResType, I);
549
550 case TargetOpcode::G_BUILD_VECTOR:
551 return selectBuildVector(ResVReg, ResType, I);
552 case TargetOpcode::G_SPLAT_VECTOR:
553 return selectSplatVector(ResVReg, ResType, I);
554
555 case TargetOpcode::G_SHUFFLE_VECTOR: {
556 MachineBasicBlock &BB = *I.getParent();
557 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
558 .addDef(ResVReg)
559 .addUse(GR.getSPIRVTypeID(ResType))
560 .addUse(I.getOperand(1).getReg())
561 .addUse(I.getOperand(2).getReg());
562 for (auto V : I.getOperand(3).getShuffleMask())
563 MIB.addImm(V);
564 return MIB.constrainAllUses(TII, TRI, RBI);
565 }
566 case TargetOpcode::G_MEMMOVE:
567 case TargetOpcode::G_MEMCPY:
568 case TargetOpcode::G_MEMSET:
569 return selectMemOperation(ResVReg, I);
570
571 case TargetOpcode::G_ICMP:
572 return selectICmp(ResVReg, ResType, I);
573 case TargetOpcode::G_FCMP:
574 return selectFCmp(ResVReg, ResType, I);
575
576 case TargetOpcode::G_FRAME_INDEX:
577 return selectFrameIndex(ResVReg, ResType, I);
578
579 case TargetOpcode::G_LOAD:
580 return selectLoad(ResVReg, ResType, I);
581 case TargetOpcode::G_STORE:
582 return selectStore(I);
583
584 case TargetOpcode::G_BR:
585 return selectBranch(I);
586 case TargetOpcode::G_BRCOND:
587 return selectBranchCond(I);
588
589 case TargetOpcode::G_PHI:
590 return selectPhi(ResVReg, ResType, I);
591
592 case TargetOpcode::G_FPTOSI:
593 return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
594 case TargetOpcode::G_FPTOUI:
595 return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);
596
597 case TargetOpcode::G_SITOFP:
598 return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
599 case TargetOpcode::G_UITOFP:
600 return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);
601
602 case TargetOpcode::G_CTPOP:
603 return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
604 case TargetOpcode::G_SMIN:
605 return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
606 case TargetOpcode::G_UMIN:
607 return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);
608
609 case TargetOpcode::G_SMAX:
610 return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
611 case TargetOpcode::G_UMAX:
612 return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);
613
614 case TargetOpcode::G_SCMP:
615 return selectSUCmp(ResVReg, ResType, I, true);
616 case TargetOpcode::G_UCMP:
617 return selectSUCmp(ResVReg, ResType, I, false);
618
619 case TargetOpcode::G_STRICT_FMA:
620 case TargetOpcode::G_FMA:
621 return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);
622
623 case TargetOpcode::G_STRICT_FLDEXP:
624 return selectExtInst(ResVReg, ResType, I, CL::ldexp);
625
626 case TargetOpcode::G_FPOW:
627 return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
628 case TargetOpcode::G_FPOWI:
629 return selectExtInst(ResVReg, ResType, I, CL::pown);
630
631 case TargetOpcode::G_FEXP:
632 return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
633 case TargetOpcode::G_FEXP2:
634 return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);
635
636 case TargetOpcode::G_FLOG:
637 return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
638 case TargetOpcode::G_FLOG2:
639 return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
640 case TargetOpcode::G_FLOG10:
641 return selectLog10(ResVReg, ResType, I);
642
643 case TargetOpcode::G_FABS:
644 return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
645 case TargetOpcode::G_ABS:
646 return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);
647
648 case TargetOpcode::G_FMINNUM:
649 case TargetOpcode::G_FMINIMUM:
650 return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::NMin);
651 case TargetOpcode::G_FMAXNUM:
652 case TargetOpcode::G_FMAXIMUM:
653 return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::NMax);
654
655 case TargetOpcode::G_FCOPYSIGN:
656 return selectExtInst(ResVReg, ResType, I, CL::copysign);
657
658 case TargetOpcode::G_FCEIL:
659 return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
660 case TargetOpcode::G_FFLOOR:
661 return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);
662
663 case TargetOpcode::G_FCOS:
664 return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
665 case TargetOpcode::G_FSIN:
666 return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);
667 case TargetOpcode::G_FTAN:
668 return selectExtInst(ResVReg, ResType, I, CL::tan, GL::Tan);
669 case TargetOpcode::G_FACOS:
670 return selectExtInst(ResVReg, ResType, I, CL::acos, GL::Acos);
671 case TargetOpcode::G_FASIN:
672 return selectExtInst(ResVReg, ResType, I, CL::asin, GL::Asin);
673 case TargetOpcode::G_FATAN:
674 return selectExtInst(ResVReg, ResType, I, CL::atan, GL::Atan);
675 case TargetOpcode::G_FATAN2:
676 return selectExtInst(ResVReg, ResType, I, CL::atan2, GL::Atan2);
677 case TargetOpcode::G_FCOSH:
678 return selectExtInst(ResVReg, ResType, I, CL::cosh, GL::Cosh);
679 case TargetOpcode::G_FSINH:
680 return selectExtInst(ResVReg, ResType, I, CL::sinh, GL::Sinh);
681 case TargetOpcode::G_FTANH:
682 return selectExtInst(ResVReg, ResType, I, CL::tanh, GL::Tanh);
683
684 case TargetOpcode::G_STRICT_FSQRT:
685 case TargetOpcode::G_FSQRT:
686 return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);
687
688 case TargetOpcode::G_CTTZ:
689 case TargetOpcode::G_CTTZ_ZERO_UNDEF:
690 return selectExtInst(ResVReg, ResType, I, CL::ctz);
691 case TargetOpcode::G_CTLZ:
692 case TargetOpcode::G_CTLZ_ZERO_UNDEF:
693 return selectExtInst(ResVReg, ResType, I, CL::clz);
694
695 case TargetOpcode::G_INTRINSIC_ROUND:
696 return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
697 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
698 return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
699 case TargetOpcode::G_INTRINSIC_TRUNC:
700 return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
701 case TargetOpcode::G_FRINT:
702 case TargetOpcode::G_FNEARBYINT:
703 return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
704
705 case TargetOpcode::G_SMULH:
706 return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
707 case TargetOpcode::G_UMULH:
708 return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);
709
710 case TargetOpcode::G_SADDSAT:
711 return selectExtInst(ResVReg, ResType, I, CL::s_add_sat);
712 case TargetOpcode::G_UADDSAT:
713 return selectExtInst(ResVReg, ResType, I, CL::u_add_sat);
714 case TargetOpcode::G_SSUBSAT:
715 return selectExtInst(ResVReg, ResType, I, CL::s_sub_sat);
716 case TargetOpcode::G_USUBSAT:
717 return selectExtInst(ResVReg, ResType, I, CL::u_sub_sat);
718
719 case TargetOpcode::G_UADDO:
720 return selectOverflowArith(ResVReg, ResType, I,
721 ResType->getOpcode() == SPIRV::OpTypeVector
722 ? SPIRV::OpIAddCarryV
723 : SPIRV::OpIAddCarryS);
724 case TargetOpcode::G_USUBO:
725 return selectOverflowArith(ResVReg, ResType, I,
726 ResType->getOpcode() == SPIRV::OpTypeVector
727 ? SPIRV::OpISubBorrowV
728 : SPIRV::OpISubBorrowS);
729 case TargetOpcode::G_UMULO:
730 return selectOverflowArith(ResVReg, ResType, I, SPIRV::OpUMulExtended);
731 case TargetOpcode::G_SMULO:
732 return selectOverflowArith(ResVReg, ResType, I, SPIRV::OpSMulExtended);
733
734 case TargetOpcode::G_SEXT:
735 return selectExt(ResVReg, ResType, I, true);
736 case TargetOpcode::G_ANYEXT:
737 case TargetOpcode::G_ZEXT:
738 return selectExt(ResVReg, ResType, I, false);
739 case TargetOpcode::G_TRUNC:
740 return selectTrunc(ResVReg, ResType, I);
741 case TargetOpcode::G_FPTRUNC:
742 case TargetOpcode::G_FPEXT:
743 return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);
744
745 case TargetOpcode::G_PTRTOINT:
746 return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
747 case TargetOpcode::G_INTTOPTR:
748 return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
749 case TargetOpcode::G_BITCAST:
750 return selectBitcast(ResVReg, ResType, I);
751 case TargetOpcode::G_ADDRSPACE_CAST:
752 return selectAddrSpaceCast(ResVReg, ResType, I);
753 case TargetOpcode::G_PTR_ADD: {
754 // Currently, G_PTR_ADD is only ever applied to global variables.
755 assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
756 Register GV = I.getOperand(1).getReg();
757 MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
758 (void)II;
759 assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
760 (*II).getOpcode() == TargetOpcode::COPY ||
761 (*II).getOpcode() == SPIRV::OpVariable) &&
762 isImm(I.getOperand(2), MRI));
763 // It may be the initialization of a global variable.
764 bool IsGVInit = false;
765 for (MachineRegisterInfo::use_instr_iterator
766 UseIt = MRI->use_instr_begin(I.getOperand(0).getReg()),
767 UseEnd = MRI->use_instr_end();
768 UseIt != UseEnd; UseIt = std::next(UseIt)) {
769 if ((*UseIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
770 (*UseIt).getOpcode() == SPIRV::OpVariable) {
771 IsGVInit = true;
772 break;
773 }
774 }
775 MachineBasicBlock &BB = *I.getParent();
776 if (!IsGVInit) {
777 SPIRVType *GVType = GR.getSPIRVTypeForVReg(GV);
778 SPIRVType *GVPointeeType = GR.getPointeeType(GVType);
779 SPIRVType *ResPointeeType = GR.getPointeeType(ResType);
780 if (GVPointeeType && ResPointeeType && GVPointeeType != ResPointeeType) {
781 // Build a new virtual register that is associated with the required
782 // data type.
783 Register NewVReg = MRI->createGenericVirtualRegister(MRI->getType(GV));
784 MRI->setRegClass(NewVReg, MRI->getRegClass(GV));
785 // Having a correctly typed base, we are ready to build the actually
786 // required GEP. It may not be a constant, though, because all operands
787 // of OpSpecConstantOp must originate from other constant instructions,
788 // and only the AccessChain-named opcodes accept a global OpVariable
789 // instruction. We can't use an AccessChain opcode because of the type
790 // mismatch between the result and base types.
791 if (!GR.isBitcastCompatible(ResType, GVType))
792 report_fatal_error(
793 "incompatible result and operand types in a bitcast");
794 Register ResTypeReg = GR.getSPIRVTypeID(ResType);
795 MachineInstrBuilder MIB =
796 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitcast))
797 .addDef(NewVReg)
798 .addUse(ResTypeReg)
799 .addUse(GV);
800 return MIB.constrainAllUses(TII, TRI, RBI) &&
801 BuildMI(BB, I, I.getDebugLoc(),
802 TII.get(STI.isVulkanEnv()
803 ? SPIRV::OpInBoundsAccessChain
804 : SPIRV::OpInBoundsPtrAccessChain))
805 .addDef(ResVReg)
806 .addUse(ResTypeReg)
807 .addUse(NewVReg)
808 .addUse(I.getOperand(2).getReg())
809 .constrainAllUses(TII, TRI, RBI);
810 } else {
811 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
812 .addDef(ResVReg)
813 .addUse(GR.getSPIRVTypeID(ResType))
814 .addImm(
815 static_cast<uint32_t>(SPIRV::Opcode::InBoundsPtrAccessChain))
816 .addUse(GV)
817 .addUse(I.getOperand(2).getReg())
818 .constrainAllUses(TII, TRI, RBI);
819 }
820 }
821 // It's possible to translate G_PTR_ADD to OpSpecConstantOp: either to
822 // initialize a global variable with a constant expression (e.g., the test
823 // case opencl/basic/progvar_prog_scope_init.ll), or for another use case.
824 Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
825 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
826 .addDef(ResVReg)
827 .addUse(GR.getSPIRVTypeID(ResType))
828 .addImm(static_cast<uint32_t>(
829 SPIRV::Opcode::InBoundsPtrAccessChain))
830 .addUse(GV)
831 .addUse(Idx)
832 .addUse(I.getOperand(2).getReg());
833 return MIB.constrainAllUses(TII, TRI, RBI);
834 }
835
836 case TargetOpcode::G_ATOMICRMW_OR:
837 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
838 case TargetOpcode::G_ATOMICRMW_ADD:
839 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
840 case TargetOpcode::G_ATOMICRMW_AND:
841 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
842 case TargetOpcode::G_ATOMICRMW_MAX:
843 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
844 case TargetOpcode::G_ATOMICRMW_MIN:
845 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
846 case TargetOpcode::G_ATOMICRMW_SUB:
847 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
848 case TargetOpcode::G_ATOMICRMW_XOR:
849 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
850 case TargetOpcode::G_ATOMICRMW_UMAX:
851 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
852 case TargetOpcode::G_ATOMICRMW_UMIN:
853 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
854 case TargetOpcode::G_ATOMICRMW_XCHG:
855 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
856 case TargetOpcode::G_ATOMIC_CMPXCHG:
857 return selectAtomicCmpXchg(ResVReg, ResType, I);
858
859 case TargetOpcode::G_ATOMICRMW_FADD:
860 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
861 case TargetOpcode::G_ATOMICRMW_FSUB:
862 // Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with negative value operand
863 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
864 SPIRV::OpFNegate);
865 case TargetOpcode::G_ATOMICRMW_FMIN:
866 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
867 case TargetOpcode::G_ATOMICRMW_FMAX:
868 return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);
869
870 case TargetOpcode::G_FENCE:
871 return selectFence(I);
872
873 case TargetOpcode::G_STACKSAVE:
874 return selectStackSave(ResVReg, ResType, I);
875 case TargetOpcode::G_STACKRESTORE:
876 return selectStackRestore(I);
877
878 case TargetOpcode::G_UNMERGE_VALUES:
879 return selectUnmergeValues(I);
880
881 // Discard gen opcodes for intrinsics which we do not expect to actually
882 // represent code after lowering, as well as intrinsics which are not
883 // implemented but should not crash when found in a customer's LLVM IR input.
884 case TargetOpcode::G_TRAP:
885 case TargetOpcode::G_DEBUGTRAP:
886 case TargetOpcode::G_UBSANTRAP:
887 case TargetOpcode::DBG_LABEL:
888 return true;
889
890 default:
891 return false;
892 }
893}
894
895bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
896 const SPIRVType *ResType,
897 MachineInstr &I,
898 GL::GLSLExtInst GLInst) const {
899 return selectExtInst(ResVReg, ResType, I,
900 {{SPIRV::InstructionSet::GLSL_std_450, GLInst}});
901}
902
903bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
904 const SPIRVType *ResType,
905 MachineInstr &I,
906 CL::OpenCLExtInst CLInst) const {
907 return selectExtInst(ResVReg, ResType, I,
908 {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
909}
910
911bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
912 const SPIRVType *ResType,
913 MachineInstr &I,
914 CL::OpenCLExtInst CLInst,
915 GL::GLSLExtInst GLInst) const {
916 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
917 {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
918 return selectExtInst(ResVReg, ResType, I, ExtInsts);
919}
920
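// Emits OpExtInst using the first (instruction set, opcode) pair in Insts
// whose instruction set is available on the subtarget; returns false if none
// of them is.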
921bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
922 const SPIRVType *ResType,
923 MachineInstr &I,
924 const ExtInstList &Insts) const {
925
926 for (const auto &Ex : Insts) {
927 SPIRV::InstructionSet::InstructionSet Set = Ex.first;
928 uint32_t Opcode = Ex.second;
929 if (STI.canUseExtInstSet(Set)) {
930 MachineBasicBlock &BB = *I.getParent();
931 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
932 .addDef(ResVReg)
933 .addUse(GR.getSPIRVTypeID(ResType))
934 .addImm(static_cast<uint32_t>(Set))
935 .addImm(Opcode);
936 const unsigned NumOps = I.getNumOperands();
937 unsigned Index = 1;
938 if (Index < NumOps &&
939 I.getOperand(Index).getType() ==
940 MachineOperand::MachineOperandType::MO_IntrinsicID)
941 Index = 2;
942 for (; Index < NumOps; ++Index)
943 MIB.add(I.getOperand(Index));
944 return MIB.constrainAllUses(TII, TRI, RBI);
945 }
946 }
947 return false;
948}
949
950bool SPIRVInstructionSelector::selectOpWithSrcs(Register ResVReg,
951 const SPIRVType *ResType,
952 MachineInstr &I,
953 std::vector<Register> Srcs,
954 unsigned Opcode) const {
955 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
956 .addDef(ResVReg)
957 .addUse(GR.getSPIRVTypeID(ResType));
958 for (Register SReg : Srcs) {
959 MIB.addUse(SReg);
960 }
961 return MIB.constrainAllUses(TII, TRI, RBI);
962}
963
964bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
965 const SPIRVType *ResType,
966 MachineInstr &I,
967 unsigned Opcode) const {
968 if (STI.isOpenCLEnv() && I.getOperand(1).isReg()) {
969 Register SrcReg = I.getOperand(1).getReg();
970 bool IsGV = false;
971 for (MachineRegisterInfo::def_instr_iterator DefIt =
972 MRI->def_instr_begin(SrcReg);
973 DefIt != MRI->def_instr_end(); DefIt = std::next(DefIt)) {
974 if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
975 IsGV = true;
976 break;
977 }
978 }
979 if (IsGV) {
980 uint32_t SpecOpcode = 0;
981 switch (Opcode) {
982 case SPIRV::OpConvertPtrToU:
983 SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertPtrToU);
984 break;
985 case SPIRV::OpConvertUToPtr:
986 SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertUToPtr);
987 break;
988 }
989 if (SpecOpcode)
990 return BuildMI(*I.getParent(), I, I.getDebugLoc(),
991 TII.get(SPIRV::OpSpecConstantOp))
992 .addDef(ResVReg)
993 .addUse(GR.getSPIRVTypeID(ResType))
994 .addImm(SpecOpcode)
995 .addUse(SrcReg)
996 .constrainAllUses(TII, TRI, RBI);
997 }
998 }
999 return selectOpWithSrcs(ResVReg, ResType, I, {I.getOperand(1).getReg()},
1000 Opcode);
1001}
1002
1003bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
1004 const SPIRVType *ResType,
1005 MachineInstr &I) const {
1006 Register OpReg = I.getOperand(1).getReg();
1007 SPIRVType *OpType = OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
1008 if (!GR.isBitcastCompatible(ResType, OpType))
1009 report_fatal_error("incompatible result and operand types in a bitcast");
1010 return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
1011}
1012
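// Appends a SPIR-V MemoryOperand mask (Volatile / Nontemporal / Aligned)
// derived from the machine memory operand; the alignment literal follows the
// mask when the Aligned bit is set.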
1013static void addMemoryOperands(MachineMemOperand *MemOp,
1014 MachineInstrBuilder &MIB) {
1015 uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
1016 if (MemOp->isVolatile())
1017 SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1018 if (MemOp->isNonTemporal())
1019 SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1020 if (MemOp->getAlign().value())
1021 SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);
1022
1023 if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
1024 MIB.addImm(SpvMemOp);
1025 if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
1026 MIB.addImm(MemOp->getAlign().value());
1027 }
1028}
1029
1030static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
1031 uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
1032 if (Flags & MachineMemOperand::Flags::MOVolatile)
1033 SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1034 if (Flags & MachineMemOperand::Flags::MONonTemporal)
1035 SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1036
1037 if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
1038 MIB.addImm(SpvMemOp);
1039}
1040
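// Lowers G_LOAD (or a SPIR-V load intrinsic) to OpLoad. Memory-operand flags
// are taken from the MMO when one is attached, otherwise from the intrinsic's
// explicit immediate operand.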
1041bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
1042 const SPIRVType *ResType,
1043 MachineInstr &I) const {
1044 unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
1045 Register Ptr = I.getOperand(1 + OpOffset).getReg();
1046 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
1047 .addDef(ResVReg)
1048 .addUse(GR.getSPIRVTypeID(ResType))
1049 .addUse(Ptr);
1050 if (!I.getNumMemOperands()) {
1051 assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1052 I.getOpcode() ==
1053 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1054 addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
1055 } else {
1056 addMemoryOperands(*I.memoperands_begin(), MIB);
1057 }
1058 return MIB.constrainAllUses(TII, TRI, RBI);
1059}
1060
1061bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
1062 unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
1063 Register StoreVal = I.getOperand(0 + OpOffset).getReg();
1064 Register Ptr = I.getOperand(1 + OpOffset).getReg();
1065 MachineBasicBlock &BB = *I.getParent();
1066 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
1067 .addUse(Ptr)
1068 .addUse(StoreVal);
1069 if (!I.getNumMemOperands()) {
1070 assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1071 I.getOpcode() ==
1072 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1073 addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
1074 } else {
1075 addMemoryOperands(*I.memoperands_begin(), MIB);
1076 }
1077 return MIB.constrainAllUses(TII, TRI, RBI);
1078}
1079
1080bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
1081 const SPIRVType *ResType,
1082 MachineInstr &I) const {
1083 if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1084 report_fatal_error(
1085 "llvm.stacksave intrinsic: this instruction requires the following "
1086 "SPIR-V extension: SPV_INTEL_variable_length_array",
1087 false);
1088 MachineBasicBlock &BB = *I.getParent();
1089 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
1090 .addDef(ResVReg)
1091 .addUse(GR.getSPIRVTypeID(ResType))
1092 .constrainAllUses(TII, TRI, RBI);
1093}
1094
1095bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
1096 if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1097 report_fatal_error(
1098 "llvm.stackrestore intrinsic: this instruction requires the following "
1099 "SPIR-V extension: SPV_INTEL_variable_length_array",
1100 false);
1101 if (!I.getOperand(0).isReg())
1102 return false;
1103 MachineBasicBlock &BB = *I.getParent();
1104 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
1105 .addUse(I.getOperand(0).getReg())
1106 .constrainAllUses(TII, TRI, RBI);
1107}
1108
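// Lowers G_MEMCPY/G_MEMMOVE/G_MEMSET to OpCopyMemorySized. For G_MEMSET the
// copy source is synthesized: a UniformConstant global array holding the fill
// byte is created and bitcast to a uchar pointer.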
1109bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
1110 MachineInstr &I) const {
1111 MachineBasicBlock &BB = *I.getParent();
1112 Register SrcReg = I.getOperand(1).getReg();
1113 bool Result = true;
1114 if (I.getOpcode() == TargetOpcode::G_MEMSET) {
1115 assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
1116 unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
1117 unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
1118 SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
1119 SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
1120 Register Const = GR.getOrCreateConstIntArray(Val, Num, I, ArrTy, TII);
1121 SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
1122 ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
1123 // TODO: check if we have such GV, add init, use buildGlobalVariable.
1124 Function &CurFunction = GR.CurMF->getFunction();
1125 Type *LLVMArrTy =
1126 ArrayType::get(IntegerType::get(CurFunction.getContext(), 8), Num);
1127 // Module takes ownership of the global var.
1128 GlobalVariable *GV = new GlobalVariable(*CurFunction.getParent(), LLVMArrTy,
1129 true, GlobalValue::InternalLinkage,
1130 Constant::getNullValue(LLVMArrTy));
1131 Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
1132 GR.add(GV, GR.CurMF, VarReg);
1133 GR.addGlobalObject(GV, GR.CurMF, VarReg);
1134
1135 Result &=
1136 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
1137 .addDef(VarReg)
1138 .addUse(GR.getSPIRVTypeID(VarTy))
1139 .addImm(SPIRV::StorageClass::UniformConstant)
1140 .addUse(Const)
1141 .constrainAllUses(TII, TRI, RBI);
1142 buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
1143 SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
1144 ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
1145 SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
1146 selectOpWithSrcs(SrcReg, SourceTy, I, {VarReg}, SPIRV::OpBitcast);
1147 }
1148 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
1149 .addUse(I.getOperand(0).getReg())
1150 .addUse(SrcReg)
1151 .addUse(I.getOperand(2).getReg());
1152 if (I.getNumMemOperands())
1153 addMemoryOperands(*I.memoperands_begin(), MIB);
1154 Result &= MIB.constrainAllUses(TII, TRI, RBI);
1155 if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
1156 Result &= BuildCOPY(ResVReg, MIB->getOperand(0).getReg(), I);
1157 return Result;
1158}
1159
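// Lowers a G_ATOMICRMW_* to the given OpAtomic* opcode. Scope and memory
// semantics are materialized as i32 constants from the MMO; if NegateOpcode
// is nonzero, the value operand is negated first (used to map FSUB onto
// OpAtomicFAddEXT).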
1160bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
1161 const SPIRVType *ResType,
1162 MachineInstr &I,
1163 unsigned NewOpcode,
1164 unsigned NegateOpcode) const {
1165 bool Result = true;
1166 assert(I.hasOneMemOperand());
1167 const MachineMemOperand *MemOp = *I.memoperands_begin();
1168 uint32_t Scope = static_cast<uint32_t>(getMemScope(
1169 GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
1170 auto ScopeConstant = buildI32Constant(Scope, I);
1171 Register ScopeReg = ScopeConstant.first;
1172 Result &= ScopeConstant.second;
1173
1174 Register Ptr = I.getOperand(1).getReg();
1175 // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
1176 // auto ScSem =
1177 // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
1178 AtomicOrdering AO = MemOp->getSuccessOrdering();
1179 uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
1180 auto MemSemConstant = buildI32Constant(MemSem /*| ScSem*/, I);
1181 Register MemSemReg = MemSemConstant.first;
1182 Result &= MemSemConstant.second;
1183
1184 Register ValueReg = I.getOperand(2).getReg();
1185 if (NegateOpcode != 0) {
1186 // Translation with a negated value operand was requested.
1187 Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1188 Result &= selectOpWithSrcs(TmpReg, ResType, I, {ValueReg}, NegateOpcode);
1189 ValueReg = TmpReg;
1190 }
1191
1192 return Result &&
1193 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
1194 .addDef(ResVReg)
1195 .addUse(GR.getSPIRVTypeID(ResType))
1196 .addUse(Ptr)
1197 .addUse(ScopeReg)
1198 .addUse(MemSemReg)
1199 .addUse(ValueReg)
1200 .constrainAllUses(TII, TRI, RBI);
1201}
1202
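// Lowers G_UNMERGE_VALUES of a vector into one OpCompositeExtract per
// element, assigning the scalar element type to any result vreg that lacks
// an assigned SPIR-V type.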
1203bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
1204 unsigned ArgI = I.getNumOperands() - 1;
1205 Register SrcReg =
1206 I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
1207 SPIRVType *DefType =
1208 SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
1209 if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
1210 report_fatal_error(
1211 "cannot select G_UNMERGE_VALUES with a non-vector argument");
1212
1213 SPIRVType *ScalarType =
1214 GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
1215 MachineBasicBlock &BB = *I.getParent();
1216 bool Res = false;
1217 for (unsigned i = 0; i < I.getNumDefs(); ++i) {
1218 Register ResVReg = I.getOperand(i).getReg();
1219 SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
1220 if (!ResType) {
1221 // There were no "assign type" actions; fix this now.
1222 ResType = ScalarType;
1223 MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
1224 MRI->setType(ResVReg, LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
1225 GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
1226 }
1227 auto MIB =
1228 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1229 .addDef(ResVReg)
1230 .addUse(GR.getSPIRVTypeID(ResType))
1231 .addUse(SrcReg)
1232 .addImm(static_cast<int64_t>(i));
1233 Res |= MIB.constrainAllUses(TII, TRI, RBI);
1234 }
1235 return Res;
1236}
1237
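// Lowers G_FENCE to OpMemoryBarrier; the memory-semantics and scope operands
// are i32 constants derived from the fence's atomic ordering and sync scope.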
1238bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
1239 AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
1240 uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
1241 auto MemSemConstant = buildI32Constant(MemSem, I);
1242 Register MemSemReg = MemSemConstant.first;
1243 bool Result = MemSemConstant.second;
1244 SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
1245 uint32_t Scope = static_cast<uint32_t>(
1246 getMemScope(GR.CurMF->getFunction().getContext(), Ord));
1247 auto ScopeConstant = buildI32Constant(Scope, I);
1248 Register ScopeReg = ScopeConstant.first;
1249 Result &= ScopeConstant.second;
1250 MachineBasicBlock &BB = *I.getParent();
1251 return Result &&
1252 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
1253 .addUse(ScopeReg)
1254 .addUse(MemSemReg)
1255 .constrainAllUses(TII, TRI, RBI);
1256}
1257
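// Lowers overflow arithmetic (G_UADDO and friends) to the given two-member
// struct-producing SPIR-V opcode, then extracts both members and converts the
// carry/overflow member to a boolean. A sketch of the emitted code for a
// scalar G_UADDO (result ids are illustrative only):
//   %s = OpIAddCarryS %{i32, i32} %a %b
//   %val = OpCompositeExtract %i32 %s 0
//   %hi = OpCompositeExtract %i32 %s 1
//   %ovf = OpINotEqual %bool %hi %zero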
1258bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg,
1259 const SPIRVType *ResType,
1260 MachineInstr &I,
1261 unsigned Opcode) const {
1262 Type *ResTy = nullptr;
1263 StringRef ResName;
1264 if (!GR.findValueAttrs(&I, ResTy, ResName))
1265 report_fatal_error(
1266 "Not enough info to select the arithmetic with overflow instruction");
1267 if (!ResTy || !ResTy->isStructTy())
1268 report_fatal_error("Expect struct type result for the arithmetic "
1269 "with overflow instruction");
1270 // "Result Type must be from OpTypeStruct. The struct must have two members,
1271 // and the two members must be the same type."
1272 Type *ResElemTy = cast<StructType>(ResTy)->getElementType(0);
1273 ResTy = StructType::get(ResElemTy, ResElemTy);
1274 // Build SPIR-V types and constant(s) if needed.
1275 MachineIRBuilder MIRBuilder(I);
1276 SPIRVType *StructType = GR.getOrCreateSPIRVType(
1277 ResTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
1278 assert(I.getNumDefs() > 1 && "Not enough operands");
1279 SPIRVType *BoolType = GR.getOrCreateSPIRVBoolType(I, TII);
1280 unsigned N = GR.getScalarOrVectorComponentCount(ResType);
1281 if (N > 1)
1282 BoolType = GR.getOrCreateSPIRVVectorType(BoolType, N, I, TII);
1283 Register BoolTypeReg = GR.getSPIRVTypeID(BoolType);
1284 Register ZeroReg = buildZerosVal(ResType, I);
1285 // A new virtual register to store the result struct.
1286 Register StructVReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
1287 MRI->setRegClass(StructVReg, &SPIRV::IDRegClass);
1288 // Build the result name if needed.
1289 if (ResName.size() > 0)
1290 buildOpName(StructVReg, ResName, MIRBuilder);
1291 // Build the arithmetic with overflow instruction.
1292 MachineBasicBlock &BB = *I.getParent();
1293 auto MIB =
1294 BuildMI(BB, MIRBuilder.getInsertPt(), I.getDebugLoc(), TII.get(Opcode))
1295 .addDef(StructVReg)
1296 .addUse(GR.getSPIRVTypeID(StructType));
1297 for (unsigned i = I.getNumDefs(); i < I.getNumOperands(); ++i)
1298 MIB.addUse(I.getOperand(i).getReg());
1299 bool Result = MIB.constrainAllUses(TII, TRI, RBI);
1300 // Build instructions to extract fields of the instruction's result.
1301 // A new virtual register to store the higher part of the result struct.
1302 Register HigherVReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
1303 MRI->setRegClass(HigherVReg, &SPIRV::iIDRegClass);
1304 for (unsigned i = 0; i < I.getNumDefs(); ++i) {
1305 auto MIB =
1306 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1307 .addDef(i == 1 ? HigherVReg : I.getOperand(i).getReg())
1308 .addUse(GR.getSPIRVTypeID(ResType))
1309 .addUse(StructVReg)
1310 .addImm(i);
1311 Result &= MIB.constrainAllUses(TII, TRI, RBI);
1312 }
1313 // Build boolean value from the higher part.
1314 return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
1315 .addDef(I.getOperand(1).getReg())
1316 .addUse(BoolTypeReg)
1317 .addUse(HigherVReg)
1318 .addUse(ZeroReg)
1319 .constrainAllUses(TII, TRI, RBI);
1320}
1321
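// Lowers G_ATOMIC_CMPXCHG to OpAtomicCompareExchange, then rebuilds the
// {old value, success} result struct via OpIEqual and two OpCompositeInserts.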
1322bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
1323 const SPIRVType *ResType,
1324 MachineInstr &I) const {
1325 bool Result = true;
1326 Register ScopeReg;
1327 Register MemSemEqReg;
1328 Register MemSemNeqReg;
1329 Register Ptr = I.getOperand(2).getReg();
1330 if (!isa<GIntrinsic>(I)) {
1331 assert(I.hasOneMemOperand());
1332 const MachineMemOperand *MemOp = *I.memoperands_begin();
1333 unsigned Scope = static_cast<uint32_t>(getMemScope(
1334 GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
1335 auto ScopeConstant = buildI32Constant(Scope, I);
1336 ScopeReg = ScopeConstant.first;
1337 Result &= ScopeConstant.second;
1338
1339 unsigned ScSem = static_cast<uint32_t>(
1340 getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
1341 AtomicOrdering AO = MemOp->getSuccessOrdering();
1342 unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
1343 auto MemSemEqConstant = buildI32Constant(MemSemEq, I);
1344 MemSemEqReg = MemSemEqConstant.first;
1345 Result &= MemSemEqConstant.second;
1346 AtomicOrdering FO = MemOp->getFailureOrdering();
1347 unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
1348 if (MemSemEq == MemSemNeq)
1349 MemSemNeqReg = MemSemEqReg;
1350 else {
1351 auto MemSemNeqConstant = buildI32Constant(MemSemNeq, I);
1352 MemSemNeqReg = MemSemNeqConstant.first;
1353 Result &= MemSemNeqConstant.second;
1354 }
1355 } else {
1356 ScopeReg = I.getOperand(5).getReg();
1357 MemSemEqReg = I.getOperand(6).getReg();
1358 MemSemNeqReg = I.getOperand(7).getReg();
1359 }
1360
1361 Register Cmp = I.getOperand(3).getReg();
1362 Register Val = I.getOperand(4).getReg();
1363 SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
1364 Register ACmpRes = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1365 const DebugLoc &DL = I.getDebugLoc();
1366 Result &=
1367 BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
1368 .addDef(ACmpRes)
1369 .addUse(GR.getSPIRVTypeID(SpvValTy))
1370 .addUse(Ptr)
1371 .addUse(ScopeReg)
1372 .addUse(MemSemEqReg)
1373 .addUse(MemSemNeqReg)
1374 .addUse(Val)
1375 .addUse(Cmp)
1376 .constrainAllUses(TII, TRI, RBI);
1377 Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1378 SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
1379 Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
1380 .addDef(CmpSuccReg)
1381 .addUse(GR.getSPIRVTypeID(BoolTy))
1382 .addUse(ACmpRes)
1383 .addUse(Cmp)
1384 .constrainAllUses(TII, TRI, RBI);
1385 Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1386 Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
1387 .addDef(TmpReg)
1388 .addUse(GR.getSPIRVTypeID(ResType))
1389 .addUse(ACmpRes)
1390 .addUse(GR.getOrCreateUndef(I, ResType, TII))
1391 .addImm(0)
1392 .constrainAllUses(TII, TRI, RBI);
1393 return Result &&
1394 BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
1395 .addDef(ResVReg)
1396 .addUse(GR.getSPIRVTypeID(ResType))
1397 .addUse(CmpSuccReg)
1398 .addUse(TmpReg)
1399 .addImm(1)
1400 .constrainAllUses(TII, TRI, RBI);
1401}
1402
1403static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
1404 switch (SC) {
1405 case SPIRV::StorageClass::Workgroup:
1406 case SPIRV::StorageClass::CrossWorkgroup:
1407 case SPIRV::StorageClass::Function:
1408 return true;
1409 default:
1410 return false;
1411 }
1412}
1413
1414static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
1415 switch (SC) {
1416 case SPIRV::StorageClass::DeviceOnlyINTEL:
1417 case SPIRV::StorageClass::HostOnlyINTEL:
1418 return true;
1419 default:
1420 return false;
1421 }
1422}
1423
1424// Returns true if ResVReg is referred to only from global vars and OpName's.
1425static bool isASCastInGVar(MachineRegisterInfo *MRI, Register ResVReg) {
1426 bool IsGRef = false;
1427 bool IsAllowedRefs =
1428 std::all_of(MRI->use_instr_begin(ResVReg), MRI->use_instr_end(),
1429 [&IsGRef](auto const &It) {
1430 unsigned Opcode = It.getOpcode();
1431 if (Opcode == SPIRV::OpConstantComposite ||
1432 Opcode == SPIRV::OpVariable ||
1433 isSpvIntrinsic(It, Intrinsic::spv_init_global))
1434 return IsGRef = true;
1435 return Opcode == SPIRV::OpName;
1436 });
1437 return IsAllowedRefs && IsGRef;
1438}
1439
1440Register SPIRVInstructionSelector::getUcharPtrTypeReg(
1441 MachineInstr &I, SPIRV::StorageClass::StorageClass SC) const {
1442 return GR.getSPIRVTypeID(GR.getOrCreateSPIRVPointerType(
1443 GR.getOrCreateSPIRVIntegerType(8, I, TII), I, TII, SC));
1444}
1445
1446MachineInstrBuilder
1447SPIRVInstructionSelector::buildSpecConstantOp(MachineInstr &I, Register Dest,
1448 Register Src, Register DestType,
1449 uint32_t Opcode) const {
1450 return BuildMI(*I.getParent(), I, I.getDebugLoc(),
1451 TII.get(SPIRV::OpSpecConstantOp))
1452 .addDef(Dest)
1453 .addUse(DestType)
1454 .addImm(Opcode)
1455 .addUse(Src);
1456}
1457
1458MachineInstrBuilder
1459SPIRVInstructionSelector::buildConstGenericPtr(MachineInstr &I, Register SrcPtr,
1460 SPIRVType *SrcPtrTy) const {
1461 SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
1462 GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
1463 Register Tmp = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
1464 MRI->setType(Tmp, LLT::pointer(storageClassToAddressSpace(
1465 SPIRV::StorageClass::Generic),
1466 GR.getPointerSize()));
1467 MachineFunction *MF = I.getParent()->getParent();
1468 GR.assignSPIRVTypeToVReg(GenericPtrTy, Tmp, *MF);
1469 MachineInstrBuilder MIB = buildSpecConstantOp(
1470 I, Tmp, SrcPtr, GR.getSPIRVTypeID(GenericPtrTy),
1471 static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric));
1472 GR.add(MIB.getInstr(), MF, Tmp);
1473 return MIB;
1474}
1475
1476// In SPIR-V address space casting can only happen to and from the Generic
1477// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
1478// pointers to and from Generic pointers. As such, we can convert e.g. from
1479// Workgroup to Function by going via a Generic pointer as an intermediary. All
1480// other combinations can only be done by a bitcast, and are probably not safe.
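// For example, a Workgroup-to-Function cast is emitted as two instructions
// (a sketch; result ids are illustrative only):
//   %tmp = OpPtrCastToGeneric %generic_ptr_ty %src
//   %res = OpGenericCastToPtr %function_ptr_ty %tmp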
1481bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
1482 const SPIRVType *ResType,
1483 MachineInstr &I) const {
1484 MachineBasicBlock &BB = *I.getParent();
1485 const DebugLoc &DL = I.getDebugLoc();
1486
1487 Register SrcPtr = I.getOperand(1).getReg();
1488 SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
1489
1490 // Don't generate a cast for a null that may be represented by OpTypeInt.
1491 if (SrcPtrTy->getOpcode() != SPIRV::OpTypePointer ||
1492 ResType->getOpcode() != SPIRV::OpTypePointer)
1493 return BuildCOPY(ResVReg, SrcPtr, I);
1494
1495 SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtrTy);
1496 SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResType);
1497
1498 if (isASCastInGVar(MRI, ResVReg)) {
1499 // AddrSpaceCast uses within OpVariable and OpConstantComposite instructions
1500 // are expressed by OpSpecConstantOp with an Opcode.
1501 // TODO: maybe insert a check whether the Kernel capability was declared and
1502 // so PtrCastToGeneric/GenericCastToPtr are available.
1503 unsigned SpecOpcode =
1504 DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC)
1505 ? static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric)
1506 : (SrcSC == SPIRV::StorageClass::Generic &&
1507 isGenericCastablePtr(DstSC)
1508 ? static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr)
1509 : 0);
1510 // TODO: OpConstantComposite expects i8*, so we are forced to forget a
1511 // correct value of ResType and use general i8* instead. Maybe this should
1512 // be addressed in the emit-intrinsic step to infer a correct
1513 // OpConstantComposite type.
1514 if (SpecOpcode) {
1515 return buildSpecConstantOp(I, ResVReg, SrcPtr,
1516 getUcharPtrTypeReg(I, DstSC), SpecOpcode)
1517 .constrainAllUses(TII, TRI, RBI);
1518 } else if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
1519 MachineInstrBuilder MIB = buildConstGenericPtr(I, SrcPtr, SrcPtrTy);
1520 return MIB.constrainAllUses(TII, TRI, RBI) &&
1521 buildSpecConstantOp(
1522 I, ResVReg, MIB->getOperand(0).getReg(),
1523 getUcharPtrTypeReg(I, DstSC),
1524 static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr))
1525 .constrainAllUses(TII, TRI, RBI);
1526 }
1527 }
1528
1529 // don't generate a cast between identical storage classes
1530 if (SrcSC == DstSC)
1531 return BuildCOPY(ResVReg, SrcPtr, I);
1532
1533 if ((SrcSC == SPIRV::StorageClass::Function &&
1534 DstSC == SPIRV::StorageClass::Private) ||
1535 (DstSC == SPIRV::StorageClass::Function &&
1536 SrcSC == SPIRV::StorageClass::Private))
1537 return BuildCOPY(ResVReg, SrcPtr, I);
1538
1539 // Casting from an eligible pointer to Generic.
1540 if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
1541 return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
1542 // Casting from Generic to an eligible pointer.
1543 if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
1544 return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
1545 // Casting between 2 eligible pointers using Generic as an intermediary.
1546 if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
1547 Register Tmp = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1548 SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
1549 GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
1550 bool Result = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
1551 .addDef(Tmp)
1552 .addUse(GR.getSPIRVTypeID(GenericPtrTy))
1553 .addUse(SrcPtr)
1554 .constrainAllUses(TII, TRI, RBI);
1555 return Result && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
1556 .addDef(ResVReg)
1557 .addUse(GR.getSPIRVTypeID(ResType))
1558 .addUse(Tmp)
1559 .constrainAllUses(TII, TRI, RBI);
1560 }
1561
1562 // Check if instructions from the SPV_INTEL_usm_storage_classes extension may
1563 // be applied
1564 if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
1565 return selectUnOp(ResVReg, ResType, I,
1566 SPIRV::OpPtrCastToCrossWorkgroupINTEL);
1567 if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
1568 return selectUnOp(ResVReg, ResType, I,
1569 SPIRV::OpCrossWorkgroupCastToPtrINTEL);
1570 if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::Generic)
1571 return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
1572 if (SrcSC == SPIRV::StorageClass::Generic && isUSMStorageClass(DstSC))
1573 return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
1574
1575 // Bitcast for pointers requires that the address spaces must match
1576 return false;
1577}
1578
1579static unsigned getFCmpOpcode(unsigned PredNum) {
1580 auto Pred = static_cast<CmpInst::Predicate>(PredNum);
1581 switch (Pred) {
1582 case CmpInst::FCMP_OEQ:
1583 return SPIRV::OpFOrdEqual;
1584 case CmpInst::FCMP_OGE:
1585 return SPIRV::OpFOrdGreaterThanEqual;
1586 case CmpInst::FCMP_OGT:
1587 return SPIRV::OpFOrdGreaterThan;
1588 case CmpInst::FCMP_OLE:
1589 return SPIRV::OpFOrdLessThanEqual;
1590 case CmpInst::FCMP_OLT:
1591 return SPIRV::OpFOrdLessThan;
1592 case CmpInst::FCMP_ONE:
1593 return SPIRV::OpFOrdNotEqual;
1594 case CmpInst::FCMP_ORD:
1595 return SPIRV::OpOrdered;
1596 case CmpInst::FCMP_UEQ:
1597 return SPIRV::OpFUnordEqual;
1598 case CmpInst::FCMP_UGE:
1599 return SPIRV::OpFUnordGreaterThanEqual;
1600 case CmpInst::FCMP_UGT:
1601 return SPIRV::OpFUnordGreaterThan;
1602 case CmpInst::FCMP_ULE:
1603 return SPIRV::OpFUnordLessThanEqual;
1604 case CmpInst::FCMP_ULT:
1605 return SPIRV::OpFUnordLessThan;
1606 case CmpInst::FCMP_UNE:
1607 return SPIRV::OpFUnordNotEqual;
1608 case CmpInst::FCMP_UNO:
1609 return SPIRV::OpUnordered;
1610 default:
1611 llvm_unreachable("Unknown predicate type for FCmp");
1612 }
1613}
1614
1615static unsigned getICmpOpcode(unsigned PredNum) {
1616 auto Pred = static_cast<CmpInst::Predicate>(PredNum);
1617 switch (Pred) {
1618 case CmpInst::ICMP_EQ:
1619 return SPIRV::OpIEqual;
1620 case CmpInst::ICMP_NE:
1621 return SPIRV::OpINotEqual;
1622 case CmpInst::ICMP_SGE:
1623 return SPIRV::OpSGreaterThanEqual;
1624 case CmpInst::ICMP_SGT:
1625 return SPIRV::OpSGreaterThan;
1626 case CmpInst::ICMP_SLE:
1627 return SPIRV::OpSLessThanEqual;
1628 case CmpInst::ICMP_SLT:
1629 return SPIRV::OpSLessThan;
1630 case CmpInst::ICMP_UGE:
1631 return SPIRV::OpUGreaterThanEqual;
1632 case CmpInst::ICMP_UGT:
1633 return SPIRV::OpUGreaterThan;
1634 case CmpInst::ICMP_ULE:
1635 return SPIRV::OpULessThanEqual;
1636 case CmpInst::ICMP_ULT:
1637 return SPIRV::OpULessThan;
1638 default:
1639 llvm_unreachable("Unknown predicate type for ICmp");
1640 }
1641}
1642
1643static unsigned getPtrCmpOpcode(unsigned Pred) {
1644 switch (static_cast<CmpInst::Predicate>(Pred)) {
1645 case CmpInst::ICMP_EQ:
1646 return SPIRV::OpPtrEqual;
1647 case CmpInst::ICMP_NE:
1648 return SPIRV::OpPtrNotEqual;
1649 default:
1650 llvm_unreachable("Unknown predicate type for pointer comparison");
1651 }
1652}
1653
1654// Return the logical operation, or abort if none exists.
1655static unsigned getBoolCmpOpcode(unsigned PredNum) {
1656 auto Pred = static_cast<CmpInst::Predicate>(PredNum);
1657 switch (Pred) {
1658 case CmpInst::ICMP_EQ:
1659 return SPIRV::OpLogicalEqual;
1660 case CmpInst::ICMP_NE:
1661 return SPIRV::OpLogicalNotEqual;
1662 default:
1663 llvm_unreachable("Unknown predicate type for Bool comparison");
1664 }
1665}
1666
1667static APFloat getZeroFP(const Type *LLVMFloatTy) {
1668 if (!LLVMFloatTy)
1669 return APFloat::getZero(APFloat::IEEEsingle());
1670 switch (LLVMFloatTy->getScalarType()->getTypeID()) {
1671 case Type::HalfTyID:
1672 return APFloat::getZero(APFloat::IEEEhalf());
1673 default:
1674 case Type::FloatTyID:
1675 return APFloat::getZero(APFloat::IEEEsingle());
1676 case Type::DoubleTyID:
1677 return APFloat::getZero(APFloat::IEEEdouble());
1678 }
1679}
1680
1681static APFloat getOneFP(const Type *LLVMFloatTy) {
1682 if (!LLVMFloatTy)
1683 return APFloat::getOne(APFloat::IEEEsingle());
1684 switch (LLVMFloatTy->getScalarType()->getTypeID()) {
1685 case Type::HalfTyID:
1686 return APFloat::getOne(APFloat::IEEEhalf());
1687 default:
1688 case Type::FloatTyID:
1689 return APFloat::getOne(APFloat::IEEEsingle());
1690 case Type::DoubleTyID:
1691 return APFloat::getOne(APFloat::IEEEdouble());
1692 }
1693}
1694
1695bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg,
1696 const SPIRVType *ResType,
1697 MachineInstr &I,
1698 unsigned OpAnyOrAll) const {
1699 assert(I.getNumOperands() == 3);
1700 assert(I.getOperand(2).isReg());
1701 MachineBasicBlock &BB = *I.getParent();
1702 Register InputRegister = I.getOperand(2).getReg();
1703 SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
1704
1705 if (!InputType)
1706 report_fatal_error("Input Type could not be determined.");
1707
1708 bool IsBoolTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeBool);
1709 bool IsVectorTy = InputType->getOpcode() == SPIRV::OpTypeVector;
1710 if (IsBoolTy && !IsVectorTy) {
1711 assert(ResVReg == I.getOperand(0).getReg());
1712 return BuildCOPY(ResVReg, InputRegister, I);
1713 }
1714
1715 bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
1716 unsigned SpirvNotEqualId =
1717 IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
1718 SPIRVType *SpvBoolScalarTy = GR.getOrCreateSPIRVBoolType(I, TII);
1719 SPIRVType *SpvBoolTy = SpvBoolScalarTy;
1720 Register NotEqualReg = ResVReg;
1721
1722 if (IsVectorTy) {
1723 NotEqualReg = IsBoolTy ? InputRegister
1724 : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1725 const unsigned NumElts = InputType->getOperand(2).getImm();
1726 SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts, I, TII);
1727 }
1728
1729 bool Result = true;
1730 if (!IsBoolTy) {
1731 Register ConstZeroReg =
1732 IsFloatTy ? buildZerosValF(InputType, I) : buildZerosVal(InputType, I);
1733
1734 Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId))
1735 .addDef(NotEqualReg)
1736 .addUse(GR.getSPIRVTypeID(SpvBoolTy))
1737 .addUse(InputRegister)
1738 .addUse(ConstZeroReg)
1739 .constrainAllUses(TII, TRI, RBI);
1740 }
1741
1742 if (!IsVectorTy)
1743 return Result;
1744
1745 return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll))
1746 .addDef(ResVReg)
1747 .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
1748 .addUse(NotEqualReg)
1749 .constrainAllUses(TII, TRI, RBI);
1750}
1751
1752bool SPIRVInstructionSelector::selectAll(Register ResVReg,
1753 const SPIRVType *ResType,
1754 MachineInstr &I) const {
1755 return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAll);
1756}
1757
1758bool SPIRVInstructionSelector::selectAny(Register ResVReg,
1759 const SPIRVType *ResType,
1760 MachineInstr &I) const {
1761 return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAny);
1762}
1763
1764 // Select the OpDot instruction for the given floating-point dot product.
1765bool SPIRVInstructionSelector::selectFloatDot(Register ResVReg,
1766 const SPIRVType *ResType,
1767 MachineInstr &I) const {
1768 assert(I.getNumOperands() == 4);
1769 assert(I.getOperand(2).isReg());
1770 assert(I.getOperand(3).isReg());
1771
1772 [[maybe_unused]] SPIRVType *VecType =
1773 GR.getSPIRVTypeForVReg(I.getOperand(2).getReg());
1774
1775 assert(VecType->getOpcode() == SPIRV::OpTypeVector &&
1776 GR.getScalarOrVectorComponentCount(VecType) > 1 &&
1777 "dot product requires a vector of at least 2 components");
1778
1779 [[maybe_unused]] SPIRVType *EltType =
1780 GR.getSPIRVTypeForVReg(VecType->getOperand(1).getReg());
1781
1782 assert(EltType->getOpcode() == SPIRV::OpTypeFloat);
1783
1784 MachineBasicBlock &BB = *I.getParent();
1785 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpDot))
1786 .addDef(ResVReg)
1787 .addUse(GR.getSPIRVTypeID(ResType))
1788 .addUse(I.getOperand(2).getReg())
1789 .addUse(I.getOperand(3).getReg())
1790 .constrainAllUses(TII, TRI, RBI);
1791}
1792
1793bool SPIRVInstructionSelector::selectIntegerDot(Register ResVReg,
1794 const SPIRVType *ResType,
1795 MachineInstr &I,
1796 bool Signed) const {
1797 assert(I.getNumOperands() == 4);
1798 assert(I.getOperand(2).isReg());
1799 assert(I.getOperand(3).isReg());
1800 MachineBasicBlock &BB = *I.getParent();
1801
1802 auto DotOp = Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
1803 return BuildMI(BB, I, I.getDebugLoc(), TII.get(DotOp))
1804 .addDef(ResVReg)
1805 .addUse(GR.getSPIRVTypeID(ResType))
1806 .addUse(I.getOperand(2).getReg())
1807 .addUse(I.getOperand(3).getReg())
1808 .constrainAllUses(TII, TRI, RBI);
1809}
1810
1811 // Since SPIR-V before version 1.6 has no integer dot product instruction,
1812 // expand it by multiplying the vectors elementwise and summing the products.
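//
// As a sketch, for a 3-component vector the expansion below emits
// (illustrative SPIR-V; the %-names are hypothetical):
//   %mul = OpIMulV %a %b
//   %e0 = OpCompositeExtract %mul 0
//   %e1 = OpCompositeExtract %mul 1
//   %s0 = OpIAddS %e0 %e1
//   %e2 = OpCompositeExtract %mul 2
//   %res = OpIAddS %s0 %e2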
1813bool SPIRVInstructionSelector::selectIntegerDotExpansion(
1814 Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
1815 assert(I.getNumOperands() == 4);
1816 assert(I.getOperand(2).isReg());
1817 assert(I.getOperand(3).isReg());
1818 MachineBasicBlock &BB = *I.getParent();
1819
1820 // Multiply the vectors, then sum the results
1821 Register Vec0 = I.getOperand(2).getReg();
1822 Register Vec1 = I.getOperand(3).getReg();
1823 Register TmpVec = MRI->createVirtualRegister(GR.getRegClass(ResType));
1824 SPIRVType *VecType = GR.getSPIRVTypeForVReg(Vec0);
1825
1826 bool Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIMulV))
1827 .addDef(TmpVec)
1828 .addUse(GR.getSPIRVTypeID(VecType))
1829 .addUse(Vec0)
1830 .addUse(Vec1)
1831 .constrainAllUses(TII, TRI, RBI);
1832
1833 assert(VecType->getOpcode() == SPIRV::OpTypeVector &&
1834 GR.getScalarOrVectorComponentCount(VecType) > 1 &&
1835 "dot product requires a vector of at least 2 components");
1836
1837 Register Res = MRI->createVirtualRegister(GR.getRegClass(ResType));
1838 Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1839 .addDef(Res)
1840 .addUse(GR.getSPIRVTypeID(ResType))
1841 .addUse(TmpVec)
1842 .addImm(0)
1843 .constrainAllUses(TII, TRI, RBI);
1844
1845 for (unsigned i = 1; i < GR.getScalarOrVectorComponentCount(VecType); i++) {
1846 Register Elt = MRI->createVirtualRegister(GR.getRegClass(ResType));
1847
1848 Result &=
1849 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
1850 .addDef(Elt)
1851 .addUse(GR.getSPIRVTypeID(ResType))
1852 .addUse(TmpVec)
1853 .addImm(i)
1854 .constrainAllUses(TII, TRI, RBI);
1855
1856 Register Sum = i < GR.getScalarOrVectorComponentCount(VecType) - 1
1857 ? MRI->createVirtualRegister(GR.getRegClass(ResType))
1858 : ResVReg;
1859
1860 Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
1861 .addDef(Sum)
1862 .addUse(GR.getSPIRVTypeID(ResType))
1863 .addUse(Res)
1864 .addUse(Elt)
1865 .constrainAllUses(TII, TRI, RBI);
1866 Res = Sum;
1867 }
1868
1869 return Result;
1870}
1871
1872template <bool Signed>
1873bool SPIRVInstructionSelector::selectDot4AddPacked(Register ResVReg,
1874 const SPIRVType *ResType,
1875 MachineInstr &I) const {
1876 assert(I.getNumOperands() == 5);
1877 assert(I.getOperand(2).isReg());
1878 assert(I.getOperand(3).isReg());
1879 assert(I.getOperand(4).isReg());
1880 MachineBasicBlock &BB = *I.getParent();
1881
1882 auto DotOp = Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
1883 Register Dot = MRI->createVirtualRegister(GR.getRegClass(ResType));
1884 bool Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(DotOp))
1885 .addDef(Dot)
1886 .addUse(GR.getSPIRVTypeID(ResType))
1887 .addUse(I.getOperand(2).getReg())
1888 .addUse(I.getOperand(3).getReg())
1889 .constrainAllUses(TII, TRI, RBI);
1890
1891 return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
1892 .addDef(ResVReg)
1893 .addUse(GR.getSPIRVTypeID(ResType))
1894 .addUse(Dot)
1895 .addUse(I.getOperand(4).getReg())
1896 .constrainAllUses(TII, TRI, RBI);
1897}
1898
1899 // Since SPIR-V before version 1.6 has no DotProductInput4x8BitPacked support,
1900 // extract the i8 elements of the packed inputs, multiply them, and add each
1901 // result to the accumulator.
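//
// As a worked example with hypothetical inputs A = 0x04030201, B = 0x01010101
// and accumulator C: the i8 lanes of A are 1, 2, 3, 4 and every lane of B is
// 1, so the result is C + 1*1 + 2*1 + 3*1 + 4*1 = C + 10.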
1902template <bool Signed>
1903bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
1904 Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
1905 assert(I.getNumOperands() == 5);
1906 assert(I.getOperand(2).isReg());
1907 assert(I.getOperand(3).isReg());
1908 assert(I.getOperand(4).isReg());
1909 MachineBasicBlock &BB = *I.getParent();
1910
1911 bool Result = true;
1912
1913 // Acc = C
1914 Register Acc = I.getOperand(4).getReg();
1915 SPIRVType *EltType = GR.getOrCreateSPIRVIntegerType(8, I, TII);
1916 auto ExtractOp =
1917 Signed ? SPIRV::OpBitFieldSExtract : SPIRV::OpBitFieldUExtract;
1918
1919 // Extract the i8 element, multiply and add it to the accumulator
1920 for (unsigned i = 0; i < 4; i++) {
1921 // A[i]
1922 Register AElt = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1923 Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
1924 .addDef(AElt)
1925 .addUse(GR.getSPIRVTypeID(ResType))
1926 .addUse(I.getOperand(2).getReg())
1927 .addUse(GR.getOrCreateConstInt(i * 8, I, EltType, TII))
1928 .addUse(GR.getOrCreateConstInt(8, I, EltType, TII))
1929 .constrainAllUses(TII, TRI, RBI);
1930
1931 // B[i]
1932 Register BElt = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1933 Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
1934 .addDef(BElt)
1935 .addUse(GR.getSPIRVTypeID(ResType))
1936 .addUse(I.getOperand(3).getReg())
1937 .addUse(GR.getOrCreateConstInt(i * 8, I, EltType, TII))
1938 .addUse(GR.getOrCreateConstInt(8, I, EltType, TII))
1939 .constrainAllUses(TII, TRI, RBI);
1940
1941 // A[i] * B[i]
1942 Register Mul = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1943 Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIMulS))
1944 .addDef(Mul)
1945 .addUse(GR.getSPIRVTypeID(ResType))
1946 .addUse(AElt)
1947 .addUse(BElt)
1948 .constrainAllUses(TII, TRI, RBI);
1949
1950 // Discard the 24 highest bits so that the stored i32 register is i8-equivalent
1951 Register MaskMul = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1952 Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
1953 .addDef(MaskMul)
1954 .addUse(GR.getSPIRVTypeID(ResType))
1955 .addUse(Mul)
1956 .addUse(GR.getOrCreateConstInt(0, I, EltType, TII))
1957 .addUse(GR.getOrCreateConstInt(8, I, EltType, TII))
1958 .constrainAllUses(TII, TRI, RBI);
1959
1960 // Acc = Acc + A[i] * B[i]
1961 Register Sum =
1962 i < 3 ? MRI->createVirtualRegister(&SPIRV::IDRegClass) : ResVReg;
1963 Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
1964 .addDef(Sum)
1965 .addUse(GR.getSPIRVTypeID(ResType))
1966 .addUse(Acc)
1967 .addUse(MaskMul)
1968 .constrainAllUses(TII, TRI, RBI);
1969
1970 Acc = Sum;
1971 }
1972
1973 return Result;
1974}
1975
1976/// Transform saturate(x) to clamp(x, 0.0f, 1.0f) as SPIRV
1977/// does not have a saturate builtin.
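///
/// E.g. saturate(%x) is selected as (an illustrative SPIR-V sketch):
///   %res = OpExtInst %ty GLSL.std.450 FClamp %x %zero %one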
1978bool SPIRVInstructionSelector::selectSaturate(Register ResVReg,
1979 const SPIRVType *ResType,
1980 MachineInstr &I) const {
1981 assert(I.getNumOperands() == 3);
1982 assert(I.getOperand(2).isReg());
1983 MachineBasicBlock &BB = *I.getParent();
1984 Register VZero = buildZerosValF(ResType, I);
1985 Register VOne = buildOnesValF(ResType, I);
1986
1987 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
1988 .addDef(ResVReg)
1989 .addUse(GR.getSPIRVTypeID(ResType))
1990 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
1991 .addImm(GL::FClamp)
1992 .addUse(I.getOperand(2).getReg())
1993 .addUse(VZero)
1994 .addUse(VOne)
1995 .constrainAllUses(TII, TRI, RBI);
1996}
1997
1998bool SPIRVInstructionSelector::selectSign(Register ResVReg,
1999 const SPIRVType *ResType,
2000 MachineInstr &I) const {
2001 assert(I.getNumOperands() == 3);
2002 assert(I.getOperand(2).isReg());
2003 MachineBasicBlock &BB = *I.getParent();
2004 Register InputRegister = I.getOperand(2).getReg();
2005 SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
2006 auto &DL = I.getDebugLoc();
2007
2008 if (!InputType)
2009 report_fatal_error("Input Type could not be determined.");
2010
2011 bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
2012
2013 unsigned SignBitWidth = GR.getScalarOrVectorBitWidth(InputType);
2014 unsigned ResBitWidth = GR.getScalarOrVectorBitWidth(ResType);
2015
2016 bool NeedsConversion = IsFloatTy || SignBitWidth != ResBitWidth;
2017
2018 auto SignOpcode = IsFloatTy ? GL::FSign : GL::SSign;
2019 Register SignReg = NeedsConversion
2020 ? MRI->createVirtualRegister(&SPIRV::IDRegClass)
2021 : ResVReg;
2022
2023 bool Result =
2024 BuildMI(BB, I, DL, TII.get(SPIRV::OpExtInst))
2025 .addDef(SignReg)
2026 .addUse(GR.getSPIRVTypeID(InputType))
2027 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
2028 .addImm(SignOpcode)
2029 .addUse(InputRegister)
2030 .constrainAllUses(TII, TRI, RBI);
2031
2032 if (NeedsConversion) {
2033 auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
2034 Result &= BuildMI(*I.getParent(), I, DL, TII.get(ConvertOpcode))
2035 .addDef(ResVReg)
2036 .addUse(GR.getSPIRVTypeID(ResType))
2037 .addUse(SignReg)
2038 .constrainAllUses(TII, TRI, RBI);
2039 }
2040
2041 return Result;
2042}
2043
2044bool SPIRVInstructionSelector::selectWaveOpInst(Register ResVReg,
2045 const SPIRVType *ResType,
2046 MachineInstr &I,
2047 unsigned Opcode) const {
2048 MachineBasicBlock &BB = *I.getParent();
2049 SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
2050
2051 auto BMI = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
2052 .addDef(ResVReg)
2053 .addUse(GR.getSPIRVTypeID(ResType))
2054 .addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I,
2055 IntTy, TII));
2056
2057 for (unsigned J = 2; J < I.getNumOperands(); J++) {
2058 BMI.addUse(I.getOperand(J).getReg());
2059 }
2060
2061 return BMI.constrainAllUses(TII, TRI, RBI);
2062}
2063
2064bool SPIRVInstructionSelector::selectWaveActiveCountBits(
2065 Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
2066
2067 SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
2068 SPIRVType *BallotType = GR.getOrCreateSPIRVVectorType(IntTy, 4, I, TII);
2069 Register BallotReg = MRI->createVirtualRegister(GR.getRegClass(BallotType));
2070 bool Result = selectWaveOpInst(BallotReg, BallotType, I,
2071 SPIRV::OpGroupNonUniformBallot);
2072
2073 MachineBasicBlock &BB = *I.getParent();
2074 Result &=
2075 BuildMI(BB, I, I.getDebugLoc(),
2076 TII.get(SPIRV::OpGroupNonUniformBallotBitCount))
2077 .addDef(ResVReg)
2078 .addUse(GR.getSPIRVTypeID(ResType))
2079 .addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I, IntTy, TII))
2080 .addImm(SPIRV::GroupOperation::Reduce)
2081 .addUse(BallotReg)
2082 .constrainAllUses(TII, TRI, RBI);
2083
2084 return Result;
2085}
2086
2087bool SPIRVInstructionSelector::selectWaveReduceSum(Register ResVReg,
2088 const SPIRVType *ResType,
2089 MachineInstr &I) const {
2090 assert(I.getNumOperands() == 3);
2091 assert(I.getOperand(2).isReg());
2092 MachineBasicBlock &BB = *I.getParent();
2093 Register InputRegister = I.getOperand(2).getReg();
2094 SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
2095
2096 if (!InputType)
2097 report_fatal_error("Input Type could not be determined.");
2098
2099 SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
2100 // Retrieve the operation to use based on the input type
2101 bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
2102 auto Opcode =
2103 IsFloatTy ? SPIRV::OpGroupNonUniformFAdd : SPIRV::OpGroupNonUniformIAdd;
2104 return BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
2105 .addDef(ResVReg)
2106 .addUse(GR.getSPIRVTypeID(ResType))
2107 .addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I, IntTy, TII))
2108 .addImm(SPIRV::GroupOperation::Reduce)
2109 .addUse(I.getOperand(2).getReg()).constrainAllUses(TII, TRI, RBI);
2110}
2111
2112bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
2113 const SPIRVType *ResType,
2114 MachineInstr &I) const {
2115 MachineBasicBlock &BB = *I.getParent();
2116 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
2117 .addDef(ResVReg)
2118 .addUse(GR.getSPIRVTypeID(ResType))
2119 .addUse(I.getOperand(1).getReg())
2120 .constrainAllUses(TII, TRI, RBI);
2121}
2122
2123bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
2124 const SPIRVType *ResType,
2125 MachineInstr &I) const {
2126 // There is no way to implement `freeze` correctly without support on the
2127 // SPIR-V standard side, but we can at least address the simple (static) case
2128 // when the presence of an undef/poison value is obvious. The main benefit of
2129 // even incomplete `freeze` support is that it prevents translation from
2130 // crashing due to missing legalization and instruction selection support.
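// For instance, freeze of a value defined by OpUndef (or by ASSIGN_TYPE of a
// G_IMPLICIT_DEF) is selected below as an OpConstantNull of its type, while
// freeze of any other value degenerates to a plain COPY -- a best-effort
// sketch of the handled cases, not full freeze semantics.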
2131 if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
2132 return false;
2133 Register OpReg = I.getOperand(1).getReg();
2134 if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
2135 Register Reg;
2136 switch (Def->getOpcode()) {
2137 case SPIRV::ASSIGN_TYPE:
2138 if (MachineInstr *AssignToDef =
2139 MRI->getVRegDef(Def->getOperand(1).getReg())) {
2140 if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
2141 Reg = Def->getOperand(2).getReg();
2142 }
2143 break;
2144 case SPIRV::OpUndef:
2145 Reg = Def->getOperand(1).getReg();
2146 break;
2147 }
2148 unsigned DestOpCode;
2149 if (Reg.isValid()) {
2150 DestOpCode = SPIRV::OpConstantNull;
2151 } else {
2152 DestOpCode = TargetOpcode::COPY;
2153 Reg = OpReg;
2154 }
2155 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
2156 .addDef(I.getOperand(0).getReg())
2157 .addUse(Reg)
2158 .constrainAllUses(TII, TRI, RBI);
2159 }
2160 return false;
2161}
2162
2163 static unsigned getArrayComponentCount(MachineRegisterInfo *MRI,
2164 const SPIRVType *ResType) {
2165 Register OpReg = ResType->getOperand(2).getReg();
2166 SPIRVType *OpDef = MRI->getVRegDef(OpReg);
2167 if (!OpDef)
2168 return 0;
2169 if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
2170 OpDef->getOperand(1).isReg()) {
2171 if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
2172 OpDef = RefDef;
2173 }
2174 unsigned N = OpDef->getOpcode() == TargetOpcode::G_CONSTANT
2175 ? OpDef->getOperand(1).getCImm()->getValue().getZExtValue()
2176 : 0;
2177 return N;
2178}
2179
2180// Return true if the type represents a constant register
2181 static bool isConstReg(MachineRegisterInfo *MRI, SPIRVType *OpDef,
2182 SmallPtrSet<SPIRVType *, 4> &Visited) {
2183 if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
2184 OpDef->getOperand(1).isReg()) {
2185 if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
2186 OpDef = RefDef;
2187 }
2188
2189 if (Visited.contains(OpDef))
2190 return true;
2191 Visited.insert(OpDef);
2192
2193 unsigned Opcode = OpDef->getOpcode();
2194 switch (Opcode) {
2195 case TargetOpcode::G_CONSTANT:
2196 case TargetOpcode::G_FCONSTANT:
2197 return true;
2198 case TargetOpcode::G_INTRINSIC:
2199 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
2200 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
2201 return cast<GIntrinsic>(*OpDef).getIntrinsicID() ==
2202 Intrinsic::spv_const_composite;
2203 case TargetOpcode::G_BUILD_VECTOR:
2204 case TargetOpcode::G_SPLAT_VECTOR: {
2205 for (unsigned i = OpDef->getNumExplicitDefs(); i < OpDef->getNumOperands();
2206 i++) {
2207 SPIRVType *OpNestedDef =
2208 OpDef->getOperand(i).isReg()
2209 ? MRI->getVRegDef(OpDef->getOperand(i).getReg())
2210 : nullptr;
2211 if (OpNestedDef && !isConstReg(MRI, OpNestedDef, Visited))
2212 return false;
2213 }
2214 return true;
2215 }
2216 }
2217 return false;
2218}
2219
2220// Return true if the virtual register represents a constant
2221 static bool isConstReg(MachineRegisterInfo *MRI, Register OpReg) {
2222 SmallPtrSet<SPIRVType *, 4> Visited;
2223 if (SPIRVType *OpDef = MRI->getVRegDef(OpReg))
2224 return isConstReg(MRI, OpDef, Visited);
2225 return false;
2226}
2227
2228bool SPIRVInstructionSelector::selectBuildVector(Register ResVReg,
2229 const SPIRVType *ResType,
2230 MachineInstr &I) const {
2231 unsigned N = 0;
2232 if (ResType->getOpcode() == SPIRV::OpTypeVector)
2233 N = GR.getScalarOrVectorComponentCount(ResType);
2234 else if (ResType->getOpcode() == SPIRV::OpTypeArray)
2235 N = getArrayComponentCount(MRI, ResType);
2236 else
2237 report_fatal_error("Cannot select G_BUILD_VECTOR with a non-vector result");
2238 if (I.getNumExplicitOperands() - I.getNumExplicitDefs() != N)
2239 report_fatal_error("G_BUILD_VECTOR and the result type are inconsistent");
2240
2241 // check if we may construct a constant vector
2242 bool IsConst = true;
2243 for (unsigned i = I.getNumExplicitDefs();
2244 i < I.getNumExplicitOperands() && IsConst; ++i)
2245 if (!isConstReg(MRI, I.getOperand(i).getReg()))
2246 IsConst = false;
2247
2248 if (!IsConst && N < 2)
2250 "There must be at least two constituent operands in a vector");
2251
2252 MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
2253 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
2254 TII.get(IsConst ? SPIRV::OpConstantComposite
2255 : SPIRV::OpCompositeConstruct))
2256 .addDef(ResVReg)
2257 .addUse(GR.getSPIRVTypeID(ResType));
2258 for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
2259 MIB.addUse(I.getOperand(i).getReg());
2260 return MIB.constrainAllUses(TII, TRI, RBI);
2261}
2262
2263bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
2264 const SPIRVType *ResType,
2265 MachineInstr &I) const {
2266 unsigned N = 0;
2267 if (ResType->getOpcode() == SPIRV::OpTypeVector)
2268 N = GR.getScalarOrVectorComponentCount(ResType);
2269 else if (ResType->getOpcode() == SPIRV::OpTypeArray)
2270 N = getArrayComponentCount(MRI, ResType);
2271 else
2272 report_fatal_error("Cannot select G_SPLAT_VECTOR with a non-vector result");
2273
2274 unsigned OpIdx = I.getNumExplicitDefs();
2275 if (!I.getOperand(OpIdx).isReg())
2276 report_fatal_error("Unexpected argument in G_SPLAT_VECTOR");
2277
2278 // check if we may construct a constant vector
2279 Register OpReg = I.getOperand(OpIdx).getReg();
2280 bool IsConst = isConstReg(MRI, OpReg);
2281
2282 if (!IsConst && N < 2)
2284 "There must be at least two constituent operands in a vector");
2285
2286 MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
2287 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
2288 TII.get(IsConst ? SPIRV::OpConstantComposite
2289 : SPIRV::OpCompositeConstruct))
2290 .addDef(ResVReg)
2291 .addUse(GR.getSPIRVTypeID(ResType));
2292 for (unsigned i = 0; i < N; ++i)
2293 MIB.addUse(OpReg);
2294 return MIB.constrainAllUses(TII, TRI, RBI);
2295}
2296
2297bool SPIRVInstructionSelector::selectDiscard(Register ResVReg,
2298 const SPIRVType *ResType,
2299 MachineInstr &I) const {
2300
2301 unsigned Opcode;
2302
2303 if (STI.canUseExtension(
2304 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation) ||
2305 STI.isAtLeastSPIRVVer(llvm::VersionTuple(1, 6))) {
2306 Opcode = SPIRV::OpDemoteToHelperInvocation;
2307 } else {
2308 Opcode = SPIRV::OpKill;
2309 // OpKill must be the last operation of any basic block.
2310 if (MachineInstr *NextI = I.getNextNode()) {
2311 GR.invalidateMachineInstr(NextI);
2312 NextI->removeFromParent();
2313 }
2314 }
2315
2316 MachineBasicBlock &BB = *I.getParent();
2317 return BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
2318 .constrainAllUses(TII, TRI, RBI);
2319}
2320
2321bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
2322 const SPIRVType *ResType,
2323 unsigned CmpOpc,
2324 MachineInstr &I) const {
2325 Register Cmp0 = I.getOperand(2).getReg();
2326 Register Cmp1 = I.getOperand(3).getReg();
2327 assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
2328 GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
2329 "CMP operands should have the same type");
2330 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
2331 .addDef(ResVReg)
2332 .addUse(GR.getSPIRVTypeID(ResType))
2333 .addUse(Cmp0)
2334 .addUse(Cmp1)
2335 .constrainAllUses(TII, TRI, RBI);
2336}
2337
2338bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
2339 const SPIRVType *ResType,
2340 MachineInstr &I) const {
2341 auto Pred = I.getOperand(1).getPredicate();
2342 unsigned CmpOpc;
2343
2344 Register CmpOperand = I.getOperand(2).getReg();
2345 if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
2346 CmpOpc = getPtrCmpOpcode(Pred);
2347 else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
2348 CmpOpc = getBoolCmpOpcode(Pred);
2349 else
2350 CmpOpc = getICmpOpcode(Pred);
2351 return selectCmp(ResVReg, ResType, CmpOpc, I);
2352}
2353
2354void SPIRVInstructionSelector::renderFImm64(MachineInstrBuilder &MIB,
2355 const MachineInstr &I,
2356 int OpIdx) const {
2357 assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
2358 "Expected G_FCONSTANT");
2359 const ConstantFP *FPImm = I.getOperand(1).getFPImm();
2360 addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
2361}
2362
2363void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
2364 const MachineInstr &I,
2365 int OpIdx) const {
2366 assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
2367 "Expected G_CONSTANT");
2368 addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
2369}
2370
2371std::pair<Register, bool>
2372SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
2373 const SPIRVType *ResType) const {
2374 Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
2375 const SPIRVType *SpvI32Ty =
2376 ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
2377 // Find a constant in DT or build a new one.
2378 auto ConstInt = ConstantInt::get(LLVMTy, Val);
2379 Register NewReg = GR.find(ConstInt, GR.CurMF);
2380 bool Result = true;
2381 if (!NewReg.isValid()) {
2382 NewReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
2383 GR.add(ConstInt, GR.CurMF, NewReg);
2384 MachineInstrBuilder MI;
2385 MachineBasicBlock &BB = *I.getParent();
2386 if (Val == 0) {
2387 MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
2388 .addDef(NewReg)
2389 .addUse(GR.getSPIRVTypeID(SpvI32Ty));
2390 } else {
2391 MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
2392 .addDef(NewReg)
2393 .addUse(GR.getSPIRVTypeID(SpvI32Ty))
2394 .addImm(APInt(32, Val).getZExtValue());
2395 }
2396 Result &= constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
2397 }
2398 return {NewReg, Result};
2399}
2400
2401bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
2402 const SPIRVType *ResType,
2403 MachineInstr &I) const {
2404 unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
2405 return selectCmp(ResVReg, ResType, CmpOp, I);
2406}
2407
2408Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
2409 MachineInstr &I) const {
2410 // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
2411 bool ZeroAsNull = STI.isOpenCLEnv();
2412 if (ResType->getOpcode() == SPIRV::OpTypeVector)
2413 return GR.getOrCreateConstVector(0UL, I, ResType, TII, ZeroAsNull);
2414 return GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
2415}
2416
2417Register SPIRVInstructionSelector::buildZerosValF(const SPIRVType *ResType,
2418 MachineInstr &I) const {
2419 // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
2420 bool ZeroAsNull = STI.isOpenCLEnv();
2421 APFloat VZero = getZeroFP(GR.getTypeForSPIRVType(ResType));
2422 if (ResType->getOpcode() == SPIRV::OpTypeVector)
2423 return GR.getOrCreateConstVector(VZero, I, ResType, TII, ZeroAsNull);
2424 return GR.getOrCreateConstFP(VZero, I, ResType, TII, ZeroAsNull);
2425}
2426
2427Register SPIRVInstructionSelector::buildOnesValF(const SPIRVType *ResType,
2428 MachineInstr &I) const {
2429 // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
2430 bool ZeroAsNull = STI.isOpenCLEnv();
2431 APFloat VOne = getOneFP(GR.getTypeForSPIRVType(ResType));
2432 if (ResType->getOpcode() == SPIRV::OpTypeVector)
2433 return GR.getOrCreateConstVector(VOne, I, ResType, TII, ZeroAsNull);
2434 return GR.getOrCreateConstFP(VOne, I, ResType, TII, ZeroAsNull);
2435}
2436
2437Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
2438 const SPIRVType *ResType,
2439 MachineInstr &I) const {
2440 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
2441 APInt One =
2442 AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
2443 if (ResType->getOpcode() == SPIRV::OpTypeVector)
2444 return GR.getOrCreateConstVector(One.getZExtValue(), I, ResType, TII);
2445 return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
2446}
2447
2448bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
2449 const SPIRVType *ResType,
2450 MachineInstr &I,
2451 bool IsSigned) const {
2452 // To extend a bool, we need to use OpSelect between constants.
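// E.g. `zext i1 %b to i32` becomes (an illustrative sketch of the emitted
// SPIR-V):
//   %res = OpSelect %int32 %b %const_1 %const_0
// and for sext the "true" operand is all-ones (-1) rather than 1.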
2453 Register ZeroReg = buildZerosVal(ResType, I);
2454 Register OneReg = buildOnesVal(IsSigned, ResType, I);
2455 bool IsScalarBool =
2456 GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
2457 unsigned Opcode =
2458 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
2459 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
2460 .addDef(ResVReg)
2461 .addUse(GR.getSPIRVTypeID(ResType))
2462 .addUse(I.getOperand(1).getReg())
2463 .addUse(OneReg)
2464 .addUse(ZeroReg)
2465 .constrainAllUses(TII, TRI, RBI);
2466}
2467
2468bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
2469 const SPIRVType *ResType,
2470 MachineInstr &I, bool IsSigned,
2471 unsigned Opcode) const {
2472 Register SrcReg = I.getOperand(1).getReg();
2473 // We could convert a bool value directly to float without OpConvert*ToF;
2474 // however, the translator generates OpSelect+OpConvert*ToF, so we do the same.
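// E.g. `uitofp i1 %b to float` is emitted as (an illustrative sketch):
//   %sel = OpSelect %int32 %b %const_1 %const_0
//   %res = OpConvertUToF %float %sel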
2475 if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
2476 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
2477 SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
2478 if (ResType->getOpcode() == SPIRV::OpTypeVector) {
2479 const unsigned NumElts = ResType->getOperand(2).getImm();
2480 TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
2481 }
2482 SrcReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2483 selectSelect(SrcReg, TmpType, I, false);
2484 }
2485 return selectOpWithSrcs(ResVReg, ResType, I, {SrcReg}, Opcode);
2486}
2487
2488bool SPIRVInstructionSelector::selectExt(Register ResVReg,
2489 const SPIRVType *ResType,
2490 MachineInstr &I, bool IsSigned) const {
2491 Register SrcReg = I.getOperand(1).getReg();
2492 if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool))
2493 return selectSelect(ResVReg, ResType, I, IsSigned);
2494
2495 SPIRVType *SrcType = GR.getSPIRVTypeForVReg(SrcReg);
2496 if (SrcType == ResType)
2497 return BuildCOPY(ResVReg, SrcReg, I);
2498
2499 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2500 return selectUnOp(ResVReg, ResType, I, Opcode);
2501}
2502
2503bool SPIRVInstructionSelector::selectSUCmp(Register ResVReg,
2504 const SPIRVType *ResType,
2505 MachineInstr &I,
2506 bool IsSigned) const {
2507 MachineIRBuilder MIRBuilder(I);
2508 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2509 MachineBasicBlock &BB = *I.getParent();
2510 // Ensure we have bool.
2511 SPIRVType *BoolType = GR.getOrCreateSPIRVBoolType(I, TII);
2512 unsigned N = GR.getScalarOrVectorComponentCount(ResType);
2513 if (N > 1)
2514 BoolType = GR.getOrCreateSPIRVVectorType(BoolType, N, I, TII);
2515 Register BoolTypeReg = GR.getSPIRVTypeID(BoolType);
2516 // Build less-than-equal and less-than.
2517 // TODO: replace with one-liner createVirtualRegister() from
2518 // llvm/lib/Target/SPIRV/SPIRVUtils.cpp when PR #116609 is merged.
2519 Register IsLessEqReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
2520 MRI->setType(IsLessEqReg, LLT::scalar(64));
2521 GR.assignSPIRVTypeToVReg(ResType, IsLessEqReg, MIRBuilder.getMF());
2522 bool Result = BuildMI(BB, I, I.getDebugLoc(),
2523 TII.get(IsSigned ? SPIRV::OpSLessThanEqual
2524 : SPIRV::OpULessThanEqual))
2525 .addDef(IsLessEqReg)
2526 .addUse(BoolTypeReg)
2527 .addUse(I.getOperand(1).getReg())
2528 .addUse(I.getOperand(2).getReg())
2529 .constrainAllUses(TII, TRI, RBI);
2530 Register IsLessReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
2531 MRI->setType(IsLessReg, LLT::scalar(64));
2532 GR.assignSPIRVTypeToVReg(ResType, IsLessReg, MIRBuilder.getMF());
2533 Result &= BuildMI(BB, I, I.getDebugLoc(),
2534 TII.get(IsSigned ? SPIRV::OpSLessThan : SPIRV::OpULessThan))
2535 .addDef(IsLessReg)
2536 .addUse(BoolTypeReg)
2537 .addUse(I.getOperand(1).getReg())
2538 .addUse(I.getOperand(2).getReg())
2539 .constrainAllUses(TII, TRI, RBI);
2540 // Build selects.
2541 Register ResTypeReg = GR.getSPIRVTypeID(ResType);
2542 Register NegOneOrZeroReg =
2543 MRI->createVirtualRegister(GR.getRegClass(ResType));
2544 MRI->setType(NegOneOrZeroReg, LLT::scalar(64));
2545 GR.assignSPIRVTypeToVReg(ResType, NegOneOrZeroReg, MIRBuilder.getMF());
2546 unsigned SelectOpcode =
2547 N > 1 ? SPIRV::OpSelectVIVCond : SPIRV::OpSelectSISCond;
2548 Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SelectOpcode))
2549 .addDef(NegOneOrZeroReg)
2550 .addUse(ResTypeReg)
2551 .addUse(IsLessReg)
2552 .addUse(buildOnesVal(true, ResType, I)) // -1
2553 .addUse(buildZerosVal(ResType, I))
2554 .constrainAllUses(TII, TRI, RBI);
2555 return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(SelectOpcode))
2556 .addDef(ResVReg)
2557 .addUse(ResTypeReg)
2558 .addUse(IsLessEqReg)
2559 .addUse(NegOneOrZeroReg) // -1 or 0
2560 .addUse(buildOnesVal(false, ResType, I))
2561 .constrainAllUses(TII, TRI, RBI);
2562}
2563
2564bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
2565 Register ResVReg,
2566 MachineInstr &I,
2567 const SPIRVType *IntTy,
2568 const SPIRVType *BoolTy) const {
2569 // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
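// E.g. `trunc i32 %x to i1` becomes (an illustrative sketch):
//   %bit = OpBitwiseAnd %int32 %x %const_1
//   %res = OpINotEqual %bool %bit %const_0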
2570 Register BitIntReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2571 bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
2572 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
2573 Register Zero = buildZerosVal(IntTy, I);
2574 Register One = buildOnesVal(false, IntTy, I);
2575 MachineBasicBlock &BB = *I.getParent();
2576 bool Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
2577 .addDef(BitIntReg)
2578 .addUse(GR.getSPIRVTypeID(IntTy))
2579 .addUse(IntReg)
2580 .addUse(One)
2581 .constrainAllUses(TII, TRI, RBI);
2582 return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
2583 .addDef(ResVReg)
2584 .addUse(GR.getSPIRVTypeID(BoolTy))
2585 .addUse(BitIntReg)
2586 .addUse(Zero)
2587 .constrainAllUses(TII, TRI, RBI);
2588}
2589
2590bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
2591 const SPIRVType *ResType,
2592 MachineInstr &I) const {
2593 Register IntReg = I.getOperand(1).getReg();
2594 const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
2595 if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool))
2596 return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
2597 if (ArgType == ResType)
2598 return BuildCOPY(ResVReg, IntReg, I);
2599 bool IsSigned = GR.isScalarOrVectorSigned(ResType);
2600 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2601 return selectUnOp(ResVReg, ResType, I, Opcode);
2602}
2603
2604bool SPIRVInstructionSelector::selectConst(Register ResVReg,
2605 const SPIRVType *ResType,
2606 const APInt &Imm,
2607 MachineInstr &I) const {
2608 unsigned TyOpcode = ResType->getOpcode();
2609 assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
2610 MachineBasicBlock &BB = *I.getParent();
2611 if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
2612 Imm.isZero())
2613 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
2614 .addDef(ResVReg)
2615 .addUse(GR.getSPIRVTypeID(ResType))
2616 .constrainAllUses(TII, TRI, RBI);
2617 if (TyOpcode == SPIRV::OpTypeInt) {
2618 assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
2619 Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
2620 return Reg == ResVReg ? true : BuildCOPY(ResVReg, Reg, I);
2621 }
2622 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
2623 .addDef(ResVReg)
2624 .addUse(GR.getSPIRVTypeID(ResType));
2625 // <=32-bit integers should be caught by the sdag pattern.
2626 assert(Imm.getBitWidth() > 32);
2627 addNumImm(Imm, MIB);
2628 return MIB.constrainAllUses(TII, TRI, RBI);
2629}
2630
2631bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
2632 const SPIRVType *ResType,
2633 MachineInstr &I) const {
2634 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
2635 .addDef(ResVReg)
2636 .addUse(GR.getSPIRVTypeID(ResType))
2637 .constrainAllUses(TII, TRI, RBI);
2638}
2639
2640 static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
2641 assert(MO.isReg());
2642 const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
2643 if (TypeInst->getOpcode() == SPIRV::ASSIGN_TYPE) {
2644 assert(TypeInst->getOperand(1).isReg());
2645 MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
2646 return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
2647 }
2648 return TypeInst->getOpcode() == SPIRV::OpConstantI;
2649}
2650
2651static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
2652 const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
2653 if (TypeInst->getOpcode() == SPIRV::OpConstantI)
2654 return TypeInst->getOperand(2).getImm();
2655 MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
2656 assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
2657 return ImmInst->getOperand(1).getCImm()->getZExtValue();
2658}
2659
2660bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
2661 const SPIRVType *ResType,
2662 MachineInstr &I) const {
2663 MachineBasicBlock &BB = *I.getParent();
2664 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
2665 .addDef(ResVReg)
2666 .addUse(GR.getSPIRVTypeID(ResType))
2667 // object to insert
2668 .addUse(I.getOperand(3).getReg())
2669 // composite to insert into
2670 .addUse(I.getOperand(2).getReg());
2671 for (unsigned i = 4; i < I.getNumOperands(); i++)
2672 MIB.addImm(foldImm(I.getOperand(i), MRI));
2673 return MIB.constrainAllUses(TII, TRI, RBI);
2674}
2675
2676bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
2677 const SPIRVType *ResType,
2678 MachineInstr &I) const {
2679 MachineBasicBlock &BB = *I.getParent();
2680 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
2681 .addDef(ResVReg)
2682 .addUse(GR.getSPIRVTypeID(ResType))
2683 .addUse(I.getOperand(2).getReg());
2684 for (unsigned i = 3; i < I.getNumOperands(); i++)
2685 MIB.addImm(foldImm(I.getOperand(i), MRI));
2686 return MIB.constrainAllUses(TII, TRI, RBI);
2687}
2688
2689bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
2690 const SPIRVType *ResType,
2691 MachineInstr &I) const {
2692 if (isImm(I.getOperand(4), MRI))
2693 return selectInsertVal(ResVReg, ResType, I);
2694 MachineBasicBlock &BB = *I.getParent();
2695 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
2696 .addDef(ResVReg)
2697 .addUse(GR.getSPIRVTypeID(ResType))
2698 .addUse(I.getOperand(2).getReg())
2699 .addUse(I.getOperand(3).getReg())
2700 .addUse(I.getOperand(4).getReg())
2701 .constrainAllUses(TII, TRI, RBI);
2702}
2703
2704bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
2705 const SPIRVType *ResType,
2706 MachineInstr &I) const {
2707 if (isImm(I.getOperand(3), MRI))
2708 return selectExtractVal(ResVReg, ResType, I);
2709 MachineBasicBlock &BB = *I.getParent();
2710 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
2711 .addDef(ResVReg)
2712 .addUse(GR.getSPIRVTypeID(ResType))
2713 .addUse(I.getOperand(2).getReg())
2714 .addUse(I.getOperand(3).getReg())
2715 .constrainAllUses(TII, TRI, RBI);
2716}
2717
2718bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
2719 const SPIRVType *ResType,
2720 MachineInstr &I) const {
2721 const bool IsGEPInBounds = I.getOperand(2).getImm();
2722
2723 // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator only
2724 // relies on PtrAccessChain, so we'll try not to deviate. For Vulkan however,
2725 // we have to use Op[InBounds]AccessChain.
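// Note that OpAccessChain has no leading "Element" operand, so the GEP's
// first index (operand 4) is skipped for it, while Op*PtrAccessChain keeps
// it; this is why StartingIndex below is 5 vs. 4.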
2726 const unsigned Opcode = STI.isVulkanEnv()
2727 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
2728 : SPIRV::OpAccessChain)
2729 : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
2730 : SPIRV::OpPtrAccessChain);
2731
2732 auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
2733 .addDef(ResVReg)
2734 .addUse(GR.getSPIRVTypeID(ResType))
2735 // Object to get a pointer to.
2736 .addUse(I.getOperand(3).getReg());
2737 // Adding indices.
2738 const unsigned StartingIndex =
2739 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
2740 ? 5
2741 : 4;
2742 for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
2743 Res.addUse(I.getOperand(i).getReg());
2744 return Res.constrainAllUses(TII, TRI, RBI);
2745}
2746
2747// Maybe wrap a value into OpSpecConstantOp
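// E.g. a non-constant operand of spv_const_composite gets wrapped as
// (an illustrative sketch): %wrap = OpSpecConstantOp %ty Bitcast %op,
// and %wrap then stands in for %op in the composite's operand list.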
2748bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
2749 MachineInstr &I, SmallVector<Register> &CompositeArgs) const {
2750 bool Result = true;
2751 unsigned Lim = I.getNumExplicitOperands();
2752 for (unsigned i = I.getNumExplicitDefs() + 1; i < Lim; ++i) {
2753 Register OpReg = I.getOperand(i).getReg();
2754 SPIRVType *OpDefine = MRI->getVRegDef(OpReg);
2755 SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
2756 SmallPtrSet<SPIRVType *, 4> Visited;
2757 if (!OpDefine || !OpType || isConstReg(MRI, OpDefine, Visited) ||
2758 OpDefine->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
2759 GR.isAggregateType(OpType)) {
2760 // The case of G_ADDRSPACE_CAST inside spv_const_composite() is processed
2761 // by selectAddrSpaceCast()
2762 CompositeArgs.push_back(OpReg);
2763 continue;
2764 }
2765 MachineFunction *MF = I.getMF();
2766 Register WrapReg = GR.find(OpDefine, MF);
2767 if (WrapReg.isValid()) {
2768 CompositeArgs.push_back(WrapReg);
2769 continue;
2770 }
2771 // Create a new register for the wrapper
2772 WrapReg = MRI->createVirtualRegister(GR.getRegClass(OpType));
2773 GR.add(OpDefine, MF, WrapReg);
2774 CompositeArgs.push_back(WrapReg);
2775 // Decorate the wrapper register and generate a new instruction
2776 MRI->setType(WrapReg, LLT::pointer(0, 64));
2777 GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
2778 MachineBasicBlock &BB = *I.getParent();
2779 Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
2780 .addDef(WrapReg)
2781 .addUse(GR.getSPIRVTypeID(OpType))
2782 .addImm(static_cast<uint32_t>(SPIRV::Opcode::Bitcast))
2783 .addUse(OpReg)
2784 .constrainAllUses(TII, TRI, RBI);
2785 if (!Result)
2786 break;
2787 }
2788 return Result;
2789}
2790
2791bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
2792 const SPIRVType *ResType,
2793 MachineInstr &I) const {
2794 MachineBasicBlock &BB = *I.getParent();
2795 Intrinsic::ID IID = cast<GIntrinsic>(I).getIntrinsicID();
2796 switch (IID) {
2797 case Intrinsic::spv_load:
2798 return selectLoad(ResVReg, ResType, I);
2799 case Intrinsic::spv_store:
2800 return selectStore(I);
2801 case Intrinsic::spv_extractv:
2802 return selectExtractVal(ResVReg, ResType, I);
2803 case Intrinsic::spv_insertv:
2804 return selectInsertVal(ResVReg, ResType, I);
2805 case Intrinsic::spv_extractelt:
2806 return selectExtractElt(ResVReg, ResType, I);
2807 case Intrinsic::spv_insertelt:
2808 return selectInsertElt(ResVReg, ResType, I);
2809 case Intrinsic::spv_gep:
2810 return selectGEP(ResVReg, ResType, I);
2811 case Intrinsic::spv_unref_global:
2812 case Intrinsic::spv_init_global: {
2813 MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
2814 MachineInstr *Init = I.getNumExplicitOperands() > 2
2815 ? MRI->getVRegDef(I.getOperand(2).getReg())
2816 : nullptr;
2817 assert(MI);
2818 return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
2819 }
2820 case Intrinsic::spv_undef: {
2821 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
2822 .addDef(ResVReg)
2823 .addUse(GR.getSPIRVTypeID(ResType));
2824 return MIB.constrainAllUses(TII, TRI, RBI);
2825 }
2826 case Intrinsic::spv_const_composite: {
2827 // If no values are attached, the composite is a null constant.
2828 bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
2829 // Select a proper instruction.
2830 unsigned Opcode = SPIRV::OpConstantNull;
2831 SmallVector<Register> CompositeArgs;
2832 if (!IsNull) {
2833 Opcode = SPIRV::OpConstantComposite;
2834 if (!wrapIntoSpecConstantOp(I, CompositeArgs))
2835 return false;
2836 }
2837 MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
2838 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
2839 .addDef(ResVReg)
2840 .addUse(GR.getSPIRVTypeID(ResType));
2841 // Skip the type MD node we already used when generating assign.type for this.
2842 if (!IsNull) {
2843 for (Register OpReg : CompositeArgs)
2844 MIB.addUse(OpReg);
2845 }
2846 return MIB.constrainAllUses(TII, TRI, RBI);
2847 }
2848 case Intrinsic::spv_assign_name: {
2849 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
2850 MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
2851 for (unsigned i = I.getNumExplicitDefs() + 2;
2852 i < I.getNumExplicitOperands(); ++i) {
2853 MIB.addImm(I.getOperand(i).getImm());
2854 }
2855 return MIB.constrainAllUses(TII, TRI, RBI);
2856 }
2857 case Intrinsic::spv_switch: {
2858 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
2859 for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
2860 if (I.getOperand(i).isReg())
2861 MIB.addReg(I.getOperand(i).getReg());
2862 else if (I.getOperand(i).isCImm())
2863 addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
2864 else if (I.getOperand(i).isMBB())
2865 MIB.addMBB(I.getOperand(i).getMBB());
2866 else
2867 llvm_unreachable("Unexpected OpSwitch operand");
2868 }
2869 return MIB.constrainAllUses(TII, TRI, RBI);
2870 }
2871 case Intrinsic::spv_loop_merge: {
2872 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpLoopMerge));
2873 for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
2874 assert(I.getOperand(i).isMBB());
2875 MIB.addMBB(I.getOperand(i).getMBB());
2876 }
2877 MIB.addImm(SPIRV::SelectionControl::None);
2878 return MIB.constrainAllUses(TII, TRI, RBI);
2879 }
2880 case Intrinsic::spv_selection_merge: {
2881 auto MIB =
2882 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSelectionMerge));
2883 assert(I.getOperand(1).isMBB() &&
2884 "operand 1 to spv_selection_merge must be a basic block");
2885 MIB.addMBB(I.getOperand(1).getMBB());
2886 MIB.addImm(getSelectionOperandForImm(I.getOperand(2).getImm()));
2887 return MIB.constrainAllUses(TII, TRI, RBI);
2888 }
2889 case Intrinsic::spv_cmpxchg:
2890 return selectAtomicCmpXchg(ResVReg, ResType, I);
2891 case Intrinsic::spv_unreachable:
2892 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable))
2893 .constrainAllUses(TII, TRI, RBI);
2894 case Intrinsic::spv_alloca:
2895 return selectFrameIndex(ResVReg, ResType, I);
2896 case Intrinsic::spv_alloca_array:
2897 return selectAllocaArray(ResVReg, ResType, I);
2898 case Intrinsic::spv_assume:
2899 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
2900 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
2901 .addUse(I.getOperand(1).getReg())
2902 .constrainAllUses(TII, TRI, RBI);
2903 break;
2904 case Intrinsic::spv_expect:
2905 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
2906 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
2907 .addDef(ResVReg)
2908 .addUse(GR.getSPIRVTypeID(ResType))
2909 .addUse(I.getOperand(2).getReg())
2910 .addUse(I.getOperand(3).getReg())
2911 .constrainAllUses(TII, TRI, RBI);
2912 break;
2913 case Intrinsic::arithmetic_fence:
2914 if (STI.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
2915 return BuildMI(BB, I, I.getDebugLoc(),
2916 TII.get(SPIRV::OpArithmeticFenceEXT))
2917 .addDef(ResVReg)
2918 .addUse(GR.getSPIRVTypeID(ResType))
2919 .addUse(I.getOperand(2).getReg())
2920 .constrainAllUses(TII, TRI, RBI);
2921 else
2922 return BuildCOPY(ResVReg, I.getOperand(2).getReg(), I);
2923 break;
2924 case Intrinsic::spv_thread_id:
2925 // The HLSL SV_DispatchThreadID semantic is lowered to the
2926 // llvm.spv.thread.id intrinsic in LLVM IR for the SPIR-V backend.
2927 //
2928 // In the SPIR-V backend, llvm.spv.thread.id is translated to the
2929 // `GlobalInvocationId` builtin variable.
2930 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalInvocationId, ResVReg,
2931 ResType, I);
2932 case Intrinsic::spv_thread_id_in_group:
2933 // The HLSL SV_GroupThreadId semantic is lowered to the
2934 // llvm.spv.thread.id.in.group intrinsic in LLVM IR for the SPIR-V backend.
2935 //
2936 // In the SPIR-V backend, llvm.spv.thread.id.in.group is translated to the
2937 // `LocalInvocationId` builtin variable.
2938 return loadVec3BuiltinInputID(SPIRV::BuiltIn::LocalInvocationId, ResVReg,
2939 ResType, I);
2940 case Intrinsic::spv_group_id:
2941 // The HLSL SV_GroupId semantic is lowered to the llvm.spv.group.id
2942 // intrinsic in LLVM IR for the SPIR-V backend.
2943 //
2944 // In the SPIR-V backend, llvm.spv.group.id is translated to the
2945 // `WorkgroupId` builtin variable.
2946 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupId, ResVReg, ResType,
2947 I);
2948 case Intrinsic::spv_fdot:
2949 return selectFloatDot(ResVReg, ResType, I);
2950 case Intrinsic::spv_udot:
2951 case Intrinsic::spv_sdot:
2952 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
2953 STI.isAtLeastSPIRVVer(VersionTuple(1, 6)))
2954 return selectIntegerDot(ResVReg, ResType, I,
2955 /*Signed=*/IID == Intrinsic::spv_sdot);
2956 return selectIntegerDotExpansion(ResVReg, ResType, I);
2957 case Intrinsic::spv_dot4add_i8packed:
2958 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
2959 STI.isAtLeastSPIRVVer(VersionTuple(1, 6)))
2960 return selectDot4AddPacked<true>(ResVReg, ResType, I);
2961 return selectDot4AddPackedExpansion<true>(ResVReg, ResType, I);
2962 case Intrinsic::spv_dot4add_u8packed:
2963 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
2964 STI.isAtLeastSPIRVVer(VersionTuple(1, 6)))
2965 return selectDot4AddPacked<false>(ResVReg, ResType, I);
2966 return selectDot4AddPackedExpansion<false>(ResVReg, ResType, I);
2967 case Intrinsic::spv_all:
2968 return selectAll(ResVReg, ResType, I);
2969 case Intrinsic::spv_any:
2970 return selectAny(ResVReg, ResType, I);
2971 case Intrinsic::spv_cross:
2972 return selectExtInst(ResVReg, ResType, I, CL::cross, GL::Cross);
2973 case Intrinsic::spv_distance:
2974 return selectExtInst(ResVReg, ResType, I, CL::distance, GL::Distance);
2975 case Intrinsic::spv_lerp:
2976 return selectExtInst(ResVReg, ResType, I, CL::mix, GL::FMix);
2977 case Intrinsic::spv_length:
2978 return selectExtInst(ResVReg, ResType, I, CL::length, GL::Length);
2979 case Intrinsic::spv_degrees:
2980 return selectExtInst(ResVReg, ResType, I, CL::degrees, GL::Degrees);
2981 case Intrinsic::spv_frac:
2982 return selectExtInst(ResVReg, ResType, I, CL::fract, GL::Fract);
2983 case Intrinsic::spv_normalize:
2984 return selectExtInst(ResVReg, ResType, I, CL::normalize, GL::Normalize);
2985 case Intrinsic::spv_rsqrt:
2986 return selectExtInst(ResVReg, ResType, I, CL::rsqrt, GL::InverseSqrt);
2987 case Intrinsic::spv_sign:
2988 return selectSign(ResVReg, ResType, I);
2989 case Intrinsic::spv_firstbituhigh: // There is no CL equivalent of FindUMsb
2990 return selectFirstBitHigh(ResVReg, ResType, I, /*IsSigned=*/false);
2991 case Intrinsic::spv_firstbitshigh: // There is no CL equivalent of FindSMsb
2992 return selectFirstBitHigh(ResVReg, ResType, I, /*IsSigned=*/true);
2993  case Intrinsic::spv_firstbitlow: // There is no CL equivalent of FindILsb
2994 return selectFirstBitLow(ResVReg, ResType, I);
2995 case Intrinsic::spv_group_memory_barrier_with_group_sync: {
2996 bool Result = true;
2997 auto MemSemConstant =
2998 buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent, I);
2999 Register MemSemReg = MemSemConstant.first;
3000 Result &= MemSemConstant.second;
3001 auto ScopeConstant = buildI32Constant(SPIRV::Scope::Workgroup, I);
3002 Register ScopeReg = ScopeConstant.first;
3003 Result &= ScopeConstant.second;
3004 MachineBasicBlock &BB = *I.getParent();
3005 return Result &&
3006 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpControlBarrier))
3007 .addUse(ScopeReg)
3008 .addUse(ScopeReg)
3009 .addUse(MemSemReg)
3010 .constrainAllUses(TII, TRI, RBI);
3011 }
3012 case Intrinsic::spv_lifetime_start:
3013 case Intrinsic::spv_lifetime_end: {
3014 unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
3015 : SPIRV::OpLifetimeStop;
3016 int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
3017 Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
3018 if (Size == -1)
3019 Size = 0;
3020 return BuildMI(BB, I, I.getDebugLoc(), TII.get(Op))
3021 .addUse(PtrReg)
3022 .addImm(Size)
3023 .constrainAllUses(TII, TRI, RBI);
3024 }
3025 case Intrinsic::spv_saturate:
3026 return selectSaturate(ResVReg, ResType, I);
3027 case Intrinsic::spv_nclamp:
3028 return selectExtInst(ResVReg, ResType, I, CL::fclamp, GL::NClamp);
3029 case Intrinsic::spv_uclamp:
3030 return selectExtInst(ResVReg, ResType, I, CL::u_clamp, GL::UClamp);
3031 case Intrinsic::spv_sclamp:
3032 return selectExtInst(ResVReg, ResType, I, CL::s_clamp, GL::SClamp);
3033 case Intrinsic::spv_wave_active_countbits:
3034 return selectWaveActiveCountBits(ResVReg, ResType, I);
3035 case Intrinsic::spv_wave_all:
3036 return selectWaveOpInst(ResVReg, ResType, I, SPIRV::OpGroupNonUniformAll);
3037 case Intrinsic::spv_wave_any:
3038 return selectWaveOpInst(ResVReg, ResType, I, SPIRV::OpGroupNonUniformAny);
3039 case Intrinsic::spv_wave_is_first_lane:
3040 return selectWaveOpInst(ResVReg, ResType, I, SPIRV::OpGroupNonUniformElect);
3041 case Intrinsic::spv_wave_reduce_sum:
3042 return selectWaveReduceSum(ResVReg, ResType, I);
3043 case Intrinsic::spv_wave_readlane:
3044 return selectWaveOpInst(ResVReg, ResType, I,
3045 SPIRV::OpGroupNonUniformShuffle);
3046 case Intrinsic::spv_step:
3047 return selectExtInst(ResVReg, ResType, I, CL::step, GL::Step);
3048 case Intrinsic::spv_radians:
3049 return selectExtInst(ResVReg, ResType, I, CL::radians, GL::Radians);
3050 // Discard intrinsics which we do not expect to actually represent code after
3051 // lowering or intrinsics which are not implemented but should not crash when
3052 // found in a customer's LLVM IR input.
3053 case Intrinsic::instrprof_increment:
3054 case Intrinsic::instrprof_increment_step:
3055 case Intrinsic::instrprof_value_profile:
3056 break;
3057 // Discard internal intrinsics.
3058 case Intrinsic::spv_value_md:
3059 break;
3060 case Intrinsic::spv_resource_handlefrombinding: {
3061 return selectHandleFromBinding(ResVReg, ResType, I);
3062 }
3063 case Intrinsic::spv_resource_store_typedbuffer: {
3064 return selectImageWriteIntrinsic(I);
3065 }
3066 case Intrinsic::spv_resource_load_typedbuffer: {
3067 return selectReadImageIntrinsic(ResVReg, ResType, I);
3068 }
3069 case Intrinsic::spv_discard: {
3070 return selectDiscard(ResVReg, ResType, I);
3071 }
3072 default: {
3073 std::string DiagMsg;
3074 raw_string_ostream OS(DiagMsg);
3075 I.print(OS);
3076 DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
3077 report_fatal_error(DiagMsg.c_str(), false);
3078 }
3079 }
3080 return true;
3081}
3082
3083bool SPIRVInstructionSelector::selectHandleFromBinding(Register &ResVReg,
3084 const SPIRVType *ResType,
3085 MachineInstr &I) const {
3086
3087 uint32_t Set = foldImm(I.getOperand(2), MRI);
3088 uint32_t Binding = foldImm(I.getOperand(3), MRI);
3089 uint32_t ArraySize = foldImm(I.getOperand(4), MRI);
3090 Register IndexReg = I.getOperand(5).getReg();
3091 bool IsNonUniform = ArraySize > 1 && foldImm(I.getOperand(6), MRI);
3092
3093 MachineIRBuilder MIRBuilder(I);
3094 Register VarReg = buildPointerToResource(ResType, Set, Binding, ArraySize,
3095 IndexReg, IsNonUniform, MIRBuilder);
3096
3097 if (IsNonUniform)
3098 buildOpDecorate(ResVReg, I, TII, SPIRV::Decoration::NonUniformEXT, {});
3099
3100 // TODO: For now we assume the resource is an image, which needs to be
3101 // loaded to get the handle. That will not be true for storage buffers.
3102 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
3103 .addDef(ResVReg)
3104 .addUse(GR.getSPIRVTypeID(ResType))
3105 .addUse(VarReg)
3106 .constrainAllUses(TII, TRI, RBI);
3107}
3108
3109bool SPIRVInstructionSelector::selectReadImageIntrinsic(
3110 Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
3111
3112 // If the load of the image is in a different basic block, then
3113 // this will generate invalid code. A proper solution is to move
3114 // the OpLoad from selectHandleFromBinding here. However, to do
3115 // that we will need to change the return type of the intrinsic.
3116  // We will do that when we can, but for now we are trying to move forward
3117  // with other issues.
3118 Register ImageReg = I.getOperand(2).getReg();
3119 assert(MRI->getVRegDef(ImageReg)->getParent() == I.getParent() &&
3120 "The image must be loaded in the same basic block as its use.");
3121
3122 uint64_t ResultSize = GR.getScalarOrVectorComponentCount(ResType);
3123 if (ResultSize == 4) {
3124 return BuildMI(*I.getParent(), I, I.getDebugLoc(),
3125 TII.get(SPIRV::OpImageRead))
3126 .addDef(ResVReg)
3127 .addUse(GR.getSPIRVTypeID(ResType))
3128 .addUse(ImageReg)
3129 .addUse(I.getOperand(3).getReg())
3130 .constrainAllUses(TII, TRI, RBI);
3131 }
3132
3133 SPIRVType *ReadType = widenTypeToVec4(ResType, I);
3134 Register ReadReg = MRI->createVirtualRegister(GR.getRegClass(ReadType));
3135 bool Succeed =
3136 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpImageRead))
3137 .addDef(ReadReg)
3138 .addUse(GR.getSPIRVTypeID(ReadType))
3139 .addUse(ImageReg)
3140 .addUse(I.getOperand(3).getReg())
3141 .constrainAllUses(TII, TRI, RBI);
3142 if (!Succeed)
3143 return false;
3144
3145 if (ResultSize == 1) {
3146 return BuildMI(*I.getParent(), I, I.getDebugLoc(),
3147 TII.get(SPIRV::OpCompositeExtract))
3148 .addDef(ResVReg)
3149 .addUse(GR.getSPIRVTypeID(ResType))
3150 .addUse(ReadReg)
3151 .addImm(0)
3152 .constrainAllUses(TII, TRI, RBI);
3153 }
3154 return extractSubvector(ResVReg, ResType, ReadReg, I);
3155}
3156
3157bool SPIRVInstructionSelector::extractSubvector(
3158 Register &ResVReg, const SPIRVType *ResType, Register &ReadReg,
3159    MachineInstr &InsertionPoint) const {
3160  SPIRVType *InputType = GR.getResultType(ReadReg);
3161 [[maybe_unused]] uint64_t InputSize =
3162 GR.getScalarOrVectorComponentCount(InputType);
3163 uint64_t ResultSize = GR.getScalarOrVectorComponentCount(ResType);
3164 assert(InputSize > 1 && "The input must be a vector.");
3165 assert(ResultSize > 1 && "The result must be a vector.");
3166 assert(ResultSize < InputSize &&
3167         "Cannot extract more elements than there are in the input.");
3168 SmallVector<Register> ComponentRegisters;
3169 SPIRVType *ScalarType = GR.getScalarOrVectorComponentType(ResType);
3170 const TargetRegisterClass *ScalarRegClass = GR.getRegClass(ScalarType);
3171 for (uint64_t I = 0; I < ResultSize; I++) {
3172 Register ComponentReg = MRI->createVirtualRegister(ScalarRegClass);
3173 bool Succeed = BuildMI(*InsertionPoint.getParent(), InsertionPoint,
3174 InsertionPoint.getDebugLoc(),
3175 TII.get(SPIRV::OpCompositeExtract))
3176 .addDef(ComponentReg)
3177 .addUse(ScalarType->getOperand(0).getReg())
3178 .addUse(ReadReg)
3179 .addImm(I)
3180 .constrainAllUses(TII, TRI, RBI);
3181 if (!Succeed)
3182 return false;
3183 ComponentRegisters.emplace_back(ComponentReg);
3184 }
3185
3186  auto MIB = BuildMI(*InsertionPoint.getParent(), InsertionPoint,
3187                     InsertionPoint.getDebugLoc(),
3188 TII.get(SPIRV::OpCompositeConstruct))
3189 .addDef(ResVReg)
3190 .addUse(GR.getSPIRVTypeID(ResType));
3191
3192 for (Register ComponentReg : ComponentRegisters)
3193 MIB.addUse(ComponentReg);
3194 return MIB.constrainAllUses(TII, TRI, RBI);
3195}
3196
3197bool SPIRVInstructionSelector::selectImageWriteIntrinsic(
3198 MachineInstr &I) const {
3199 // If the load of the image is in a different basic block, then
3200 // this will generate invalid code. A proper solution is to move
3201 // the OpLoad from selectHandleFromBinding here. However, to do
3202 // that we will need to change the return type of the intrinsic.
3203  // We will do that when we can, but for now we are trying to move forward
3204  // with other issues.
3205 Register ImageReg = I.getOperand(1).getReg();
3206 assert(MRI->getVRegDef(ImageReg)->getParent() == I.getParent() &&
3207 "The image must be loaded in the same basic block as its use.");
3208 Register CoordinateReg = I.getOperand(2).getReg();
3209 Register DataReg = I.getOperand(3).getReg();
3210 assert(GR.getResultType(DataReg)->getOpcode() == SPIRV::OpTypeVector);
3211 assert(GR.getScalarOrVectorComponentCount(GR.getResultType(DataReg)) == 4);
3212 return BuildMI(*I.getParent(), I, I.getDebugLoc(),
3213 TII.get(SPIRV::OpImageWrite))
3214 .addUse(ImageReg)
3215 .addUse(CoordinateReg)
3216 .addUse(DataReg)
3217 .constrainAllUses(TII, TRI, RBI);
3218}
3219
3220Register SPIRVInstructionSelector::buildPointerToResource(
3221 const SPIRVType *ResType, uint32_t Set, uint32_t Binding,
3222 uint32_t ArraySize, Register IndexReg, bool IsNonUniform,
3223 MachineIRBuilder MIRBuilder) const {
3224 if (ArraySize == 1)
3225 return GR.getOrCreateGlobalVariableWithBinding(ResType, Set, Binding,
3226 MIRBuilder);
3227
3228 const SPIRVType *VarType = GR.getOrCreateSPIRVArrayType(
3229 ResType, ArraySize, *MIRBuilder.getInsertPt(), TII);
3230 Register VarReg = GR.getOrCreateGlobalVariableWithBinding(
3231 VarType, Set, Binding, MIRBuilder);
3232
3233 SPIRVType *ResPointerType = GR.getOrCreateSPIRVPointerType(
3234 ResType, MIRBuilder, SPIRV::StorageClass::UniformConstant);
3235
3236 Register AcReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
3237 if (IsNonUniform) {
3238    // It is unclear which value needs to be marked as non-uniform, so both
3239    // the index and the access chain are decorated as non-uniform.
3240 buildOpDecorate(IndexReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
3241 buildOpDecorate(AcReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
3242 }
3243
3244 MIRBuilder.buildInstr(SPIRV::OpAccessChain)
3245 .addDef(AcReg)
3246 .addUse(GR.getSPIRVTypeID(ResPointerType))
3247 .addUse(VarReg)
3248 .addUse(IndexReg);
3249
3250 return AcReg;
3251}
3252
3253bool SPIRVInstructionSelector::selectFirstBitSet16(
3254 Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
3255 unsigned ExtendOpcode, unsigned BitSetOpcode) const {
3256 Register ExtReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
3257 bool Result = selectOpWithSrcs(ExtReg, ResType, I, {I.getOperand(2).getReg()},
3258 ExtendOpcode);
3259
3260 return Result &&
3261 selectFirstBitSet32(ResVReg, ResType, I, ExtReg, BitSetOpcode);
3262}
3263
3264bool SPIRVInstructionSelector::selectFirstBitSet32(
3265 Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
3266 Register SrcReg, unsigned BitSetOpcode) const {
3267 return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
3268 .addDef(ResVReg)
3269 .addUse(GR.getSPIRVTypeID(ResType))
3270 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
3271 .addImm(BitSetOpcode)
3272 .addUse(SrcReg)
3273 .constrainAllUses(TII, TRI, RBI);
3274}
3275
3276bool SPIRVInstructionSelector::selectFirstBitSet64Overflow(
3277 Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
3278 Register SrcReg, unsigned BitSetOpcode, bool SwapPrimarySide) const {
3279
3280  // SPIR-V allows vectors of size 2, 3, and 4 only. Calling this with a
3281  // larger vector requires creating a param register and return register with
3282  // an invalid vector size. If that is resolved, then this function can be
3283  // used for vectors of any component size.
3284 unsigned ComponentCount = GR.getScalarOrVectorComponentCount(ResType);
3285 assert(ComponentCount < 5 && "Vec 5+ will generate invalid SPIR-V ops");
3286
3287 MachineIRBuilder MIRBuilder(I);
3288 SPIRVType *BaseType = GR.retrieveScalarOrVectorIntType(ResType);
3289 SPIRVType *I64Type = GR.getOrCreateSPIRVIntegerType(64, MIRBuilder);
3290 SPIRVType *I64x2Type = GR.getOrCreateSPIRVVectorType(I64Type, 2, MIRBuilder);
3291 SPIRVType *Vec2ResType =
3292 GR.getOrCreateSPIRVVectorType(BaseType, 2, MIRBuilder);
3293
3294 std::vector<Register> PartialRegs;
3295
3296 // Loops 0, 2, 4, ... but stops one loop early when ComponentCount is odd
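  // (e.g. with ComponentCount == 5, this loop handles pairs (0,1) and (2,3);
  // the leftover component 4 is handled by the odd-count case below)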
3297 unsigned CurrentComponent = 0;
3298 for (; CurrentComponent + 1 < ComponentCount; CurrentComponent += 2) {
3299 // This register holds the firstbitX result for each of the i64x2 vectors
3300 // extracted from SrcReg
3301 Register BitSetResult =
3302 MRI->createVirtualRegister(GR.getRegClass(I64x2Type));
3303
3304 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
3305 TII.get(SPIRV::OpVectorShuffle))
3306 .addDef(BitSetResult)
3307 .addUse(GR.getSPIRVTypeID(I64x2Type))
3308 .addUse(SrcReg)
3309 .addUse(SrcReg)
3310 .addImm(CurrentComponent)
3311 .addImm(CurrentComponent + 1);
3312
3313 if (!MIB.constrainAllUses(TII, TRI, RBI))
3314 return false;
3315
3316 Register SubVecBitSetReg =
3317 MRI->createVirtualRegister(GR.getRegClass(Vec2ResType));
3318
3319 if (!selectFirstBitSet64(SubVecBitSetReg, Vec2ResType, I, BitSetResult,
3320 BitSetOpcode, SwapPrimarySide))
3321 return false;
3322
3323 PartialRegs.push_back(SubVecBitSetReg);
3324 }
3325
3326 // On odd component counts we need to handle one more component
3327 if (CurrentComponent != ComponentCount) {
3328 bool ZeroAsNull = STI.isOpenCLEnv();
3329 Register FinalElemReg = MRI->createVirtualRegister(GR.getRegClass(I64Type));
3330 Register ConstIntLastIdx = GR.getOrCreateConstInt(
3331 ComponentCount - 1, I, BaseType, TII, ZeroAsNull);
3332
3333 if (!selectOpWithSrcs(FinalElemReg, I64Type, I, {SrcReg, ConstIntLastIdx},
3334 SPIRV::OpVectorExtractDynamic))
3335 return false;
3336
3337 Register FinalElemBitSetReg =
3338 MRI->createVirtualRegister(GR.getRegClass(BaseType));
3339
3340 if (!selectFirstBitSet64(FinalElemBitSetReg, BaseType, I, FinalElemReg,
3341 BitSetOpcode, SwapPrimarySide))
3342 return false;
3343
3344 PartialRegs.push_back(FinalElemBitSetReg);
3345 }
3346
3347 // Join all the resulting registers back into the return type in order
3348  // (i.e. i32x2, i32x2, i32x1 -> i32x5)
3349 return selectOpWithSrcs(ResVReg, ResType, I, PartialRegs,
3350 SPIRV::OpCompositeConstruct);
3351}
3352
3353bool SPIRVInstructionSelector::selectFirstBitSet64(
3354 Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
3355 Register SrcReg, unsigned BitSetOpcode, bool SwapPrimarySide) const {
3356 unsigned ComponentCount = GR.getScalarOrVectorComponentCount(ResType);
3357 SPIRVType *BaseType = GR.retrieveScalarOrVectorIntType(ResType);
3358 bool ZeroAsNull = STI.isOpenCLEnv();
3359 Register ConstIntZero =
3360 GR.getOrCreateConstInt(0, I, BaseType, TII, ZeroAsNull);
3361 Register ConstIntOne =
3362 GR.getOrCreateConstInt(1, I, BaseType, TII, ZeroAsNull);
3363
3364  // SPIR-V doesn't support vectors with more than 4 components. Since the
3365  // algorithm below converts i64 -> i32x2 and i64x4 -> i32x8, it can only
3366  // operate on vectors with 2 or fewer components. When larger vectors are
3367  // seen, split them, recurse, then recombine the results.
3368 if (ComponentCount > 2) {
3369 return selectFirstBitSet64Overflow(ResVReg, ResType, I, SrcReg,
3370 BitSetOpcode, SwapPrimarySide);
3371 }
3372
3373 // 1. Split int64 into 2 pieces using a bitcast
3374 MachineIRBuilder MIRBuilder(I);
3375 SPIRVType *PostCastType =
3376 GR.getOrCreateSPIRVVectorType(BaseType, 2 * ComponentCount, MIRBuilder);
3377 Register BitcastReg =
3378 MRI->createVirtualRegister(GR.getRegClass(PostCastType));
3379
3380 if (!selectOpWithSrcs(BitcastReg, PostCastType, I, {SrcReg},
3381 SPIRV::OpBitcast))
3382 return false;
3383
3384 // 2. Find the first set bit from the primary side for all the pieces in #1
3385 Register FBSReg = MRI->createVirtualRegister(GR.getRegClass(PostCastType));
3386 if (!selectFirstBitSet32(FBSReg, PostCastType, I, BitcastReg, BitSetOpcode))
3387 return false;
3388
3389 // 3. Split result vector into high bits and low bits
3390 Register HighReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
3391 Register LowReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
3392
3393 bool IsScalarRes = ResType->getOpcode() != SPIRV::OpTypeVector;
3394 if (IsScalarRes) {
3395 // if scalar do a vector extract
3396 if (!selectOpWithSrcs(HighReg, ResType, I, {FBSReg, ConstIntZero},
3397 SPIRV::OpVectorExtractDynamic))
3398 return false;
3399 if (!selectOpWithSrcs(LowReg, ResType, I, {FBSReg, ConstIntOne},
3400 SPIRV::OpVectorExtractDynamic))
3401 return false;
3402 } else {
3403 // if vector do a shufflevector
3404 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
3405 TII.get(SPIRV::OpVectorShuffle))
3406 .addDef(HighReg)
3407 .addUse(GR.getSPIRVTypeID(ResType))
3408 .addUse(FBSReg)
3409 // Per the spec, repeat the vector if only one vec is needed
3410 .addUse(FBSReg);
3411
3412 // high bits are stored in even indexes. Extract them from FBSReg
3413 for (unsigned J = 0; J < ComponentCount * 2; J += 2) {
3414 MIB.addImm(J);
3415 }
3416
3417 if (!MIB.constrainAllUses(TII, TRI, RBI))
3418 return false;
3419
3420 MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
3421 TII.get(SPIRV::OpVectorShuffle))
3422 .addDef(LowReg)
3423 .addUse(GR.getSPIRVTypeID(ResType))
3424 .addUse(FBSReg)
3425 // Per the spec, repeat the vector if only one vec is needed
3426 .addUse(FBSReg);
3427
3428 // low bits are stored in odd indexes. Extract them from FBSReg
3429 for (unsigned J = 1; J < ComponentCount * 2; J += 2) {
3430 MIB.addImm(J);
3431 }
3432 if (!MIB.constrainAllUses(TII, TRI, RBI))
3433 return false;
3434 }
3435
3436 // 4. Check the result. When primary bits == -1 use secondary, otherwise use
3437 // primary
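  // (e.g. for FindUMsb on a scalar i64, when SwapPrimarySide is false: if the
  // high word's result is -1, the final result is the low word's result + 0;
  // otherwise it is the high word's result + 32)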
3438 SPIRVType *BoolType = GR.getOrCreateSPIRVBoolType(I, TII);
3439 Register NegOneReg;
3440 Register Reg0;
3441 Register Reg32;
3442 unsigned SelectOp;
3443 unsigned AddOp;
3444
3445 if (IsScalarRes) {
3446 NegOneReg =
3447 GR.getOrCreateConstInt((unsigned)-1, I, ResType, TII, ZeroAsNull);
3448 Reg0 = GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
3449 Reg32 = GR.getOrCreateConstInt(32, I, ResType, TII, ZeroAsNull);
3450 SelectOp = SPIRV::OpSelectSISCond;
3451 AddOp = SPIRV::OpIAddS;
3452 } else {
3453 BoolType =
3454 GR.getOrCreateSPIRVVectorType(BoolType, ComponentCount, MIRBuilder);
3455 NegOneReg =
3456 GR.getOrCreateConstVector((unsigned)-1, I, ResType, TII, ZeroAsNull);
3457 Reg0 = GR.getOrCreateConstVector(0, I, ResType, TII, ZeroAsNull);
3458 Reg32 = GR.getOrCreateConstVector(32, I, ResType, TII, ZeroAsNull);
3459 SelectOp = SPIRV::OpSelectVIVCond;
3460 AddOp = SPIRV::OpIAddV;
3461 }
3462
3463 Register PrimaryReg = HighReg;
3464 Register SecondaryReg = LowReg;
3465 Register PrimaryShiftReg = Reg32;
3466 Register SecondaryShiftReg = Reg0;
3467
3468 // By default the emitted opcodes check for the set bit from the MSB side.
3469 // Setting SwapPrimarySide checks the set bit from the LSB side
3470 if (SwapPrimarySide) {
3471 PrimaryReg = LowReg;
3472 SecondaryReg = HighReg;
3473 PrimaryShiftReg = Reg0;
3474 SecondaryShiftReg = Reg32;
3475 }
3476
3477 // Check if the primary bits are == -1
3478 Register BReg = MRI->createVirtualRegister(GR.getRegClass(BoolType));
3479 if (!selectOpWithSrcs(BReg, BoolType, I, {PrimaryReg, NegOneReg},
3480 SPIRV::OpIEqual))
3481 return false;
3482
3483 // Select secondary bits if true in BReg, otherwise primary bits
3484 Register TmpReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
3485 if (!selectOpWithSrcs(TmpReg, ResType, I, {BReg, SecondaryReg, PrimaryReg},
3486 SelectOp))
3487 return false;
3488
3489 // 5. Add 32 when high bits are used, otherwise 0 for low bits
3490 Register ValReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
3491 if (!selectOpWithSrcs(ValReg, ResType, I,
3492 {BReg, SecondaryShiftReg, PrimaryShiftReg}, SelectOp))
3493 return false;
3494
3495 return selectOpWithSrcs(ResVReg, ResType, I, {ValReg, TmpReg}, AddOp);
3496}
3497
3498bool SPIRVInstructionSelector::selectFirstBitHigh(Register ResVReg,
3499 const SPIRVType *ResType,
3500 MachineInstr &I,
3501 bool IsSigned) const {
3502 // FindUMsb and FindSMsb intrinsics only support 32 bit integers
3503 Register OpReg = I.getOperand(2).getReg();
3504 SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
3505 // zero or sign extend
3506 unsigned ExtendOpcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
3507 unsigned BitSetOpcode = IsSigned ? GL::FindSMsb : GL::FindUMsb;
3508
3509 switch (GR.getScalarOrVectorBitWidth(OpType)) {
3510 case 16:
3511 return selectFirstBitSet16(ResVReg, ResType, I, ExtendOpcode, BitSetOpcode);
3512 case 32:
3513 return selectFirstBitSet32(ResVReg, ResType, I, OpReg, BitSetOpcode);
3514 case 64:
3515 return selectFirstBitSet64(ResVReg, ResType, I, OpReg, BitSetOpcode,
3516 /*SwapPrimarySide=*/false);
3517 default:
3518    report_fatal_error(
3519        "spv_firstbituhigh and spv_firstbitshigh only support 16,32,64 bits.");
3520 }
3521}
3522
3523bool SPIRVInstructionSelector::selectFirstBitLow(Register ResVReg,
3524 const SPIRVType *ResType,
3525 MachineInstr &I) const {
3526 // FindILsb intrinsic only supports 32 bit integers
3527 Register OpReg = I.getOperand(2).getReg();
3528 SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
3529  // OpUConvert treats the operand bits as an unsigned i16 and zero-extends it
3530  // to an unsigned i32. This leaves all the least significant bits unchanged,
3531  // so the first set bit from the LSB side doesn't change.
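  // (e.g. i16 0x0008 zero-extends to i32 0x00000008; FindILsb returns 3 for
  // both)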
3532 unsigned ExtendOpcode = SPIRV::OpUConvert;
3533 unsigned BitSetOpcode = GL::FindILsb;
3534
3535 switch (GR.getScalarOrVectorBitWidth(OpType)) {
3536 case 16:
3537 return selectFirstBitSet16(ResVReg, ResType, I, ExtendOpcode, BitSetOpcode);
3538 case 32:
3539 return selectFirstBitSet32(ResVReg, ResType, I, OpReg, BitSetOpcode);
3540 case 64:
3541 return selectFirstBitSet64(ResVReg, ResType, I, OpReg, BitSetOpcode,
3542 /*SwapPrimarySide=*/true);
3543 default:
3544 report_fatal_error("spv_firstbitlow only supports 16,32,64 bits.");
3545 }
3546}
3547
3548bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
3549 const SPIRVType *ResType,
3550 MachineInstr &I) const {
3551  // There was an allocation size parameter to the allocation instruction
3552  // that is not 1.
3553 MachineBasicBlock &BB = *I.getParent();
3554 bool Res = BuildMI(BB, I, I.getDebugLoc(),
3555 TII.get(SPIRV::OpVariableLengthArrayINTEL))
3556 .addDef(ResVReg)
3557 .addUse(GR.getSPIRVTypeID(ResType))
3558 .addUse(I.getOperand(2).getReg())
3559 .constrainAllUses(TII, TRI, RBI);
3560 if (!STI.isVulkanEnv()) {
3561 unsigned Alignment = I.getOperand(3).getImm();
3562 buildOpDecorate(ResVReg, I, TII, SPIRV::Decoration::Alignment, {Alignment});
3563 }
3564 return Res;
3565}
3566
3567bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
3568 const SPIRVType *ResType,
3569 MachineInstr &I) const {
3570 // Change order of instructions if needed: all OpVariable instructions in a
3571 // function must be the first instructions in the first block
3572 auto It = getOpVariableMBBIt(I);
3573 bool Res = BuildMI(*It->getParent(), It, It->getDebugLoc(),
3574 TII.get(SPIRV::OpVariable))
3575 .addDef(ResVReg)
3576 .addUse(GR.getSPIRVTypeID(ResType))
3577 .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
3578 .constrainAllUses(TII, TRI, RBI);
3579 if (!STI.isVulkanEnv()) {
3580 unsigned Alignment = I.getOperand(2).getImm();
3581 buildOpDecorate(ResVReg, *It, TII, SPIRV::Decoration::Alignment,
3582 {Alignment});
3583 }
3584 return Res;
3585}
3586
3587bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
3588 // InstructionSelector walks backwards through the instructions. We can use
3589 // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
3590  // first, so we can generate an OpBranchConditional here. If there is no
3591 // G_BRCOND, we just use OpBranch for a regular unconditional branch.
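  // (e.g. "G_BRCOND %cond, %bb.then" followed by "G_BR %bb.else" is selected
  // as a single "OpBranchConditional %cond %bb.then %bb.else")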
3592 const MachineInstr *PrevI = I.getPrevNode();
3593 MachineBasicBlock &MBB = *I.getParent();
3594 if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
3595 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
3596 .addUse(PrevI->getOperand(0).getReg())
3597 .addMBB(PrevI->getOperand(1).getMBB())
3598 .addMBB(I.getOperand(0).getMBB())
3599 .constrainAllUses(TII, TRI, RBI);
3600 }
3601 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
3602 .addMBB(I.getOperand(0).getMBB())
3603 .constrainAllUses(TII, TRI, RBI);
3604}
3605
3606bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
3607 // InstructionSelector walks backwards through the instructions. For an
3608 // explicit conditional branch with no fallthrough, we use both a G_BR and a
3609 // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
3610 // generate the OpBranchConditional in selectBranch above.
3611 //
3612 // If an OpBranchConditional has been generated, we simply return, as the work
3613  // is already done. If there is no OpBranchConditional, LLVM must be relying on
3614 // implicit fallthrough to the next basic block, so we need to create an
3615 // OpBranchConditional with an explicit "false" argument pointing to the next
3616 // basic block that LLVM would fall through to.
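  // (e.g. a block ending in "G_BRCOND %cond, %bb.then" that falls through to
  // %bb.next becomes "OpBranchConditional %cond %bb.then %bb.next")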
3617 const MachineInstr *NextI = I.getNextNode();
3618 // Check if this has already been successfully selected.
3619 if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
3620 return true;
3621 // Must be relying on implicit block fallthrough, so generate an
3622 // OpBranchConditional with the "next" basic block as the "false" target.
3623 MachineBasicBlock &MBB = *I.getParent();
3624 unsigned NextMBBNum = MBB.getNextNode()->getNumber();
3625 MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
3626 return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
3627 .addUse(I.getOperand(0).getReg())
3628 .addMBB(I.getOperand(1).getMBB())
3629 .addMBB(NextMBB)
3630 .constrainAllUses(TII, TRI, RBI);
3631}
3632
3633bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
3634 const SPIRVType *ResType,
3635 MachineInstr &I) const {
3636 auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
3637 .addDef(ResVReg)
3638 .addUse(GR.getSPIRVTypeID(ResType));
3639 const unsigned NumOps = I.getNumOperands();
3640 for (unsigned i = 1; i < NumOps; i += 2) {
3641 MIB.addUse(I.getOperand(i + 0).getReg());
3642 MIB.addMBB(I.getOperand(i + 1).getMBB());
3643 }
3644 bool Res = MIB.constrainAllUses(TII, TRI, RBI);
3645 MIB->setDesc(TII.get(TargetOpcode::PHI));
3646 MIB->removeOperand(1);
3647 return Res;
3648}
3649
3650bool SPIRVInstructionSelector::selectGlobalValue(
3651 Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
3652 // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
3653 MachineIRBuilder MIRBuilder(I);
3654 const GlobalValue *GV = I.getOperand(1).getGlobal();
3655 Type *GVType = toTypedPointer(GR.getDeducedGlobalValueType(GV));
3656 SPIRVType *PointerBaseType;
3657 if (GVType->isArrayTy()) {
3658 SPIRVType *ArrayElementType =
3659 GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
3660 SPIRV::AccessQualifier::ReadWrite, false);
3661 PointerBaseType = GR.getOrCreateSPIRVArrayType(
3662 ArrayElementType, GVType->getArrayNumElements(), I, TII);
3663 } else {
3664 PointerBaseType = GR.getOrCreateSPIRVType(
3665 GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
3666 }
3667
3668 std::string GlobalIdent;
3669 if (!GV->hasName()) {
3670 unsigned &ID = UnnamedGlobalIDs[GV];
3671 if (ID == 0)
3672 ID = UnnamedGlobalIDs.size();
3673 GlobalIdent = "__unnamed_" + Twine(ID).str();
3674 } else {
3675 GlobalIdent = GV->getName();
3676 }
3677
3678 // Behaviour of functions as operands depends on availability of the
3679 // corresponding extension (SPV_INTEL_function_pointers):
3680 // - If there is an extension to operate with functions as operands:
3681 // We create a proper constant operand and evaluate a correct type for a
3682 // function pointer.
3683 // - Without the required extension:
3684  // We have functions as operands in tests with blocks of instructions, e.g.
3685  // in transcoding/global_block.ll. These operands are not used and should be
3686  // substituted by zero constants. Their type is expected to always be
3687 // OpTypePointer Function %uchar.
3688 if (isa<Function>(GV)) {
3689 const Constant *ConstVal = GV;
3690 MachineBasicBlock &BB = *I.getParent();
3691 Register NewReg = GR.find(ConstVal, GR.CurMF);
3692 if (!NewReg.isValid()) {
3693 Register NewReg = ResVReg;
3694 GR.add(ConstVal, GR.CurMF, NewReg);
3695 const Function *GVFun =
3696 STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
3697 ? dyn_cast<Function>(GV)
3698 : nullptr;
3699 SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
3700 PointerBaseType, I, TII,
3701 GVFun ? SPIRV::StorageClass::CodeSectionINTEL
3702                : addressSpaceToStorageClass(GV->getAddressSpace(), STI));
3703      if (GVFun) {
3704 // References to a function via function pointers generate virtual
3705        // registers without a definition. We will resolve them later, during
3706        // the module analysis stage.
3707 Register ResTypeReg = GR.getSPIRVTypeID(ResType);
3708 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
3709 Register FuncVReg =
3710 MRI->createGenericVirtualRegister(GR.getRegType(ResType));
3711 MRI->setRegClass(FuncVReg, &SPIRV::pIDRegClass);
3712 MachineInstrBuilder MIB1 =
3713 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
3714 .addDef(FuncVReg)
3715 .addUse(ResTypeReg);
3716 MachineInstrBuilder MIB2 =
3717 BuildMI(BB, I, I.getDebugLoc(),
3718 TII.get(SPIRV::OpConstantFunctionPointerINTEL))
3719 .addDef(NewReg)
3720 .addUse(ResTypeReg)
3721 .addUse(FuncVReg);
3722 // mapping the function pointer to the used Function
3723 GR.recordFunctionPointer(&MIB2.getInstr()->getOperand(2), GVFun);
3724 return MIB1.constrainAllUses(TII, TRI, RBI) &&
3725 MIB2.constrainAllUses(TII, TRI, RBI);
3726 }
3727 return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
3728 .addDef(NewReg)
3729 .addUse(GR.getSPIRVTypeID(ResType))
3730 .constrainAllUses(TII, TRI, RBI);
3731 }
3732 assert(NewReg != ResVReg);
3733 return BuildCOPY(ResVReg, NewReg, I);
3734 }
3735 auto GlobalVar = cast<GlobalVariable>(GV);
3736 assert(GlobalVar->getName() != "llvm.global.annotations");
3737
3738  // Skip the empty declaration for GVs with initializers until we get the
3739  // declaration with the initializer passed in.
3740 if (hasInitializer(GlobalVar) && !Init)
3741 return true;
3742
3743 bool HasLnkTy = !GV->hasInternalLinkage() && !GV->hasPrivateLinkage();
3744 SPIRV::LinkageType::LinkageType LnkType =
3745      GV->isDeclarationForLinker()
3746          ? SPIRV::LinkageType::Import
3747 : (GV->hasLinkOnceODRLinkage() &&
3748 STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
3749 ? SPIRV::LinkageType::LinkOnceODR
3750 : SPIRV::LinkageType::Export);
3751
3752 const unsigned AddrSpace = GV->getAddressSpace();
3753 SPIRV::StorageClass::StorageClass StorageClass =
3754 addressSpaceToStorageClass(AddrSpace, STI);
3755 SPIRVType *ResType =
3756 GR.getOrCreateSPIRVPointerType(PointerBaseType, I, TII, StorageClass);
3757 Register Reg = GR.buildGlobalVariable(
3758 ResVReg, ResType, GlobalIdent, GV, StorageClass, Init,
3759 GlobalVar->isConstant(), HasLnkTy, LnkType, MIRBuilder, true);
3760 return Reg.isValid();
3761}
3762
3763bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
3764 const SPIRVType *ResType,
3765 MachineInstr &I) const {
3766 if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
3767 return selectExtInst(ResVReg, ResType, I, CL::log10);
3768 }
3769
3770 // There is no log10 instruction in the GLSL Extended Instruction set, so it
3771 // is implemented as:
3772 // log10(x) = log2(x) * (1 / log2(10))
3773 // = log2(x) * 0.30103
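  //         (since 1 / log2(10) = log10(2) ≈ 0.30103)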
3774
3775 MachineIRBuilder MIRBuilder(I);
3776 MachineBasicBlock &BB = *I.getParent();
3777
3778 // Build log2(x).
3779 Register VarReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
3780 bool Result =
3781 BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
3782 .addDef(VarReg)
3783 .addUse(GR.getSPIRVTypeID(ResType))
3784 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
3785 .addImm(GL::Log2)
3786 .add(I.getOperand(1))
3787 .constrainAllUses(TII, TRI, RBI);
3788
3789 // Build 0.30103.
3790 assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
3791 ResType->getOpcode() == SPIRV::OpTypeFloat);
3792 // TODO: Add matrix implementation once supported by the HLSL frontend.
3793 const SPIRVType *SpirvScalarType =
3794 ResType->getOpcode() == SPIRV::OpTypeVector
3795 ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
3796 : ResType;
3797 Register ScaleReg =
3798 GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);
3799
3800 // Multiply log2(x) by 0.30103 to get log10(x) result.
3801 auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
3802 ? SPIRV::OpVectorTimesScalar
3803 : SPIRV::OpFMulS;
3804 return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
3805 .addDef(ResVReg)
3806 .addUse(GR.getSPIRVTypeID(ResType))
3807 .addUse(VarReg)
3808 .addUse(ScaleReg)
3809 .constrainAllUses(TII, TRI, RBI);
3810}
3811
3812// Generate the instructions to load 3-element vector builtin input
3813// IDs/Indices.
3814// Like: GlobalInvocationId, LocalInvocationId, etc.
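// The component to extract (x = 0, y = 1, z = 2) arrives as a constant operand
// of the intrinsic and is used as the index for OpCompositeExtract below.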
3815bool SPIRVInstructionSelector::loadVec3BuiltinInputID(
3816 SPIRV::BuiltIn::BuiltIn BuiltInValue, Register ResVReg,
3817 const SPIRVType *ResType, MachineInstr &I) const {
3818 MachineIRBuilder MIRBuilder(I);
3819 const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
3820 const SPIRVType *Vec3Ty =
3821 GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
3822 const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
3823 Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
3824
3825 // Create new register for the input ID builtin variable.
3826 Register NewRegister =
3827 MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::iIDRegClass);
3828 MIRBuilder.getMRI()->setType(NewRegister, LLT::pointer(0, 64));
3829 GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
3830
3831 // Build global variable with the necessary decorations for the input ID
3832 // builtin variable.
3833 Register Variable = GR.buildGlobalVariable(
3834 NewRegister, PtrType, getLinkStringForBuiltIn(BuiltInValue), nullptr,
3835 SPIRV::StorageClass::Input, nullptr, true, true,
3836 SPIRV::LinkageType::Import, MIRBuilder, false);
3837
3838 // Create new register for loading value.
3839 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
3840 Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
3841 MIRBuilder.getMRI()->setType(LoadedRegister, LLT::pointer(0, 64));
3842 GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());
3843
3844 // Load v3uint value from the global variable.
3845 bool Result =
3846 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
3847 .addDef(LoadedRegister)
3848 .addUse(GR.getSPIRVTypeID(Vec3Ty))
3849 .addUse(Variable);
3850
3851  // Get the input ID index. The operand is expected to be a constant
3852  // immediate value, wrapped in a type assignment.
3853 assert(I.getOperand(2).isReg());
3854 const uint32_t ThreadId = foldImm(I.getOperand(2), MRI);
3855
3856 // Extract the input ID from the loaded vector value.
3857 MachineBasicBlock &BB = *I.getParent();
3858 auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
3859 .addDef(ResVReg)
3860 .addUse(GR.getSPIRVTypeID(ResType))
3861 .addUse(LoadedRegister)
3862 .addImm(ThreadId);
3863 return Result && MIB.constrainAllUses(TII, TRI, RBI);
3864}
3865
3866SPIRVType *SPIRVInstructionSelector::widenTypeToVec4(const SPIRVType *Type,
3867 MachineInstr &I) const {
3868 MachineIRBuilder MIRBuilder(I);
3869 if (Type->getOpcode() != SPIRV::OpTypeVector)
3870 return GR.getOrCreateSPIRVVectorType(Type, 4, MIRBuilder);
3871
3872 uint64_t VectorSize = Type->getOperand(2).getImm();
3873 if (VectorSize == 4)
3874 return Type;
3875
3876 Register ScalarTypeReg = Type->getOperand(1).getReg();
3877 const SPIRVType *ScalarType = GR.getSPIRVTypeForVReg(ScalarTypeReg);
3878 return GR.getOrCreateSPIRVVectorType(ScalarType, 4, MIRBuilder);
3879}
3880
3881namespace llvm {
3882InstructionSelector *
3883createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
3884                               const SPIRVSubtarget &Subtarget,
3885 const RegisterBankInfo &RBI) {
3886 return new SPIRVInstructionSelector(TM, Subtarget, RBI);
3887}
3888} // namespace llvm