SPIRVLegalizerInfo.cpp
//===- SPIRVLegalizerInfo.cpp --- SPIR-V Legalization Rules ------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the MachineLegalizer class for SPIR-V.
//
//===----------------------------------------------------------------------===//

#include "SPIRVLegalizerInfo.h"
#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVSubtarget.h"
#include "SPIRVUtils.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;
using namespace llvm::LegalizeActions;
using namespace llvm::LegalityPredicates;

#define DEBUG_TYPE "spirv-legalizer"

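// Returns a predicate that holds only when the extended-integer extensions
// are available and the type at TypeIdx is a plain scalar, which is how the
// arbitrary-width integers those extensions permit appear at this stage.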
LegalityPredicate typeOfExtendedScalars(unsigned TypeIdx, bool IsExtendedInts) {
  return [IsExtendedInts, TypeIdx](const LegalityQuery &Query) {
    const LLT Ty = Query.Types[TypeIdx];
    return IsExtendedInts && Ty.isValid() && Ty.isScalar();
  };
}

SPIRVLegalizerInfo::SPIRVLegalizerInfo(const SPIRVSubtarget &ST) {
  using namespace TargetOpcode;

  this->ST = &ST;
  GR = ST.getSPIRVGlobalRegistry();

  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT s128 = LLT::scalar(128);

  const LLT v16s64 = LLT::fixed_vector(16, 64);
  const LLT v16s32 = LLT::fixed_vector(16, 32);
  const LLT v16s16 = LLT::fixed_vector(16, 16);
  const LLT v16s8 = LLT::fixed_vector(16, 8);
  const LLT v16s1 = LLT::fixed_vector(16, 1);

  const LLT v8s64 = LLT::fixed_vector(8, 64);
  const LLT v8s32 = LLT::fixed_vector(8, 32);
  const LLT v8s16 = LLT::fixed_vector(8, 16);
  const LLT v8s8 = LLT::fixed_vector(8, 8);
  const LLT v8s1 = LLT::fixed_vector(8, 1);

  const LLT v4s64 = LLT::fixed_vector(4, 64);
  const LLT v4s32 = LLT::fixed_vector(4, 32);
  const LLT v4s16 = LLT::fixed_vector(4, 16);
  const LLT v4s8 = LLT::fixed_vector(4, 8);
  const LLT v4s1 = LLT::fixed_vector(4, 1);

  const LLT v3s64 = LLT::fixed_vector(3, 64);
  const LLT v3s32 = LLT::fixed_vector(3, 32);
  const LLT v3s16 = LLT::fixed_vector(3, 16);
  const LLT v3s8 = LLT::fixed_vector(3, 8);
  const LLT v3s1 = LLT::fixed_vector(3, 1);

  const LLT v2s64 = LLT::fixed_vector(2, 64);
  const LLT v2s32 = LLT::fixed_vector(2, 32);
  const LLT v2s16 = LLT::fixed_vector(2, 16);
  const LLT v2s8 = LLT::fixed_vector(2, 8);
  const LLT v2s1 = LLT::fixed_vector(2, 1);

  const unsigned PSize = ST.getPointerSize();
  const LLT p0 = LLT::pointer(0, PSize); // Function
  const LLT p1 = LLT::pointer(1, PSize); // CrossWorkgroup
  const LLT p2 = LLT::pointer(2, PSize); // UniformConstant
  const LLT p3 = LLT::pointer(3, PSize); // Workgroup
  const LLT p4 = LLT::pointer(4, PSize); // Generic
  const LLT p5 =
      LLT::pointer(5, PSize); // Input, SPV_INTEL_usm_storage_classes (Device)
  const LLT p6 = LLT::pointer(6, PSize); // SPV_INTEL_usm_storage_classes (Host)
  const LLT p7 = LLT::pointer(7, PSize);  // Input
  const LLT p8 = LLT::pointer(8, PSize);  // Output
  const LLT p9 =
      LLT::pointer(9, PSize); // CodeSectionINTEL, SPV_INTEL_function_pointers
  const LLT p10 = LLT::pointer(10, PSize); // Private
  const LLT p11 = LLT::pointer(11, PSize); // StorageBuffer
  const LLT p12 = LLT::pointer(12, PSize); // Uniform
  const LLT p13 = LLT::pointer(13, PSize); // PushConstant

  // TODO: remove copy-pasting here by using concatenation in some way.
  auto allPtrsScalarsAndVectors = {
      p0,    p1,    p2,    p3,    p4,    p5,    p6,     p7,     p8,
      p9,    p10,   p11,   p12,   p13,   s1,    s8,     s16,    s32,
      s64,   v2s1,  v2s8,  v2s16, v2s32, v2s64, v3s1,   v3s8,   v3s16,
      v3s32, v3s64, v4s1,  v4s8,  v4s16, v4s32, v4s64,  v8s1,   v8s8,
      v8s16, v8s32, v8s64, v16s1, v16s8, v16s16, v16s32, v16s64};

  auto allVectors = {v2s1,  v2s8,   v2s16,  v2s32, v2s64, v3s1,  v3s8,
                     v3s16, v3s32,  v3s64,  v4s1,  v4s8,  v4s16, v4s32,
                     v4s64, v8s1,   v8s8,   v8s16, v8s32, v8s64, v16s1,
                     v16s8, v16s16, v16s32, v16s64};

  auto allShaderVectors = {v2s1, v2s8, v2s16, v2s32, v2s64,
                           v3s1, v3s8, v3s16, v3s32, v3s64,
                           v4s1, v4s8, v4s16, v4s32, v4s64};

  auto allScalars = {s1, s8, s16, s32, s64};

  auto allScalarsAndVectors = {
      s1,    s8,    s16,   s32,   s64,   s128,  v2s1,   v2s8,
      v2s16, v2s32, v2s64, v3s1,  v3s8,  v3s16, v3s32,  v3s64,
      v4s1,  v4s8,  v4s16, v4s32, v4s64, v8s1,  v8s8,   v8s16,
      v8s32, v8s64, v16s1, v16s8, v16s16, v16s32, v16s64};

  auto allIntScalarsAndVectors = {
      s8,    s16,   s32,   s64,   s128,  v2s8,   v2s16,  v2s32, v2s64,
      v3s8,  v3s16, v3s32, v3s64, v4s8,  v4s16,  v4s32,  v4s64, v8s8,
      v8s16, v8s32, v8s64, v16s8, v16s16, v16s32, v16s64};

  auto allBoolScalarsAndVectors = {s1, v2s1, v3s1, v4s1, v8s1, v16s1};

  auto allIntScalars = {s8, s16, s32, s64, s128};

  auto allFloatScalarsAndF16Vector2AndVector4s = {s16, s32, s64, v2s16, v4s16};

  auto allFloatScalarsAndVectors = {
      s16,   s32,   s64,   v2s16, v2s32, v2s64, v3s16,  v3s32,  v3s64,
      v4s16, v4s32, v4s64, v8s16, v8s32, v8s64, v16s16, v16s32, v16s64};

  auto allFloatAndIntScalarsAndPtrs = {s8, s16, s32, s64, p0,  p1,
                                       p2, p3,  p4,  p5,  p6,  p7,
                                       p8, p9,  p10, p11, p12, p13};

  auto allPtrs = {p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13};

  auto &allowedVectorTypes = ST.isShader() ? allShaderVectors : allVectors;

  bool IsExtendedInts =
      ST.canUseExtension(
          SPIRV::Extension::SPV_ALTERA_arbitrary_precision_integers) ||
      ST.canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions) ||
      ST.canUseExtension(SPIRV::Extension::SPV_INTEL_int4);
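  // The predicates below accept any valid non-pointer scalar or vector type
  // once one of the extended-integer extensions above is enabled;
  // extendedPtrsScalarsAndVectors additionally lets pointer types through.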
  auto extendedScalarsAndVectors =
      [IsExtendedInts](const LegalityQuery &Query) {
        const LLT Ty = Query.Types[0];
        return IsExtendedInts && Ty.isValid() && !Ty.isPointerOrPointerVector();
      };
  auto extendedScalarsAndVectorsProduct = [IsExtendedInts](
                                              const LegalityQuery &Query) {
    const LLT Ty1 = Query.Types[0], Ty2 = Query.Types[1];
    return IsExtendedInts && Ty1.isValid() && Ty2.isValid() &&
           !Ty1.isPointerOrPointerVector() && !Ty2.isPointerOrPointerVector();
  };
  auto extendedPtrsScalarsAndVectors =
      [IsExtendedInts](const LegalityQuery &Query) {
        const LLT Ty = Query.Types[0];
        return IsExtendedInts && Ty.isValid();
      };

  // The universal validation rules in the SPIR-V specification state that
  // vector sizes are typically limited to 2, 3, or 4. However, larger vector
  // sizes (8 and 16) are enabled when the Kernel capability is present. For
  // shader execution models, vector sizes are strictly limited to 4. In
  // non-shader contexts, vector sizes of 8 and 16 are also permitted, but
  // arbitrary sizes (e.g., 6 or 11) are not.
  uint32_t MaxVectorSize = ST.isShader() ? 4 : 16;
  LLVM_DEBUG(dbgs() << "MaxVectorSize: " << MaxVectorSize << "\n");

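  // The type-folding opcodes handled below share one recipe: scalars and
  // supported vector types get custom legalization, and vectors longer than
  // MaxVectorSize are first padded to a power-of-two element count and then
  // broken down until they fit (e.g. a 6-element vector is widened to 8 and,
  // for a shader target, subsequently split down to 4).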
  for (auto Opc : getTypeFoldingSupportedOpcodes()) {
    switch (Opc) {
    case G_EXTRACT_VECTOR_ELT:
    case G_UREM:
    case G_SREM:
    case G_UDIV:
    case G_SDIV:
    case G_FREM:
      break;
    default:
      getActionDefinitionsBuilder(Opc)
          .customFor(allScalars)
          .customFor(allowedVectorTypes)
          .moreElementsToNextPow2(0)
          .fewerElementsIf(vectorElementCountIsGreaterThan(0, MaxVectorSize),
                           changeElementCountTo(
                               0, ElementCount::getFixed(MaxVectorSize)))
          .custom();
      break;
    }
  }

  getActionDefinitionsBuilder({G_UREM, G_SREM, G_SDIV, G_UDIV, G_FREM})
      .customFor(allScalars)
      .customFor(allowedVectorTypes)
      .moreElementsToNextPow2(0)
      .fewerElementsIf(vectorElementCountIsGreaterThan(0, MaxVectorSize),
                       changeElementCountTo(
                           0, ElementCount::getFixed(MaxVectorSize)))
      .custom();

  getActionDefinitionsBuilder({G_FMA, G_STRICT_FMA})
      .legalFor(allScalars)
      .legalFor(allowedVectorTypes)
      .moreElementsToNextPow2(0)
      .fewerElementsIf(vectorElementCountIsGreaterThan(0, MaxVectorSize),
                       changeElementCountTo(
                           0, ElementCount::getFixed(MaxVectorSize)))
      .alwaysLegal();

  getActionDefinitionsBuilder(G_INTRINSIC_W_SIDE_EFFECTS).custom();

  getActionDefinitionsBuilder(G_SHUFFLE_VECTOR)
      .legalForCartesianProduct(allowedVectorTypes, allowedVectorTypes)
      .moreElementsToNextPow2(0)
      .lowerIf(vectorElementCountIsGreaterThan(0, MaxVectorSize))
      .moreElementsToNextPow2(1)
      .lowerIf(vectorElementCountIsGreaterThan(1, MaxVectorSize));

  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
      .moreElementsToNextPow2(1)
      .fewerElementsIf(vectorElementCountIsGreaterThan(1, MaxVectorSize),
                       changeElementCountTo(
                           1, ElementCount::getFixed(MaxVectorSize)))
      .custom();

  getActionDefinitionsBuilder(G_INSERT_VECTOR_ELT)
      .moreElementsToNextPow2(0)
      .fewerElementsIf(vectorElementCountIsGreaterThan(0, MaxVectorSize),
                       changeElementCountTo(
                           0, ElementCount::getFixed(MaxVectorSize)))
      .custom();

  // Illegal G_UNMERGE_VALUES instructions should be handled
  // during the combine phase.
  getActionDefinitionsBuilder(G_BUILD_VECTOR).alwaysLegal();

  // When entering the legalizer, there should be no G_BITCAST instructions.
  // They should all be calls to the `spv_bitcast` intrinsic. The call to
  // the intrinsic will be converted to a G_BITCAST during legalization if
  // the vectors are not legal. After using the rules to legalize a G_BITCAST,
  // we turn it back into a call to the intrinsic with a custom rule to avoid
  // potential machine verifier failures.
  getActionDefinitionsBuilder(G_BITCAST)
      .moreElementsToNextPow2(0)
      .moreElementsToNextPow2(1)
      .fewerElementsIf(vectorElementCountIsGreaterThan(0, MaxVectorSize),
                       changeElementCountTo(
                           0, ElementCount::getFixed(MaxVectorSize)))
      .lowerIf(vectorElementCountIsGreaterThan(1, MaxVectorSize))
      .custom();

  // If the result is still illegal, the combiner should be able to remove it.
  getActionDefinitionsBuilder(G_CONCAT_VECTORS)
      .legalForCartesianProduct(allowedVectorTypes, allowedVectorTypes);
267
268 getActionDefinitionsBuilder(G_SPLAT_VECTOR)
269 .legalFor(allowedVectorTypes)
273 .alwaysLegal();
274
275 // Vector Reduction Operations
277 {G_VECREDUCE_SMIN, G_VECREDUCE_SMAX, G_VECREDUCE_UMIN, G_VECREDUCE_UMAX,
278 G_VECREDUCE_ADD, G_VECREDUCE_MUL, G_VECREDUCE_FMUL, G_VECREDUCE_FMIN,
279 G_VECREDUCE_FMAX, G_VECREDUCE_FMINIMUM, G_VECREDUCE_FMAXIMUM,
280 G_VECREDUCE_OR, G_VECREDUCE_AND, G_VECREDUCE_XOR})
281 .legalFor(allowedVectorTypes)
282 .scalarize(1)
283 .lower();
284
285 getActionDefinitionsBuilder({G_VECREDUCE_SEQ_FADD, G_VECREDUCE_SEQ_FMUL})
286 .scalarize(2)
287 .lower();
288
289 // Illegal G_UNMERGE_VALUES instructions should be handled
290 // during the combine phase.
291 getActionDefinitionsBuilder(G_UNMERGE_VALUES)
293
  getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE})
      .unsupportedIf(LegalityPredicates::any(typeIs(0, p9), typeIs(1, p9)))
      .legalIf(all(typeInSet(0, allPtrs), typeInSet(1, allPtrs)));

  getActionDefinitionsBuilder(G_MEMSET)
      .unsupportedIf(typeIs(0, p9))
      .legalIf(all(typeInSet(0, allPtrs), typeInSet(1, allIntScalars)));

  getActionDefinitionsBuilder(G_ADDRSPACE_CAST)
      .unsupportedIf(
          LegalityPredicates::any(all(typeIs(0, p9), typeIsNot(1, p9)),
                                  all(typeIsNot(0, p9), typeIs(1, p9))))
      .legalForCartesianProduct(allPtrs, allPtrs);

  // Should we be legalizing bad scalar sizes like s5 here instead
  // of handling them in the instruction selector?
  getActionDefinitionsBuilder({G_LOAD, G_STORE})
      .unsupportedIf(typeIs(1, p9))
      .legalForCartesianProduct(allowedVectorTypes, allPtrs)
      .legalForCartesianProduct(allPtrs, allPtrs)
      .legalIf(isScalar(0))
      .custom();

  getActionDefinitionsBuilder({G_SMIN, G_SMAX, G_UMIN, G_UMAX, G_ABS,
                               G_BITREVERSE, G_SADDSAT, G_UADDSAT, G_SSUBSAT,
                               G_USUBSAT, G_SCMP, G_UCMP})
      .legalFor(allIntScalarsAndVectors)
      .legalIf(extendedScalarsAndVectors);

  getActionDefinitionsBuilder(G_STRICT_FLDEXP)
      .legalForCartesianProduct(allFloatScalarsAndVectors, allIntScalars);

  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
      .legalForCartesianProduct(allIntScalarsAndVectors,
                                allFloatScalarsAndVectors);

  getActionDefinitionsBuilder({G_FPTOSI_SAT, G_FPTOUI_SAT})
      .legalForCartesianProduct(allIntScalarsAndVectors,
                                allFloatScalarsAndVectors);

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
      .legalForCartesianProduct(allFloatScalarsAndVectors,
                                allScalarsAndVectors);

  getActionDefinitionsBuilder(G_CTPOP)
      .legalForCartesianProduct(allIntScalarsAndVectors)
      .legalIf(extendedScalarsAndVectorsProduct);

  // Extensions.
  getActionDefinitionsBuilder({G_TRUNC, G_ZEXT, G_SEXT, G_ANYEXT})
      .legalForCartesianProduct(allScalarsAndVectors)
      .legalIf(extendedScalarsAndVectorsProduct);

  getActionDefinitionsBuilder(G_PHI)
      .legalFor(allPtrsScalarsAndVectors)
      .legalIf(extendedPtrsScalarsAndVectors);

  getActionDefinitionsBuilder(G_SELECT).legalIf(
      all(typeInSet(0, allPtrsScalarsAndVectors),
          typeInSet(1, allPtrsScalarsAndVectors)));

  getActionDefinitionsBuilder({G_IMPLICIT_DEF, G_FREEZE})
      .legalFor({s1, s128})
      .legalFor(allFloatAndIntScalarsAndPtrs)
      .legalFor(allowedVectorTypes)
      .moreElementsToNextPow2(0)
      .fewerElementsIf(vectorElementCountIsGreaterThan(0, MaxVectorSize),
                       changeElementCountTo(
                           0, ElementCount::getFixed(MaxVectorSize)));

  getActionDefinitionsBuilder({G_STACKSAVE, G_STACKRESTORE}).alwaysLegal();

  getActionDefinitionsBuilder(G_INTTOPTR)
      .legalForCartesianProduct(allPtrs, allIntScalars)
      .legalIf(
          all(typeInSet(0, allPtrs), typeOfExtendedScalars(1, IsExtendedInts)));
  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalForCartesianProduct(allIntScalars, allPtrs)
      .legalIf(
          all(typeOfExtendedScalars(0, IsExtendedInts), typeInSet(1, allPtrs)));
  getActionDefinitionsBuilder(G_PTR_ADD)
      .legalForCartesianProduct(allPtrs, allIntScalars)
      .legalIf(
          all(typeInSet(0, allPtrs), typeOfExtendedScalars(1, IsExtendedInts)));

  // ST.canDirectlyComparePointers() for pointer args is supported in
  // legalizeCustom().
  getActionDefinitionsBuilder(G_ICMP)
      .unsupportedIf(LegalityPredicates::any(
          all(typeIs(0, p9), typeInSet(1, allPtrs), typeIsNot(1, p9)),
          all(typeInSet(0, allPtrs), typeIsNot(0, p9), typeIs(1, p9))))
      .customIf(all(typeInSet(0, allBoolScalarsAndVectors),
                    typeInSet(1, allPtrsScalarsAndVectors)));

  getActionDefinitionsBuilder(G_FCMP).legalIf(
      all(typeInSet(0, allBoolScalarsAndVectors),
          typeInSet(1, allFloatScalarsAndVectors)));

  getActionDefinitionsBuilder({G_ATOMICRMW_OR, G_ATOMICRMW_ADD, G_ATOMICRMW_AND,
                               G_ATOMICRMW_MAX, G_ATOMICRMW_MIN,
                               G_ATOMICRMW_SUB, G_ATOMICRMW_XOR,
                               G_ATOMICRMW_UMAX, G_ATOMICRMW_UMIN})
      .legalForCartesianProduct(allIntScalars, allPtrs);

  getActionDefinitionsBuilder(
      {G_ATOMICRMW_FADD, G_ATOMICRMW_FSUB, G_ATOMICRMW_FMIN, G_ATOMICRMW_FMAX})
      .legalForCartesianProduct(allFloatScalarsAndF16Vector2AndVector4s,
                                allPtrs);

  getActionDefinitionsBuilder(G_ATOMICRMW_XCHG)
      .legalForCartesianProduct(allFloatAndIntScalarsAndPtrs, allPtrs);

  getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG_WITH_SUCCESS).lower();
  // TODO: add proper legalization rules.
  getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG).alwaysLegal();

  getActionDefinitionsBuilder(
      {G_UADDO, G_SADDO, G_USUBO, G_SSUBO, G_UMULO, G_SMULO})
      .alwaysLegal();

  getActionDefinitionsBuilder({G_LROUND, G_LLROUND})
      .legalForCartesianProduct(allFloatScalarsAndVectors,
                                allIntScalarsAndVectors);

  // FP conversions.
  getActionDefinitionsBuilder({G_FPTRUNC, G_FPEXT})
      .legalForCartesianProduct(allFloatScalarsAndVectors);

  // Pointer-handling.
  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});

  getActionDefinitionsBuilder(G_GLOBAL_VALUE).legalFor(allPtrs);

  // Control-flow. In some cases (e.g. constants) s1 may be promoted to s32.
  getActionDefinitionsBuilder(G_BRCOND).legalFor({s1, s32});

  getActionDefinitionsBuilder(G_FPOWI).legalForCartesianProduct(
      allFloatScalarsAndVectors, {s32, v2s32, v3s32, v4s32, v8s32, v16s32});

  // TODO: Review the target OpenCL and GLSL Extended Instruction Set specs to
  // tighten these requirements. Many of these math functions are only legal on
  // specific bitwidths, so they are not selectable for
  // allFloatScalarsAndVectors.
  getActionDefinitionsBuilder({G_STRICT_FSQRT,
                               G_FPOW,
                               G_FEXP,
                               G_FMODF,
                               G_FEXP2,
                               G_FLOG,
                               G_FLOG2,
                               G_FLOG10,
                               G_FABS,
                               G_FMINNUM,
                               G_FMAXNUM,
                               G_FCEIL,
                               G_FCOS,
                               G_FSIN,
                               G_FTAN,
                               G_FACOS,
                               G_FASIN,
                               G_FATAN,
                               G_FATAN2,
                               G_FCOSH,
                               G_FSINH,
                               G_FTANH,
                               G_FSQRT,
                               G_FFLOOR,
                               G_FRINT,
                               G_FNEARBYINT,
                               G_INTRINSIC_ROUND,
                               G_INTRINSIC_TRUNC,
                               G_FMINIMUM,
                               G_FMAXIMUM,
                               G_INTRINSIC_ROUNDEVEN})
      .legalFor(allFloatScalarsAndVectors);

  getActionDefinitionsBuilder(G_FCOPYSIGN)
      .legalForCartesianProduct(allFloatScalarsAndVectors,
                                allFloatScalarsAndVectors);

  getActionDefinitionsBuilder(G_FLDEXP).legalForCartesianProduct(
      allFloatScalarsAndVectors, allIntScalarsAndVectors);

  if (ST.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
    getActionDefinitionsBuilder(
        {G_CTTZ, G_CTTZ_ZERO_UNDEF, G_CTLZ, G_CTLZ_ZERO_UNDEF})
        .legalForCartesianProduct(allIntScalarsAndVectors,
                                  allIntScalarsAndVectors);

    // Struct return types become a single scalar, so cannot easily legalize.
    getActionDefinitionsBuilder({G_SMULH, G_UMULH}).alwaysLegal();
  }

  getActionDefinitionsBuilder(G_IS_FPCLASS).custom();

  getLegacyLegalizerInfo().computeTables();
  verify(*ST.getInstrInfo());
}
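
// Custom legalization for G_EXTRACT_VECTOR_ELT: rewrite the generic
// instruction as a call to the spv_extractelt intrinsic, which instruction
// selection later maps to the corresponding SPIR-V vector-extract operation.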
static bool legalizeExtractVectorElt(LegalizerHelper &Helper, MachineInstr &MI,
                                     SPIRVGlobalRegistry *GR) {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  Register IdxReg = MI.getOperand(2).getReg();

  MIRBuilder
      .buildIntrinsic(Intrinsic::spv_extractelt, ArrayRef<Register>{DstReg})
      .addUse(SrcReg)
      .addUse(IdxReg);
  MI.eraseFromParent();
  return true;
}

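// Custom legalization for G_INSERT_VECTOR_ELT: rewritten as a call to the
// spv_insertelt intrinsic, mirroring legalizeExtractVectorElt above.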
static bool legalizeInsertVectorElt(LegalizerHelper &Helper, MachineInstr &MI,
                                    SPIRVGlobalRegistry *GR) {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  Register ValReg = MI.getOperand(2).getReg();
  Register IdxReg = MI.getOperand(3).getReg();

  MIRBuilder
      .buildIntrinsic(Intrinsic::spv_insertelt, ArrayRef<Register>{DstReg})
      .addUse(SrcReg)
      .addUse(ValReg)
      .addUse(IdxReg);
  MI.eraseFromParent();
  return true;
}

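// Converts the pointer in Reg to an integer of type ConvTy via G_PTRTOINT,
// registering the given SPIR-V type for the new virtual register.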
static Register convertPtrToInt(Register Reg, LLT ConvTy, SPIRVType *SpvType,
                                LegalizerHelper &Helper,
                                MachineRegisterInfo &MRI,
                                SPIRVGlobalRegistry *GR) {
  Register ConvReg = MRI.createGenericVirtualRegister(ConvTy);
  MRI.setRegClass(ConvReg, GR->getRegClass(SpvType));
  GR->assignSPIRVTypeToVReg(SpvType, ConvReg, Helper.MIRBuilder.getMF());
  Helper.MIRBuilder.buildInstr(TargetOpcode::G_PTRTOINT)
      .addDef(ConvReg)
      .addUse(Reg);
  return ConvReg;
}

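// Reports whether a vector type needs the custom legalization implemented in
// this file: it has more elements than the target allows (4 for shaders, 16
// otherwise), or more than 4 elements without being a power of two.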
static bool needsVectorLegalization(const LLT &Ty, const SPIRVSubtarget &ST) {
  if (!Ty.isVector())
    return false;
  unsigned NumElements = Ty.getNumElements();
  unsigned MaxVectorSize = ST.isShader() ? 4 : 16;
  return (NumElements > 4 && !isPowerOf2_32(NumElements)) ||
         NumElements > MaxVectorSize;
}

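// Splits a load of an over-long vector into one scalar load per element: each
// element address is formed with the spv_gep intrinsic, and the loaded
// scalars are recombined with G_BUILD_VECTOR.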
static bool legalizeLoad(LegalizerHelper &Helper, MachineInstr &MI,
                         SPIRVGlobalRegistry *GR) {
  MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  Register DstReg = MI.getOperand(0).getReg();
  Register PtrReg = MI.getOperand(1).getReg();
  LLT DstTy = MRI.getType(DstReg);

  if (!DstTy.isVector())
    return true;

  const SPIRVSubtarget &ST = MI.getMF()->getSubtarget<SPIRVSubtarget>();
  if (!needsVectorLegalization(DstTy, ST))
    return true;

  SmallVector<Register, 8> SplitRegs;
  LLT EltTy = DstTy.getElementType();
  unsigned NumElts = DstTy.getNumElements();

  LLT PtrTy = MRI.getType(PtrReg);
  auto Zero = MIRBuilder.buildConstant(LLT::scalar(32), 0);

  for (unsigned i = 0; i < NumElts; ++i) {
    auto Idx = MIRBuilder.buildConstant(LLT::scalar(32), i);
    Register EltPtr = MRI.createGenericVirtualRegister(PtrTy);

    MIRBuilder.buildIntrinsic(Intrinsic::spv_gep, ArrayRef<Register>{EltPtr})
        .addImm(1) // InBounds
        .addUse(PtrReg)
        .addUse(Zero.getReg(0))
        .addUse(Idx.getReg(0));

    MachinePointerInfo EltPtrInfo;
    Align EltAlign = Align(1);
    if (!MI.memoperands_empty()) {
      MachineMemOperand *MMO = *MI.memoperands_begin();
      EltPtrInfo =
          MMO->getPointerInfo().getWithOffset(i * EltTy.getSizeInBytes());
      EltAlign = commonAlignment(MMO->getAlign(), i * EltTy.getSizeInBytes());
    }

    Register EltReg = MRI.createGenericVirtualRegister(EltTy);
    MIRBuilder.buildLoad(EltReg, EltPtr, EltPtrInfo, EltAlign);
    SplitRegs.push_back(EltReg);
  }

  MIRBuilder.buildBuildVector(DstReg, SplitRegs);
  MI.eraseFromParent();
  return true;
}

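// The store counterpart of legalizeLoad: unmerge the vector value and store
// each element through an spv_gep-computed element pointer, propagating the
// original memory operand's offset and alignment where available.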
static bool legalizeStore(LegalizerHelper &Helper, MachineInstr &MI,
                          SPIRVGlobalRegistry *GR) {
  MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  Register ValReg = MI.getOperand(0).getReg();
  Register PtrReg = MI.getOperand(1).getReg();
  LLT ValTy = MRI.getType(ValReg);

  assert(ValTy.isVector() && "Expected vector store");

  SmallVector<Register, 8> SplitRegs;
  LLT EltTy = ValTy.getElementType();
  unsigned NumElts = ValTy.getNumElements();

  for (unsigned i = 0; i < NumElts; ++i)
    SplitRegs.push_back(MRI.createGenericVirtualRegister(EltTy));

  MIRBuilder.buildUnmerge(SplitRegs, ValReg);

  LLT PtrTy = MRI.getType(PtrReg);
  auto Zero = MIRBuilder.buildConstant(LLT::scalar(32), 0);

  for (unsigned i = 0; i < NumElts; ++i) {
    auto Idx = MIRBuilder.buildConstant(LLT::scalar(32), i);
    Register EltPtr = MRI.createGenericVirtualRegister(PtrTy);

    MIRBuilder.buildIntrinsic(Intrinsic::spv_gep, ArrayRef<Register>{EltPtr})
        .addImm(1) // InBounds
        .addUse(PtrReg)
        .addUse(Zero.getReg(0))
        .addUse(Idx.getReg(0));

    MachinePointerInfo EltPtrInfo;
    Align EltAlign = Align(1);
    if (!MI.memoperands_empty()) {
      MachineMemOperand *MMO = *MI.memoperands_begin();
      EltPtrInfo =
          MMO->getPointerInfo().getWithOffset(i * EltTy.getSizeInBytes());
      EltAlign = commonAlignment(MMO->getAlign(), i * EltTy.getSizeInBytes());
    }

    MIRBuilder.buildStore(SplitRegs[i], EltPtr, EltPtrInfo, EltAlign);
  }

  MI.eraseFromParent();
  return true;
}

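// Dispatch for instructions the rules above marked Custom. G_ICMP on
// pointers is handled inline: both operands are converted with G_PTRTOINT
// when the target cannot compare pointers directly (or the predicate is not
// a plain equality test).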
bool SPIRVLegalizerInfo::legalizeCustom(
    LegalizerHelper &Helper, MachineInstr &MI,
    LostDebugLocObserver &LocObserver) const {
  MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  switch (MI.getOpcode()) {
  default:
    // TODO: implement legalization for other opcodes.
    return true;
  case TargetOpcode::G_BITCAST:
    return legalizeBitcast(Helper, MI);
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
    return legalizeExtractVectorElt(Helper, MI, GR);
  case TargetOpcode::G_INSERT_VECTOR_ELT:
    return legalizeInsertVectorElt(Helper, MI, GR);
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return legalizeIntrinsic(Helper, MI);
  case TargetOpcode::G_IS_FPCLASS:
    return legalizeIsFPClass(Helper, MI, LocObserver);
  case TargetOpcode::G_ICMP: {
    assert(GR->getSPIRVTypeForVReg(MI.getOperand(0).getReg()));
    auto &Op0 = MI.getOperand(2);
    auto &Op1 = MI.getOperand(3);
    Register Reg0 = Op0.getReg();
    Register Reg1 = Op1.getReg();
    CmpInst::Predicate Cond =
        static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
    if ((!ST->canDirectlyComparePointers() ||
         (Cond != CmpInst::ICMP_EQ && Cond != CmpInst::ICMP_NE)) &&
        MRI.getType(Reg0).isPointer() && MRI.getType(Reg1).isPointer()) {
      LLT ConvT = LLT::scalar(ST->getPointerSize());
      Type *LLVMTy = IntegerType::get(MI.getMF()->getFunction().getContext(),
                                      ST->getPointerSize());
      SPIRVType *SpirvTy = GR->getOrCreateSPIRVType(
          LLVMTy, Helper.MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
      Op0.setReg(convertPtrToInt(Reg0, ConvT, SpirvTy, Helper, MRI, GR));
      Op1.setReg(convertPtrToInt(Reg1, ConvT, SpirvTy, Helper, MRI, GR));
    }
    return true;
  }
  case TargetOpcode::G_LOAD:
    return legalizeLoad(Helper, MI, GR);
  case TargetOpcode::G_STORE:
    return legalizeStore(Helper, MI, GR);
  }
}

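// Creates a stack temporary sized and aligned for the vector in SrcReg, and
// types the returned pointer as "pointer to array of element type" in the
// Function storage class so spv_gep can address single elements.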
static MachineInstrBuilder
createStackTemporaryForVector(LegalizerHelper &Helper, SPIRVGlobalRegistry *GR,
                              Register SrcReg, LLT SrcTy,
                              MachinePointerInfo &PtrInfo, Align &VecAlign) {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();

  VecAlign = Helper.getStackTemporaryAlignment(SrcTy);
  auto StackTemp = Helper.createStackTemporary(
      TypeSize::getFixed(SrcTy.getSizeInBytes()), VecAlign, PtrInfo);

  // Set the type of StackTemp to a pointer to an array of the element type.
  SPIRVType *SpvSrcTy = GR->getSPIRVTypeForVReg(SrcReg);
  SPIRVType *EltSpvTy = GR->getScalarOrVectorComponentType(SpvSrcTy);
  const Type *LLVMEltTy = GR->getTypeForSPIRVType(EltSpvTy);
  const Type *LLVMArrTy =
      ArrayType::get(const_cast<Type *>(LLVMEltTy), SrcTy.getNumElements());
  SPIRVType *ArrSpvTy = GR->getOrCreateSPIRVType(
      LLVMArrTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
  SPIRVType *PtrToArrSpvTy = GR->getOrCreateSPIRVPointerType(
      ArrSpvTy, MIRBuilder, SPIRV::StorageClass::Function);

  Register StackReg = StackTemp.getReg(0);
  MRI.setRegClass(StackReg, GR->getRegClass(PtrToArrSpvTy));
  GR->assignSPIRVTypeToVReg(PtrToArrSpvTy, StackReg, MIRBuilder.getMF());

  return StackTemp;
}

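// An spv_bitcast only needs work when one of its sides is a vector that
// requires legalization; in that case it becomes a real G_BITCAST so the
// generic rules apply, and legalizeBitcast below turns it back afterwards.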
static bool legalizeSpvBitcast(LegalizerHelper &Helper, MachineInstr &MI,
                               SPIRVGlobalRegistry *GR) {
  LLVM_DEBUG(dbgs() << "Found a bitcast instruction\n");
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  const SPIRVSubtarget &ST = MI.getMF()->getSubtarget<SPIRVSubtarget>();

  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(2).getReg();
  LLT DstTy = MRI.getType(DstReg);
  LLT SrcTy = MRI.getType(SrcReg);

  // If an spv_bitcast needs to be legalized, we convert it to G_BITCAST to
  // allow using the generic legalization rules.
  if (needsVectorLegalization(DstTy, ST) ||
      needsVectorLegalization(SrcTy, ST)) {
    LLVM_DEBUG(dbgs() << "Replacing with a G_BITCAST\n");
    MIRBuilder.buildBitcast(DstReg, SrcReg);
    MI.eraseFromParent();
  }
  return true;
}

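// Legalizes spv_insertelt on over-long vectors. A constant in-range index is
// resolved by unmerging the source, substituting the new element, and
// rebuilding the vector; a dynamic index falls back to a stack temporary:
// spill the vector, store the element through spv_gep, reload the result.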
static bool legalizeSpvInsertElt(LegalizerHelper &Helper, MachineInstr &MI,
                                 SPIRVGlobalRegistry *GR) {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  const SPIRVSubtarget &ST = MI.getMF()->getSubtarget<SPIRVSubtarget>();

  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(DstReg);

  if (needsVectorLegalization(DstTy, ST)) {
    Register SrcReg = MI.getOperand(2).getReg();
    Register ValReg = MI.getOperand(3).getReg();
    LLT SrcTy = MRI.getType(SrcReg);
    MachineOperand &IdxOperand = MI.getOperand(4);

    if (getImm(IdxOperand, &MRI)) {
      uint64_t IdxVal = foldImm(IdxOperand, &MRI);
      if (IdxVal < SrcTy.getNumElements()) {
        SmallVector<Register, 16> Regs;
        SPIRVType *ElementType =
            GR->getScalarOrVectorComponentType(GR->getSPIRVTypeForVReg(SrcReg));
        LLT ElementLLTTy = GR->getRegType(ElementType);
        for (unsigned I = 0, E = SrcTy.getNumElements(); I < E; ++I) {
          Register Reg = MRI.createGenericVirtualRegister(ElementLLTTy);
          MRI.setRegClass(Reg, GR->getRegClass(ElementType));
          GR->assignSPIRVTypeToVReg(ElementType, Reg, *MI.getMF());
          Regs.push_back(Reg);
        }
        MIRBuilder.buildUnmerge(Regs, SrcReg);
        Regs[IdxVal] = ValReg;
        MIRBuilder.buildBuildVector(DstReg, Regs);
        MI.eraseFromParent();
        return true;
      }
    }

    LLT EltTy = SrcTy.getElementType();
    Align VecAlign;
    MachinePointerInfo PtrInfo;
    auto StackTemp = createStackTemporaryForVector(Helper, GR, SrcReg, SrcTy,
                                                   PtrInfo, VecAlign);

    MIRBuilder.buildStore(SrcReg, StackTemp, PtrInfo, VecAlign);

    Register IdxReg = IdxOperand.getReg();
    LLT PtrTy = MRI.getType(StackTemp.getReg(0));
    Register EltPtr = MRI.createGenericVirtualRegister(PtrTy);
    auto Zero = MIRBuilder.buildConstant(LLT::scalar(32), 0);

    MIRBuilder.buildIntrinsic(Intrinsic::spv_gep, ArrayRef<Register>{EltPtr})
        .addImm(1) // InBounds
        .addUse(StackTemp.getReg(0))
        .addUse(Zero.getReg(0))
        .addUse(IdxReg);

    MachinePointerInfo EltPtrInfo;
    Align EltAlign = Helper.getStackTemporaryAlignment(EltTy);
    MIRBuilder.buildStore(ValReg, EltPtr, EltPtrInfo, EltAlign);

    MIRBuilder.buildLoad(DstReg, StackTemp, PtrInfo, VecAlign);
    MI.eraseFromParent();
    return true;
  }
  return true;
}

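// Legalizes spv_extractelt on over-long vectors, following the same two
// strategies as legalizeSpvInsertElt: unmerge for a known in-range index,
// otherwise a stack spill plus an spv_gep-addressed element load.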
static bool legalizeSpvExtractElt(LegalizerHelper &Helper, MachineInstr &MI,
                                  SPIRVGlobalRegistry *GR) {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  const SPIRVSubtarget &ST = MI.getMF()->getSubtarget<SPIRVSubtarget>();

  Register SrcReg = MI.getOperand(2).getReg();
  LLT SrcTy = MRI.getType(SrcReg);

  if (needsVectorLegalization(SrcTy, ST)) {
    Register DstReg = MI.getOperand(0).getReg();
    MachineOperand &IdxOperand = MI.getOperand(3);

    if (getImm(IdxOperand, &MRI)) {
      uint64_t IdxVal = foldImm(IdxOperand, &MRI);
      if (IdxVal < SrcTy.getNumElements()) {
        LLT DstTy = MRI.getType(DstReg);
        SmallVector<Register, 16> Regs;
        SPIRVType *DstSpvTy = GR->getSPIRVTypeForVReg(DstReg);
        for (unsigned I = 0, E = SrcTy.getNumElements(); I < E; ++I) {
          if (I == IdxVal) {
            Regs.push_back(DstReg);
          } else {
            Register Reg = MRI.createGenericVirtualRegister(DstTy);
            MRI.setRegClass(Reg, GR->getRegClass(DstSpvTy));
            GR->assignSPIRVTypeToVReg(DstSpvTy, Reg, *MI.getMF());
            Regs.push_back(Reg);
          }
        }
        MIRBuilder.buildUnmerge(Regs, SrcReg);
        MI.eraseFromParent();
        return true;
      }
    }

    LLT EltTy = SrcTy.getElementType();
    Align VecAlign;
    MachinePointerInfo PtrInfo;
    auto StackTemp = createStackTemporaryForVector(Helper, GR, SrcReg, SrcTy,
                                                   PtrInfo, VecAlign);

    MIRBuilder.buildStore(SrcReg, StackTemp, PtrInfo, VecAlign);

    Register IdxReg = IdxOperand.getReg();
    LLT PtrTy = MRI.getType(StackTemp.getReg(0));
    Register EltPtr = MRI.createGenericVirtualRegister(PtrTy);
    auto Zero = MIRBuilder.buildConstant(LLT::scalar(32), 0);

    MIRBuilder.buildIntrinsic(Intrinsic::spv_gep, ArrayRef<Register>{EltPtr})
        .addImm(1) // InBounds
        .addUse(StackTemp.getReg(0))
        .addUse(Zero.getReg(0))
        .addUse(IdxReg);

    MachinePointerInfo EltPtrInfo;
    Align EltAlign = Helper.getStackTemporaryAlignment(EltTy);
    MIRBuilder.buildLoad(DstReg, EltPtr, EltPtrInfo, EltAlign);

    MI.eraseFromParent();
    return true;
  }
  return true;
}

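// Legalizes spv_const_composite results that are over-long vectors by
// expanding them to G_BUILD_VECTOR; a composite with no attached values
// becomes a vector whose lanes all reuse a single zero constant.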
static bool legalizeSpvConstComposite(LegalizerHelper &Helper, MachineInstr &MI,
                                      SPIRVGlobalRegistry *GR) {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  const SPIRVSubtarget &ST = MI.getMF()->getSubtarget<SPIRVSubtarget>();

  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI.getType(DstReg);

  if (!needsVectorLegalization(DstTy, ST))
    return true;

  SmallVector<Register, 16> SrcRegs;
  if (MI.getNumOperands() == 2) {
    // The "null" case: no values are attached.
    LLT EltTy = DstTy.getElementType();
    auto Zero = MIRBuilder.buildConstant(EltTy, 0);
    SPIRVType *SpvDstTy = GR->getSPIRVTypeForVReg(DstReg);
    SPIRVType *SpvEltTy = GR->getScalarOrVectorComponentType(SpvDstTy);
    GR->assignSPIRVTypeToVReg(SpvEltTy, Zero.getReg(0), MIRBuilder.getMF());
    for (unsigned i = 0; i < DstTy.getNumElements(); ++i)
      SrcRegs.push_back(Zero.getReg(0));
  } else {
    for (unsigned i = 2; i < MI.getNumOperands(); ++i) {
      SrcRegs.push_back(MI.getOperand(i).getReg());
    }
  }
  MIRBuilder.buildBuildVector(DstReg, SrcRegs);
  MI.eraseFromParent();
  return true;
}

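// Routes the SPIR-V intrinsics that may need legalization to the helpers
// above; every other intrinsic is considered legal as-is.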
bool SPIRVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                           MachineInstr &MI) const {
  LLVM_DEBUG(dbgs() << "legalizeIntrinsic: " << MI);
  auto IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::spv_bitcast:
    return legalizeSpvBitcast(Helper, MI, GR);
  case Intrinsic::spv_insertelt:
    return legalizeSpvInsertElt(Helper, MI, GR);
  case Intrinsic::spv_extractelt:
    return legalizeSpvExtractElt(Helper, MI, GR);
  case Intrinsic::spv_const_composite:
    return legalizeSpvConstComposite(Helper, MI, GR);
  }
  return true;
}

bool SPIRVLegalizerInfo::legalizeBitcast(LegalizerHelper &Helper,
                                         MachineInstr &MI) const {
  // Once the G_BITCAST is using vectors that are allowed, we turn it back into
  // an spv_bitcast to avoid verifier problems when the register types are the
  // same for the source and the result. Note that the SPIR-V types associated
  // with the bitcast can be different even if the register types are the same.
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  SmallVector<Register, 1> DstRegs = {DstReg};
  MIRBuilder.buildIntrinsic(Intrinsic::spv_bitcast, DstRegs).addUse(SrcReg);
  MI.eraseFromParent();
  return true;
}

// Note this code was copied from LegalizerHelper::lowerISFPCLASS and adjusted
// to ensure that all instructions created during the lowering have SPIR-V types
// assigned to them.
bool SPIRVLegalizerInfo::legalizeIsFPClass(
    LegalizerHelper &Helper, MachineInstr &MI,
    LostDebugLocObserver &LocObserver) const {
  auto [DstReg, DstTy, SrcReg, SrcTy] = MI.getFirst2RegLLTs();
  FPClassTest Mask = static_cast<FPClassTest>(MI.getOperand(2).getImm());

  auto &MIRBuilder = Helper.MIRBuilder;
  auto &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  Type *LLVMDstTy =
      IntegerType::get(MIRBuilder.getContext(), DstTy.getScalarSizeInBits());
  if (DstTy.isVector())
    LLVMDstTy = VectorType::get(LLVMDstTy, DstTy.getElementCount());
  SPIRVType *SPIRVDstTy = GR->getOrCreateSPIRVType(
      LLVMDstTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
      /*EmitIR*/ true);

  unsigned BitSize = SrcTy.getScalarSizeInBits();
  const fltSemantics &Semantics = getFltSemanticForLLT(SrcTy.getScalarType());

  LLT IntTy = LLT::scalar(BitSize);
  Type *LLVMIntTy = IntegerType::get(MIRBuilder.getContext(), BitSize);
  if (SrcTy.isVector()) {
    IntTy = LLT::vector(SrcTy.getElementCount(), IntTy);
    LLVMIntTy = VectorType::get(LLVMIntTy, SrcTy.getElementCount());
  }
  SPIRVType *SPIRVIntTy = GR->getOrCreateSPIRVType(
      LLVMIntTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
      /*EmitIR*/ true);

  // Clang doesn't support capture of structured bindings:
  LLT DstTyCopy = DstTy;
  const auto assignSPIRVTy = [&](MachineInstrBuilder &&MI) {
    // Assign this MI's (assumed only) destination to one of the two types we
    // expect: either the G_IS_FPCLASS's destination type, or the integer type
    // bitcast from the source type.
    LLT MITy = MRI.getType(MI.getReg(0));
    assert((MITy == IntTy || MITy == DstTyCopy) &&
           "Unexpected LLT type while lowering G_IS_FPCLASS");
    auto *SPVTy = MITy == IntTy ? SPIRVIntTy : SPIRVDstTy;
    GR->assignSPIRVTypeToVReg(SPVTy, MI.getReg(0), MF);
    return MI;
  };

  // Helper to build and assign a constant in one go
  const auto buildSPIRVConstant = [&](LLT Ty, auto &&C) -> MachineInstrBuilder {
    if (!Ty.isFixedVector())
      return assignSPIRVTy(MIRBuilder.buildConstant(Ty, C));
    auto ScalarC = MIRBuilder.buildConstant(Ty.getScalarType(), C);
    assert((Ty == IntTy || Ty == DstTyCopy) &&
           "Unexpected LLT type while lowering constant for G_IS_FPCLASS");
    SPIRVType *VecEltTy = GR->getOrCreateSPIRVType(
        (Ty == IntTy ? LLVMIntTy : LLVMDstTy)->getScalarType(), MIRBuilder,
        SPIRV::AccessQualifier::ReadWrite,
        /*EmitIR*/ true);
    GR->assignSPIRVTypeToVReg(VecEltTy, ScalarC.getReg(0), MF);
    return assignSPIRVTy(MIRBuilder.buildSplatBuildVector(Ty, ScalarC));
  };

  if (Mask == fcNone) {
    MIRBuilder.buildCopy(DstReg, buildSPIRVConstant(DstTy, 0));
    MI.eraseFromParent();
    return true;
  }
  if (Mask == fcAllFlags) {
    MIRBuilder.buildCopy(DstReg, buildSPIRVConstant(DstTy, 1));
    MI.eraseFromParent();
    return true;
  }

  // Note that rather than creating a COPY here (between a floating-point and
  // integer type of the same size) we create a SPIR-V bitcast immediately. We
  // can't create a G_BITCAST because the LLTs are the same, and we can't seem
  // to correctly lower COPYs to SPIR-V bitcasts at this moment.
  Register ResVReg = MRI.createGenericVirtualRegister(IntTy);
  MRI.setRegClass(ResVReg, GR->getRegClass(SPIRVIntTy));
  GR->assignSPIRVTypeToVReg(SPIRVIntTy, ResVReg, Helper.MIRBuilder.getMF());
  auto AsInt = MIRBuilder.buildInstr(SPIRV::OpBitcast)
                   .addDef(ResVReg)
                   .addUse(GR->getSPIRVTypeID(SPIRVIntTy))
                   .addUse(SrcReg);
  AsInt = assignSPIRVTy(std::move(AsInt));

  // Various masks.
  APInt SignBit = APInt::getSignMask(BitSize);
  APInt ValueMask = APInt::getSignedMaxValue(BitSize);     // All bits but sign.
  APInt Inf = APFloat::getInf(Semantics).bitcastToAPInt(); // Exp and int bit.
  APInt ExpMask = Inf;
  APInt AllOneMantissa = APFloat::getLargest(Semantics).bitcastToAPInt() & ~Inf;
  APInt QNaNBitMask =
      APInt::getOneBitSet(BitSize, AllOneMantissa.getActiveBits() - 1);
  APInt InversionMask = APInt::getAllOnes(DstTy.getScalarSizeInBits());

  auto SignBitC = buildSPIRVConstant(IntTy, SignBit);
  auto ValueMaskC = buildSPIRVConstant(IntTy, ValueMask);
  auto InfC = buildSPIRVConstant(IntTy, Inf);
  auto ExpMaskC = buildSPIRVConstant(IntTy, ExpMask);
  auto ZeroC = buildSPIRVConstant(IntTy, 0);

  auto Abs = assignSPIRVTy(MIRBuilder.buildAnd(IntTy, AsInt, ValueMaskC));
  auto Sign = assignSPIRVTy(
      MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_NE, DstTy, AsInt, Abs));

  auto Res = buildSPIRVConstant(DstTy, 0);

  const auto appendToRes = [&](MachineInstrBuilder &&ToAppend) {
    Res = assignSPIRVTy(
        MIRBuilder.buildOr(DstTyCopy, Res, assignSPIRVTy(std::move(ToAppend))));
  };

  // Tests that involve more than one class should be processed first.
  if ((Mask & fcFinite) == fcFinite) {
    // finite(V) ==> abs(V) u< exp_mask
    appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy, Abs,
                                     ExpMaskC));
    Mask &= ~fcFinite;
  } else if ((Mask & fcFinite) == fcPosFinite) {
    // finite(V) && V > 0 ==> V u< exp_mask
    appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy, AsInt,
                                     ExpMaskC));
    Mask &= ~fcPosFinite;
  } else if ((Mask & fcFinite) == fcNegFinite) {
    // finite(V) && V < 0 ==> abs(V) u< exp_mask && signbit == 1
    auto Cmp = assignSPIRVTy(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT,
                                                  DstTy, Abs, ExpMaskC));
    appendToRes(MIRBuilder.buildAnd(DstTy, Cmp, Sign));
    Mask &= ~fcNegFinite;
  }

  if (FPClassTest PartialCheck = Mask & (fcZero | fcSubnormal)) {
    // fcZero | fcSubnormal => test all exponent bits are 0
    // TODO: Handle sign bit specific cases
    // TODO: Handle inverted case
    if (PartialCheck == (fcZero | fcSubnormal)) {
      auto ExpBits = assignSPIRVTy(MIRBuilder.buildAnd(IntTy, AsInt, ExpMaskC));
      appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy,
                                       ExpBits, ZeroC));
      Mask &= ~PartialCheck;
    }
  }

  // Check for individual classes.
  if (FPClassTest PartialCheck = Mask & fcZero) {
    if (PartialCheck == fcPosZero)
      appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy,
                                       AsInt, ZeroC));
    else if (PartialCheck == fcZero)
      appendToRes(
          MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy, Abs, ZeroC));
    else // fcNegZero
      appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy,
                                       AsInt, SignBitC));
  }

  if (FPClassTest PartialCheck = Mask & fcSubnormal) {
    // issubnormal(V) ==> unsigned(abs(V) - 1) u< (all mantissa bits set)
    // issubnormal(V) && V>0 ==> unsigned(V - 1) u< (all mantissa bits set)
    auto V = (PartialCheck == fcPosSubnormal) ? AsInt : Abs;
    auto OneC = buildSPIRVConstant(IntTy, 1);
    auto VMinusOne = MIRBuilder.buildSub(IntTy, V, OneC);
    auto SubnormalRes = assignSPIRVTy(
        MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy, VMinusOne,
                             buildSPIRVConstant(IntTy, AllOneMantissa)));
    if (PartialCheck == fcNegSubnormal)
      SubnormalRes = MIRBuilder.buildAnd(DstTy, SubnormalRes, Sign);
    appendToRes(std::move(SubnormalRes));
  }

  if (FPClassTest PartialCheck = Mask & fcInf) {
    if (PartialCheck == fcPosInf)
      appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy,
                                       AsInt, InfC));
    else if (PartialCheck == fcInf)
      appendToRes(
          MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy, Abs, InfC));
    else { // fcNegInf
      APInt NegInf = APFloat::getInf(Semantics, true).bitcastToAPInt();
      auto NegInfC = buildSPIRVConstant(IntTy, NegInf);
      appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_EQ, DstTy,
                                       AsInt, NegInfC));
    }
  }

  if (FPClassTest PartialCheck = Mask & fcNan) {
    auto InfWithQnanBitC =
        buildSPIRVConstant(IntTy, std::move(Inf) | QNaNBitMask);
    if (PartialCheck == fcNan) {
      // isnan(V) ==> abs(V) u> int(inf)
      appendToRes(
          MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_UGT, DstTy, Abs, InfC));
    } else if (PartialCheck == fcQNan) {
      // isquiet(V) ==> abs(V) u>= (unsigned(Inf) | quiet_bit)
      appendToRes(MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_UGE, DstTy, Abs,
                                       InfWithQnanBitC));
    } else { // fcSNan
      // issignaling(V) ==> abs(V) u> unsigned(Inf) &&
      //                    abs(V) u< (unsigned(Inf) | quiet_bit)
      auto IsNan = assignSPIRVTy(
          MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_UGT, DstTy, Abs, InfC));
      auto IsNotQnan = assignSPIRVTy(MIRBuilder.buildICmp(
          CmpInst::Predicate::ICMP_ULT, DstTy, Abs, InfWithQnanBitC));
      appendToRes(MIRBuilder.buildAnd(DstTy, IsNan, IsNotQnan));
    }
  }

  if (FPClassTest PartialCheck = Mask & fcNormal) {
    // isnormal(V) ==> (0 u< exp u< max_exp) ==> (unsigned(exp-1) u<
    // (max_exp-1))
    APInt ExpLSB = ExpMask & ~(ExpMask.shl(1));
    auto ExpMinusOne = assignSPIRVTy(
        MIRBuilder.buildSub(IntTy, Abs, buildSPIRVConstant(IntTy, ExpLSB)));
    APInt MaxExpMinusOne = std::move(ExpMask) - ExpLSB;
    auto NormalRes = assignSPIRVTy(
        MIRBuilder.buildICmp(CmpInst::Predicate::ICMP_ULT, DstTy, ExpMinusOne,
                             buildSPIRVConstant(IntTy, MaxExpMinusOne)));
    if (PartialCheck == fcNegNormal)
      NormalRes = MIRBuilder.buildAnd(DstTy, NormalRes, Sign);
    else if (PartialCheck == fcPosNormal) {
      auto PosSign = assignSPIRVTy(MIRBuilder.buildXor(
          DstTy, Sign, buildSPIRVConstant(DstTy, InversionMask)));
      NormalRes = MIRBuilder.buildAnd(DstTy, NormalRes, PosSign);
    }
    appendToRes(std::move(NormalRes));
  }

  MIRBuilder.buildCopy(DstReg, Res);
  MI.eraseFromParent();
  return true;
}