1//===-- NVPTXISelLowering.cpp - NVPTX DAG Lowering Implementation ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that NVPTX uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "NVPTXISelLowering.h"
16#include "NVPTX.h"
17#include "NVPTXISelDAGToDAG.h"
18#include "NVPTXSubtarget.h"
19#include "NVPTXTargetMachine.h"
21#include "NVPTXUtilities.h"
22#include "llvm/ADT/APFloat.h"
23#include "llvm/ADT/APInt.h"
24#include "llvm/ADT/STLExtras.h"
26#include "llvm/ADT/StringRef.h"
38#include "llvm/IR/Argument.h"
39#include "llvm/IR/Attributes.h"
40#include "llvm/IR/Constants.h"
41#include "llvm/IR/DataLayout.h"
44#include "llvm/IR/FPEnv.h"
45#include "llvm/IR/Function.h"
46#include "llvm/IR/GlobalValue.h"
47#include "llvm/IR/IRBuilder.h"
48#include "llvm/IR/Instruction.h"
50#include "llvm/IR/IntrinsicsNVPTX.h"
51#include "llvm/IR/Module.h"
52#include "llvm/IR/Type.h"
53#include "llvm/IR/Value.h"
65#include <algorithm>
66#include <cassert>
67#include <cmath>
68#include <cstdint>
69#include <iterator>
70#include <optional>
71#include <string>
72#include <tuple>
73#include <utility>
74#include <vector>
75
76#define DEBUG_TYPE "nvptx-lower"
77
78using namespace llvm;
79
80static cl::opt<bool> sched4reg(
81 "nvptx-sched4reg",
82 cl::desc("NVPTX Specific: schedule for register pressure"), cl::init(false));
83
84static cl::opt<unsigned> FMAContractLevelOpt(
85 "nvptx-fma-level", cl::Hidden,
86 cl::desc("NVPTX Specific: FMA contraction (0: don't do it"
87 " 1: do it 2: do it aggressively"),
88 cl::init(2));
89
90static cl::opt<NVPTX::DivPrecisionLevel> UsePrecDivF32(
91 "nvptx-prec-divf32", cl::Hidden,
92 cl::desc(
93 "NVPTX Specific: Override the precision of the lowering for f32 fdiv"),
94 cl::values(
95 clEnumValN(NVPTX::DivPrecisionLevel::Approx, "0", "Use div.approx"),
96 clEnumValN(NVPTX::DivPrecisionLevel::Full, "1", "Use div.full"),
97 clEnumValN(NVPTX::DivPrecisionLevel::IEEE754, "2",
98 "Use IEEE Compliant F32 div.rnd if available (default)"),
99 clEnumValN(NVPTX::DivPrecisionLevel::IEEE754_NoFTZ, "3",
100 "Use IEEE Compliant F32 div.rnd if available, no FTZ")),
101 cl::init(NVPTX::DivPrecisionLevel::IEEE754));
102
103static cl::opt<bool> UsePrecSqrtF32(
104 "nvptx-prec-sqrtf32", cl::Hidden,
105 cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),
106 cl::init(true));
107
108/// Whereas CUDA's implementation (see libdevice) uses ex2.approx for exp2(), it
109/// does NOT use lg2.approx for log2, so this is disabled by default.
110static cl::opt<bool> UseApproxLog2F32(
111 "nvptx-approx-log2f32",
112 cl::desc("NVPTX Specific: whether to use lg2.approx for log2"),
113 cl::init(false));
114
115static cl::opt<bool> ForceMinByValParamAlign(
116 "nvptx-force-min-byval-param-align", cl::Hidden,
117 cl::desc("NVPTX Specific: force 4-byte minimal alignment for byval"
118 " params of device functions."),
119 cl::init(false));
120
121NVPTX::DivPrecisionLevel
122NVPTXTargetLowering::getDivF32Level(const MachineFunction &MF,
123 const SDNode &N) const {
124 // If nvptx-prec-divf32=N is used on the command-line, always honor it
125 if (UsePrecDivF32.getNumOccurrences() > 0)
126 return UsePrecDivF32;
127
128 const SDNodeFlags Flags = N.getFlags();
129 if (Flags.hasApproximateFuncs())
130 return NVPTX::DivPrecisionLevel::Approx;
131
132 return NVPTX::DivPrecisionLevel::IEEE754;
133}
134
135bool NVPTXTargetLowering::usePrecSqrtF32(const SDNode *N) const {
136 // If nvptx-prec-sqrtf32 is used on the command-line, always honor it
137 if (UsePrecSqrtF32.getNumOccurrences() > 0)
138 return UsePrecSqrtF32;
139
140 if (N) {
141 const SDNodeFlags Flags = N->getFlags();
142 if (Flags.hasApproximateFuncs())
143 return false;
144 }
145
146 return true;
147}
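// Example (illustrative): for an fsqrt node carrying the 'afn'
// (approximate-functions) flag, usePrecSqrtF32 returns false so the
// approximate sqrt is used, unless -nvptx-prec-sqrtf32 was given explicitly
// on the command line, which always wins.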
148
153
154static bool IsPTXVectorType(MVT VT) {
155 switch (VT.SimpleTy) {
156 default:
157 return false;
158 case MVT::v2i1:
159 case MVT::v4i1:
160 case MVT::v2i8:
161 case MVT::v4i8:
162 case MVT::v8i8: // <2 x i8x4>
163 case MVT::v16i8: // <4 x i8x4>
164 case MVT::v2i16:
165 case MVT::v4i16:
166 case MVT::v8i16: // <4 x i16x2>
167 case MVT::v2i32:
168 case MVT::v4i32:
169 case MVT::v2i64:
170 case MVT::v2f16:
171 case MVT::v4f16:
172 case MVT::v8f16: // <4 x f16x2>
173 case MVT::v2bf16:
174 case MVT::v4bf16:
175 case MVT::v8bf16: // <4 x bf16x2>
176 case MVT::v2f32:
177 case MVT::v4f32:
178 case MVT::v2f64:
179 case MVT::v4i64:
180 case MVT::v4f64:
181 case MVT::v8i32:
182 case MVT::v8f32:
183 case MVT::v16f16: // <8 x f16x2>
184 case MVT::v16bf16: // <8 x bf16x2>
185 case MVT::v16i16: // <8 x i16x2>
186 case MVT::v32i8: // <8 x i8x4>
187 return true;
188 }
189}
190
191// When legalizing vector loads/stores, this function is called, which does two
192// things:
193 // 1. Determines whether the vector is something we want to custom lower;
194// std::nullopt is returned if we do not want to custom lower it.
195// 2. If we do want to handle it, returns two parameters:
196// - unsigned int NumElts - The number of elements in the final vector
197// - EVT EltVT - The type of the elements in the final vector
198static std::optional<std::pair<unsigned int, MVT>>
199getVectorLoweringShape(EVT VectorEVT, const NVPTXSubtarget &STI,
200 unsigned AddressSpace) {
201 const bool CanLowerTo256Bit = STI.has256BitVectorLoadStore(AddressSpace);
202
203 if (CanLowerTo256Bit && VectorEVT.isScalarInteger() &&
204 VectorEVT.getSizeInBits() == 256)
205 return {{4, MVT::i64}};
206
207 if (!VectorEVT.isSimple())
208 return std::nullopt;
209 const MVT VectorVT = VectorEVT.getSimpleVT();
210
211 if (!VectorVT.isVector()) {
212 if (VectorVT == MVT::i128 || VectorVT == MVT::f128)
213 return {{2, MVT::i64}};
214 return std::nullopt;
215 }
216
217 const MVT EltVT = VectorVT.getVectorElementType();
218 const unsigned NumElts = VectorVT.getVectorNumElements();
219
220 // The size of the PTX virtual register that holds a packed type.
221 unsigned PackRegSize;
222
223 // We only handle "native" vector sizes for now, e.g. <4 x double> is not
224 // legal. We can (and should) split that into 2 stores of <2 x double> here
225 // but I'm leaving that as a TODO for now.
226 switch (VectorVT.SimpleTy) {
227 default:
228 return std::nullopt;
229
230 case MVT::v4i64:
231 case MVT::v4f64:
232 // This is a "native" vector type iff the address space is global and the
233 // target supports 256-bit loads/stores
234 if (!CanLowerTo256Bit)
235 return std::nullopt;
236 [[fallthrough]];
237 case MVT::v2i8:
238 case MVT::v2i64:
239 case MVT::v2f64:
240 // This is a "native" vector type
241 return std::pair(NumElts, EltVT);
242
243 case MVT::v16f16: // <8 x f16x2>
244 case MVT::v16bf16: // <8 x bf16x2>
245 case MVT::v16i16: // <8 x i16x2>
246 case MVT::v32i8: // <8 x i8x4>
247 // This can be upsized into a "native" vector type iff the address space is
248 // global and the target supports 256-bit loads/stores.
249 if (!CanLowerTo256Bit)
250 return std::nullopt;
251 [[fallthrough]];
252 case MVT::v2i16: // <1 x i16x2>
253 case MVT::v2f16: // <1 x f16x2>
254 case MVT::v2bf16: // <1 x bf16x2>
255 case MVT::v4i8: // <1 x i8x4>
256 case MVT::v4i16: // <2 x i16x2>
257 case MVT::v4f16: // <2 x f16x2>
258 case MVT::v4bf16: // <2 x bf16x2>
259 case MVT::v8i8: // <2 x i8x4>
260 case MVT::v8f16: // <4 x f16x2>
261 case MVT::v8bf16: // <4 x bf16x2>
262 case MVT::v8i16: // <4 x i16x2>
263 case MVT::v16i8: // <4 x i8x4>
264 PackRegSize = 32;
265 break;
266
267 case MVT::v8f32: // <4 x f32x2>
268 case MVT::v8i32: // <4 x i32x2>
269 // This is a "native" vector type iff the address space is global and the
270 // target supports 256-bit loads/stores
271 if (!CanLowerTo256Bit)
272 return std::nullopt;
273 [[fallthrough]];
274 case MVT::v2f32: // <1 x f32x2>
275 case MVT::v4f32: // <2 x f32x2>
276 case MVT::v2i32: // <1 x i32x2>
277 case MVT::v4i32: // <2 x i32x2>
278 if (!STI.hasF32x2Instructions())
279 return std::pair(NumElts, EltVT);
280 PackRegSize = 64;
281 break;
282 }
283
284 // If we reach here, then we can pack 2 or more elements into a single 32-bit
285 // or 64-bit PTX register and treat the vector as a new vector containing
286 // packed elements.
287
288 // Number of elements to pack in one word.
289 const unsigned NPerReg = PackRegSize / EltVT.getSizeInBits();
290
291 return std::pair(NumElts / NPerReg, MVT::getVectorVT(EltVT, NPerReg));
292}
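// Example (illustrative): for a load of <8 x half>, the switch above picks
// PackRegSize = 32, so NPerReg = 32 / 16 = 2 and the result is {4, v2f16},
// i.e. four 32-bit registers each holding one f16x2 pair. A <4 x double>
// access in the global space on a target with 256-bit load/store support
// instead returns {4, f64} directly.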
293
294/// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
295/// legal-ish MVTs that compose it. Unlike ComputeValueVTs, this will legalize
296/// the types as required by the calling convention (with special handling for
297/// i8s).
298/// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
299/// same number of types as the Ins/Outs arrays in LowerFormalArguments,
300/// LowerCall, and LowerReturn.
301static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,
302 LLVMContext &Ctx, CallingConv::ID CallConv,
303 Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
304 SmallVectorImpl<uint64_t> &Offsets,
305 uint64_t StartingOffset = 0) {
306 SmallVector<EVT, 16> TempVTs;
307 SmallVector<uint64_t, 16> TempOffsets;
308 ComputeValueVTs(TLI, DL, Ty, TempVTs, &TempOffsets, StartingOffset);
309
310 for (const auto [VT, Off] : zip(TempVTs, TempOffsets)) {
311 MVT RegisterVT = TLI.getRegisterTypeForCallingConv(Ctx, CallConv, VT);
312 unsigned NumRegs = TLI.getNumRegistersForCallingConv(Ctx, CallConv, VT);
313
314 // Since we actually can load/store b8, we need to ensure that we'll use
315 // the original sized type for any i8s or i8 vectors.
316 if (VT.getScalarType() == MVT::i8) {
317 if (RegisterVT == MVT::i16)
318 RegisterVT = MVT::i8;
319 else if (RegisterVT == MVT::v2i16)
320 RegisterVT = MVT::v2i8;
321 else
322 assert(RegisterVT == MVT::v4i8 &&
323 "Expected v4i8, v2i16, or i16 for i8 RegisterVT");
324 }
325
326 // TODO: This is horribly incorrect for cases where the vector elements are
327 // not a multiple of bytes (e.g. i1) and legal or i8. However, this problem
328 // has existed for as long as NVPTX has and no one has complained, so we'll
329 // leave it for now.
330 for (unsigned I : seq(NumRegs)) {
331 ValueVTs.push_back(RegisterVT);
332 Offsets.push_back(Off + I * RegisterVT.getStoreSize());
333 }
334 }
335}
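// Example (illustrative): for an argument of IR type {i32, i8, i8},
// ComputeValueVTs yields {i32, i8, i8} at offsets {0, 4, 5}; the fix-up above
// keeps the i8 pieces as i8 (instead of the promoted i16 register type) so
// that ValueVTs and Offsets line up one-to-one with the Ins/Outs arrays.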
336
337// We return an EVT that can hold N VTs
338// If the VT is a vector, the resulting EVT is a flat vector with the same
339// element type as VT's element type.
340static EVT getVectorizedVT(EVT VT, unsigned N, LLVMContext &C) {
341 if (N == 1)
342 return VT;
343
344 return VT.isVector() ? EVT::getVectorVT(C, VT.getScalarType(),
345 VT.getVectorNumElements() * N)
346 : EVT::getVectorVT(C, VT, N);
347}
348
349static SDValue getExtractVectorizedValue(SDValue V, unsigned I, EVT VT,
350 const SDLoc &dl, SelectionDAG &DAG) {
351 if (V.getValueType() == VT) {
352 assert(I == 0 && "Index must be 0 for scalar value");
353 return V;
354 }
355
356 if (!VT.isVector())
357 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, V,
358 DAG.getVectorIdxConstant(I, dl));
359
360 return DAG.getNode(
361 ISD::EXTRACT_SUBVECTOR, dl, VT, V,
362 DAG.getVectorIdxConstant(I * VT.getVectorNumElements(), dl));
363}
364
365template <typename T>
366static inline SDValue getBuildVectorizedValue(unsigned N, const SDLoc &dl,
367 SelectionDAG &DAG, T GetElement) {
368 if (N == 1)
369 return GetElement(0);
370
372 for (const unsigned I : llvm::seq(N)) {
373 SDValue Val = GetElement(I);
374 if (Val.getValueType().isVector())
375 DAG.ExtractVectorElements(Val, Values);
376 else
377 Values.push_back(Val);
378 }
379
380 EVT VT = EVT::getVectorVT(*DAG.getContext(), Values[0].getValueType(),
381 Values.size());
382 return DAG.getBuildVector(VT, dl, Values);
383}
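// Example (illustrative): with N == 2 and GetElement producing two v2f16
// values, the elements are flattened and rebuilt as a single v4f16
// BUILD_VECTOR; with N == 1 the single element is returned unchanged.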
384
385/// PromoteScalarIntegerPTX
386/// Used to make sure the arguments/returns are suitable for passing
387/// and promote them to a larger size if they're not.
388///
389/// Returns the promoted type, or \p VT unchanged if no promotion is needed.
390static EVT promoteScalarIntegerPTX(const EVT VT) {
391 if (VT.isScalarInteger()) {
392 switch (PowerOf2Ceil(VT.getFixedSizeInBits())) {
393 default:
395 "Promotion is not suitable for scalars of size larger than 64-bits");
396 case 1:
397 return MVT::i1;
398 case 2:
399 case 4:
400 case 8:
401 return MVT::i8;
402 case 16:
403 return MVT::i16;
404 case 32:
405 return MVT::i32;
406 case 64:
407 return MVT::i64;
408 }
409 }
410 return VT;
411}
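// Example (illustrative): an i3 scalar has PowerOf2Ceil(3) == 4, so it is
// promoted to MVT::i8; i16/i32/i64 map to themselves, and anything wider than
// 64 bits trips the assertion above.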
412
413// Check whether we can merge loads/stores of some of the pieces of a
414// flattened function parameter or return value into a single vector
415// load/store.
416//
417// The flattened parameter is represented as a list of EVTs and
418// offsets, and the whole structure is aligned to ParamAlignment. This
419// function determines whether we can load/store pieces of the
420// parameter starting at index Idx using a single vectorized op of
421// size AccessSize. If so, it returns the number of param pieces
422// covered by the vector op. Otherwise, it returns 1.
423template <typename T>
424static unsigned canMergeParamLoadStoresStartingAt(
425 unsigned Idx, uint32_t AccessSize, const SmallVectorImpl<EVT> &ValueVTs,
426 const SmallVectorImpl<T> &Offsets, Align ParamAlignment) {
427
428 // Can't vectorize if param alignment is not sufficient.
429 if (ParamAlignment < AccessSize)
430 return 1;
431 // Can't vectorize if offset is not aligned.
432 if (Offsets[Idx] & (AccessSize - 1))
433 return 1;
434
435 EVT EltVT = ValueVTs[Idx];
436 unsigned EltSize = EltVT.getStoreSize();
437
438 // Element is too large to vectorize.
439 if (EltSize >= AccessSize)
440 return 1;
441
442 unsigned NumElts = AccessSize / EltSize;
443 // Can't vectorize if AccessSize is not a multiple of EltSize.
444 if (AccessSize != EltSize * NumElts)
445 return 1;
446
447 // We don't have enough elements to vectorize.
448 if (Idx + NumElts > ValueVTs.size())
449 return 1;
450
451 // PTX ISA can only deal with 2- and 4-element vector ops.
452 if (NumElts != 4 && NumElts != 2)
453 return 1;
454
455 for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {
456 // Types do not match.
457 if (ValueVTs[j] != EltVT)
458 return 1;
459
460 // Elements are not contiguous.
461 if (Offsets[j] - Offsets[j - 1] != EltSize)
462 return 1;
463 }
464 // OK. We can vectorize ValueVTs[Idx..Idx+NumElts).
465 return NumElts;
466}
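// Example (illustrative): assuming ValueVTs = {f32, f32, f32, f32} at
// Offsets = {0, 4, 8, 12} with ParamAlignment = 16, a query at Idx = 0 with
// AccessSize = 16 sees four contiguous, identically-typed 4-byte pieces and
// returns 4; with ParamAlignment = 8 the same query returns 1 and the caller
// retries with a smaller AccessSize.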
467
468// Computes whether and how we can vectorize the loads/stores of a
469// flattened function parameter or return value.
470//
471// The flattened parameter is represented as the list of ValueVTs and
472 // Offsets, and is aligned to ParamAlignment bytes. We return a vector
473 // with one entry per vectorized chunk, giving the number of pieces that
474 // chunk covers (1 for a scalar access, 2 or 4 for a vector access); the
475 // entries sum to ValueVTs.size().
476template <typename T>
477static SmallVector<unsigned, 16>
478VectorizePTXValueVTs(const SmallVectorImpl<EVT> &ValueVTs,
479 const SmallVectorImpl<T> &Offsets, Align ParamAlignment,
480 bool IsVAArg = false) {
481 // Set vector size to match ValueVTs and mark all elements as
482 // scalars by default.
483
484 if (IsVAArg)
485 return SmallVector<unsigned>(ValueVTs.size(), 1);
486
487 SmallVector<unsigned, 16> VectorInfo;
488
489 const auto GetNumElts = [&](unsigned I) -> unsigned {
490 for (const unsigned AccessSize : {16, 8, 4, 2}) {
491 const unsigned NumElts = canMergeParamLoadStoresStartingAt(
492 I, AccessSize, ValueVTs, Offsets, ParamAlignment);
493 assert((NumElts == 1 || NumElts == 2 || NumElts == 4) &&
494 "Unexpected vectorization size");
495 if (NumElts != 1)
496 return NumElts;
497 }
498 return 1;
499 };
500
501 // Check what we can vectorize using 128/64/32/16-bit accesses.
502 for (unsigned I = 0, E = ValueVTs.size(); I != E;) {
503 const unsigned NumElts = GetNumElts(I);
504 VectorInfo.push_back(NumElts);
505 I += NumElts;
506 }
507 assert(std::accumulate(VectorInfo.begin(), VectorInfo.end(), 0u) ==
508 ValueVTs.size());
509 return VectorInfo;
510}
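// Example (illustrative): for the same {f32, f32, f32, f32} parameter, a
// 16-byte alignment produces VectorInfo = {4} (one v4 access covering all
// pieces), an 8-byte alignment produces {2, 2}, and a 4-byte alignment
// produces {1, 1, 1, 1} (all scalar accesses).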
511
512// NVPTXTargetLowering Constructor.
513NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
514 const NVPTXSubtarget &STI)
515 : TargetLowering(TM), nvTM(&TM), STI(STI), GlobalUniqueCallSite(0) {
516 // Always lower memset, memcpy, and memmove intrinsics to load/store
517 // instructions, rather than generating calls to memset, memcpy, or
518 // memmove.
522
525
526 // Jump is Expensive. Don't create extra control flow for 'and', 'or'
527 // condition branches.
528 setJumpIsExpensive(true);
529
530 // Wide divides are _very_ slow. Try to reduce the width of the divide if
531 // possible.
532 addBypassSlowDiv(64, 32);
533
534 // By default, use the Source scheduling
535 if (sched4reg)
536 setSchedulingPreference(Sched::RegPressure);
537 else
538 setSchedulingPreference(Sched::Source);
539
540 auto setFP16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
541 LegalizeAction NoF16Action) {
542 bool IsOpSupported = STI.allowFP16Math();
543 switch (Op) {
544 // Several FP16 instructions are available on sm_80 only.
545 case ISD::FMINNUM:
546 case ISD::FMAXNUM:
547 case ISD::FMAXNUM_IEEE:
548 case ISD::FMINNUM_IEEE:
549 case ISD::FMAXIMUM:
550 case ISD::FMINIMUM:
551 case ISD::FMAXIMUMNUM:
552 case ISD::FMINIMUMNUM:
553 IsOpSupported &= STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70;
554 break;
555 case ISD::FEXP2:
556 IsOpSupported &= STI.getSmVersion() >= 75 && STI.getPTXVersion() >= 70;
557 break;
558 }
559 setOperationAction(Op, VT, IsOpSupported ? Action : NoF16Action);
560 };
561
562 auto setBF16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
563 LegalizeAction NoBF16Action) {
564 bool IsOpSupported = STI.hasNativeBF16Support(Op);
565 setOperationAction(
566 Op, VT, IsOpSupported ? Action : NoBF16Action);
567 };
568
569 auto setI16x2OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
570 LegalizeAction NoI16x2Action) {
571 bool IsOpSupported = false;
572 // These i16x2 instructions are available on sm_90 only.
573 switch (Op) {
574 case ISD::ADD:
575 case ISD::SMAX:
576 case ISD::SMIN:
577 case ISD::UMIN:
578 case ISD::UMAX:
579 IsOpSupported = STI.getSmVersion() >= 90 && STI.getPTXVersion() >= 80;
580 break;
581 }
582 setOperationAction(Op, VT, IsOpSupported ? Action : NoI16x2Action);
583 };
584
585 addRegisterClass(MVT::i1, &NVPTX::B1RegClass);
586 addRegisterClass(MVT::i16, &NVPTX::B16RegClass);
587 addRegisterClass(MVT::v2i16, &NVPTX::B32RegClass);
588 addRegisterClass(MVT::v4i8, &NVPTX::B32RegClass);
589 addRegisterClass(MVT::i32, &NVPTX::B32RegClass);
590 addRegisterClass(MVT::i64, &NVPTX::B64RegClass);
591 addRegisterClass(MVT::f32, &NVPTX::B32RegClass);
592 addRegisterClass(MVT::f64, &NVPTX::B64RegClass);
593 addRegisterClass(MVT::f16, &NVPTX::B16RegClass);
594 addRegisterClass(MVT::v2f16, &NVPTX::B32RegClass);
595 addRegisterClass(MVT::bf16, &NVPTX::B16RegClass);
596 addRegisterClass(MVT::v2bf16, &NVPTX::B32RegClass);
597
598 if (STI.hasF32x2Instructions()) {
599 addRegisterClass(MVT::v2f32, &NVPTX::B64RegClass);
600 addRegisterClass(MVT::v2i32, &NVPTX::B64RegClass);
601 }
602
603 // Conversion to/from FP16/FP16x2 is always legal.
608
609 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);
610 if (STI.getSmVersion() >= 30 && STI.getPTXVersion() > 31)
611 setOperationAction(ISD::READSTEADYCOUNTER, MVT::i64, Legal);
612
613 setFP16OperationAction(ISD::SETCC, MVT::f16, Legal, Promote);
614 setFP16OperationAction(ISD::SETCC, MVT::v2f16, Legal, Expand);
615
616 // Conversion to/from BF16/BF16x2 is always legal.
621
622 setBF16OperationAction(ISD::SETCC, MVT::v2bf16, Legal, Expand);
623 setBF16OperationAction(ISD::SETCC, MVT::bf16, Legal, Promote);
624 if (getOperationAction(ISD::SETCC, MVT::bf16) == Promote)
625 AddPromotedToType(ISD::SETCC, MVT::bf16, MVT::f32);
626
627 // Conversion to/from i16/i16x2 is always legal.
632
637
638 // No support for these operations with v2f32/v2i32
639 setOperationAction(ISD::INSERT_VECTOR_ELT, {MVT::v2f32, MVT::v2i32}, Expand);
640 setOperationAction(ISD::VECTOR_SHUFFLE, {MVT::v2f32, MVT::v2i32}, Expand);
641
644 MVT::v2i32, Expand);
645
646 // Need custom lowering in case the index is dynamic.
647 if (STI.hasF32x2Instructions())
648 setOperationAction(ISD::EXTRACT_VECTOR_ELT, {MVT::v2f32, MVT::v2i32},
649 Custom);
650
651 // Custom conversions to/from v2i8.
652 setOperationAction(ISD::BITCAST, MVT::v2i8, Custom);
653
654 // Only logical ops can be done on v4i8/v2i32 directly; others must be done
655 // elementwise.
672 {MVT::v4i8, MVT::v2i32}, Expand);
673
674 // Operations not directly supported by NVPTX.
675 for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32,
676 MVT::v2f32, MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16,
677 MVT::v4i8, MVT::i32, MVT::v2i32, MVT::i64}) {
679 setOperationAction(ISD::BR_CC, VT, Expand);
680 }
681
682 // We don't want ops like FMINIMUM or UMAX to be lowered to SETCC+VSELECT.
683 setOperationAction(ISD::VSELECT, {MVT::v2f32, MVT::v2i32}, Expand);
684
685 // Some SIGN_EXTEND_INREG can be done using cvt instruction.
686 // For others we will expand to a SHL/SRA pair.
692 setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::v2i16, MVT::v2i32}, Expand);
693
700
703
705 {MVT::i8, MVT::i16, MVT::v2i16, MVT::i32, MVT::i64},
706 Expand);
707
708 if (STI.hasHWROT32()) {
711 Custom);
712 }
713
715
716 setOperationAction(ISD::BR_JT, MVT::Other, Custom);
717 setOperationAction(ISD::BRIND, MVT::Other, Expand);
718
719 // We want to legalize constant-related memmove and memcpy
720 // intrinsics.
722
723 // FP extload/truncstore is not legal in PTX. We need to expand all these.
724 for (auto FloatVTs :
726 for (MVT ValVT : FloatVTs) {
727 for (MVT MemVT : FloatVTs) {
728 setLoadExtAction(ISD::EXTLOAD, ValVT, MemVT, Expand);
729 setTruncStoreAction(ValVT, MemVT, Expand);
730 }
731 }
732 }
733
734 // To improve CodeGen we'll legalize any-extend loads to zext loads. This is
735 // how they'll be lowered in ISel anyway, and by doing this a little earlier
736 // we allow for more DAG combine opportunities.
737 for (auto IntVTs :
739 for (MVT ValVT : IntVTs)
740 for (MVT MemVT : IntVTs)
741 if (isTypeLegal(ValVT))
742 setLoadExtAction(ISD::EXTLOAD, ValVT, MemVT, Custom);
743
744 // PTX does not support load / store predicate registers
745 setOperationAction({ISD::LOAD, ISD::STORE}, MVT::i1, Custom);
746 for (MVT VT : MVT::integer_valuetypes()) {
748 Promote);
749 setTruncStoreAction(VT, MVT::i1, Expand);
750 }
751
752 // Disable generation of extload/truncstore for v2i32/v2i16/v2i8. The generic
753 // expansion for these nodes when they are unaligned is incorrect if the
754 // type is a vector.
755 //
756 // TODO: Fix the generic expansion for these nodes found in
757 // TargetLowering::expandUnalignedLoad/Store.
759 MVT::v2i8, Expand);
761 {MVT::v2i8, MVT::v2i16}, Expand);
762 setTruncStoreAction(MVT::v2i16, MVT::v2i8, Expand);
763 setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
764 setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
765
766 // Register custom handling for illegal type loads/stores. We'll try to custom
767 // lower almost all illegal types, and the logic in the lowering will
768 // discard cases we can't handle.
769 setOperationAction({ISD::LOAD, ISD::STORE}, {MVT::i128, MVT::f128}, Custom);
771 if (!isTypeLegal(VT) && VT.getStoreSizeInBits() <= 256)
772 setOperationAction({ISD::STORE, ISD::LOAD}, VT, Custom);
773
774 // Custom legalization for LDU intrinsics.
775 // TODO: The logic to lower these is not very robust and we should rewrite it.
776 // Perhaps LDU should not be represented as an intrinsic at all.
779 if (IsPTXVectorType(VT))
781
785 MVT::i1, Expand);
786
787 // This is legal in NVPTX
792
793 setOperationAction(ISD::DYNAMIC_STACKALLOC, {MVT::i32, MVT::i64}, Custom);
794 setOperationAction({ISD::STACKRESTORE, ISD::STACKSAVE}, MVT::Other, Custom);
795
796 // TRAP can be lowered to PTX trap
797 setOperationAction(ISD::TRAP, MVT::Other, Legal);
798 // DEBUGTRAP can be lowered to PTX brkpt
799 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
800
801 // Support varargs.
802 setOperationAction(ISD::VASTART, MVT::Other, Custom);
803 setOperationAction(ISD::VAARG, MVT::Other, Custom);
804 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
805 setOperationAction(ISD::VAEND, MVT::Other, Expand);
806
808 {MVT::i16, MVT::i32, MVT::i64}, Legal);
809
811 Promote);
814
815 setI16x2OperationAction(ISD::ABS, MVT::v2i16, Legal, Custom);
816 setI16x2OperationAction(ISD::SMIN, MVT::v2i16, Legal, Custom);
817 setI16x2OperationAction(ISD::SMAX, MVT::v2i16, Legal, Custom);
818 setI16x2OperationAction(ISD::UMIN, MVT::v2i16, Legal, Custom);
819 setI16x2OperationAction(ISD::UMAX, MVT::v2i16, Legal, Custom);
820 setI16x2OperationAction(ISD::CTPOP, MVT::v2i16, Legal, Expand);
821 setI16x2OperationAction(ISD::CTLZ, MVT::v2i16, Legal, Expand);
822
823 setI16x2OperationAction(ISD::ADD, MVT::v2i16, Legal, Custom);
824 setI16x2OperationAction(ISD::SUB, MVT::v2i16, Legal, Custom);
825 setI16x2OperationAction(ISD::MUL, MVT::v2i16, Legal, Custom);
826 setI16x2OperationAction(ISD::SHL, MVT::v2i16, Legal, Custom);
827 setI16x2OperationAction(ISD::SREM, MVT::v2i16, Legal, Custom);
828 setI16x2OperationAction(ISD::UREM, MVT::v2i16, Legal, Custom);
829
830 // Other arithmetic and logic ops are unsupported.
834 {MVT::v2i16, MVT::v2i32}, Expand);
835
836 // v2i32 is not supported for any arithmetic operations
841 MVT::v2i32, Expand);
842
847 if (STI.getPTXVersion() >= 43) {
852 }
853
855 setOperationAction(ISD::CTTZ, {MVT::v2i16, MVT::v2i32}, Expand);
858
859 // PTX does not directly support SELP of i1, so promote to i32 first
861
862 // PTX cannot multiply two i64s in a single instruction.
865
866 // We have some custom DAG combine patterns for these nodes
869 ISD::FADD, ISD::FMAXNUM, ISD::FMINNUM,
870 ISD::FMAXIMUM, ISD::FMINIMUM, ISD::FMAXIMUMNUM,
871 ISD::FMINIMUMNUM, ISD::MUL, ISD::SHL,
873 ISD::BUILD_VECTOR, ISD::ADDRSPACECAST, ISD::LOAD,
874 ISD::STORE, ISD::ZERO_EXTEND, ISD::SIGN_EXTEND});
875
876 // setcc for f16x2 and bf16x2 needs special handling to prevent
877 // legalizer's attempt to scalarize it due to v2i1 not being legal.
878 if (STI.allowFP16Math() || STI.hasBF16Math())
880
881 // Vector reduction operations. These may be turned into shuffle or tree
882 // reductions depending on what instructions are available for each type.
884 MVT EltVT = VT.getVectorElementType();
885 if (EltVT == MVT::f32 || EltVT == MVT::f64) {
886 setOperationAction({ISD::VECREDUCE_FMAX, ISD::VECREDUCE_FMIN,
887 ISD::VECREDUCE_FMAXIMUM, ISD::VECREDUCE_FMINIMUM},
888 VT, Custom);
889 }
890 }
891
892 // Promote fp16 arithmetic if fp16 hardware isn't available or the
893 // user passed --nvptx-no-fp16-math. The flag is useful because,
894 // although sm_53+ GPUs have some sort of FP16 support in
895 // hardware, only sm_53 and sm_60 have full implementation. Others
896 // only have a token amount of hardware and are likely to run faster
897 // by using fp32 units instead.
898 for (const auto &Op : {ISD::FADD, ISD::FMUL, ISD::FSUB, ISD::FMA}) {
899 setFP16OperationAction(Op, MVT::f16, Legal, Promote);
900 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
901 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
902 // bf16 must be promoted to f32.
903 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
904 if (getOperationAction(Op, MVT::bf16) == Promote)
905 AddPromotedToType(Op, MVT::bf16, MVT::f32);
906 setOperationAction(Op, MVT::v2f32,
907 STI.hasF32x2Instructions() ? Legal : Expand);
908 }
909
910 // On SM80, we select add/mul/sub as fma to avoid promotion to float
911 for (const auto &Op : {ISD::FADD, ISD::FMUL, ISD::FSUB}) {
912 for (const auto &VT : {MVT::bf16, MVT::v2bf16}) {
913 if (!STI.hasNativeBF16Support(Op) && STI.hasNativeBF16Support(ISD::FMA)) {
915 }
916 }
917 }
918
919 // f16/f16x2 neg was introduced in PTX 60, SM_53.
920 const bool IsFP16FP16x2NegAvailable = STI.getSmVersion() >= 53 &&
921 STI.getPTXVersion() >= 60 &&
922 STI.allowFP16Math();
923 for (const auto &VT : {MVT::f16, MVT::v2f16})
924 setOperationAction(ISD::FNEG, VT,
925 IsFP16FP16x2NegAvailable ? Legal : Expand);
926
927 setBF16OperationAction(ISD::FNEG, MVT::bf16, Legal, Expand);
928 setBF16OperationAction(ISD::FNEG, MVT::v2bf16, Legal, Expand);
929 setOperationAction(ISD::FNEG, MVT::v2f32, Expand);
930 // (would be) Library functions.
931
932 // These map to conversion instructions for scalar FP types.
933 for (const auto &Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FRINT,
934 ISD::FROUNDEVEN, ISD::FTRUNC}) {
935 setOperationAction(Op, MVT::f16, Legal);
936 setOperationAction(Op, MVT::f32, Legal);
937 setOperationAction(Op, MVT::f64, Legal);
938 setOperationAction(Op, MVT::v2f16, Expand);
939 setOperationAction(Op, MVT::v2bf16, Expand);
940 setOperationAction(Op, MVT::v2f32, Expand);
941 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
942 if (getOperationAction(Op, MVT::bf16) == Promote)
943 AddPromotedToType(Op, MVT::bf16, MVT::f32);
944 }
945
946 if (STI.getSmVersion() < 80 || STI.getPTXVersion() < 71) {
947 setOperationAction(ISD::BF16_TO_FP, MVT::f32, Expand);
948 }
949 if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
950 for (MVT VT : {MVT::bf16, MVT::f32, MVT::f64}) {
951 setOperationAction(ISD::FP_EXTEND, VT, Custom);
953 }
954 }
955
956 // Expand v2f32 = fp_extend
957 setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);
958 // Expand v2[b]f16 = fp_round v2f32
959 setOperationAction(ISD::FP_ROUND, {MVT::v2bf16, MVT::v2f16}, Expand);
960
961 // sm_80 only has conversions between f32 and bf16. Custom lower all other
962 // bf16 conversions.
963 if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
964 for (MVT VT : {MVT::i1, MVT::i16, MVT::i32, MVT::i64}) {
967 VT, Custom);
968 }
971 MVT::bf16, Custom);
972 }
973
974 setOperationAction(ISD::FROUND, MVT::f16, Promote);
975 setOperationAction(ISD::FROUND, MVT::v2f16, Expand);
976 setOperationAction(ISD::FROUND, MVT::v2bf16, Expand);
977 setOperationAction(ISD::FROUND, MVT::f32, Custom);
978 setOperationAction(ISD::FROUND, MVT::f64, Custom);
979 setOperationAction(ISD::FROUND, MVT::bf16, Promote);
980 AddPromotedToType(ISD::FROUND, MVT::bf16, MVT::f32);
981
982 // 'Expand' implements FCOPYSIGN without calling an external library.
989
990 // These map to corresponding instructions for f32/f64. f16 must be
991 // promoted to f32. v2f16 is expanded to f16, which is then promoted
992 // to f32.
993 for (const auto &Op :
994 {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FTANH}) {
995 setOperationAction(Op, MVT::f16, Promote);
996 setOperationAction(Op, MVT::f32, Legal);
997 // only div/rem/sqrt are legal for f64
998 if (Op == ISD::FDIV || Op == ISD::FREM || Op == ISD::FSQRT) {
999 setOperationAction(Op, MVT::f64, Legal);
1000 }
1001 setOperationAction(Op, {MVT::v2f16, MVT::v2bf16, MVT::v2f32}, Expand);
1002 setOperationAction(Op, MVT::bf16, Promote);
1003 AddPromotedToType(Op, MVT::bf16, MVT::f32);
1004 }
1005 setOperationAction(ISD::FREM, {MVT::f32, MVT::f64}, Custom);
1006
1007 setOperationAction(ISD::FABS, {MVT::f32, MVT::f64}, Legal);
1008 setOperationAction(ISD::FABS, MVT::v2f32, Expand);
1009 if (STI.getPTXVersion() >= 65) {
1010 setFP16OperationAction(ISD::FABS, MVT::f16, Legal, Promote);
1011 setFP16OperationAction(ISD::FABS, MVT::v2f16, Legal, Expand);
1012 } else {
1013 setOperationAction(ISD::FABS, MVT::f16, Promote);
1014 setOperationAction(ISD::FABS, MVT::v2f16, Expand);
1015 }
1016 setBF16OperationAction(ISD::FABS, MVT::v2bf16, Legal, Expand);
1017 setBF16OperationAction(ISD::FABS, MVT::bf16, Legal, Promote);
1018 if (getOperationAction(ISD::FABS, MVT::bf16) == Promote)
1019 AddPromotedToType(ISD::FABS, MVT::bf16, MVT::f32);
1020
1021 for (const auto &Op :
1022 {ISD::FMINNUM, ISD::FMAXNUM, ISD::FMINIMUMNUM, ISD::FMAXIMUMNUM}) {
1023 setOperationAction(Op, MVT::f32, Legal);
1024 setOperationAction(Op, MVT::f64, Legal);
1025 setFP16OperationAction(Op, MVT::f16, Legal, Promote);
1026 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
1027 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
1028 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
1029 if (getOperationAction(Op, MVT::bf16) == Promote)
1030 AddPromotedToType(Op, MVT::bf16, MVT::f32);
1031 setOperationAction(Op, MVT::v2f32, Expand);
1032 }
1033 bool SupportsF32MinMaxNaN =
1034 STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70;
1035 for (const auto &Op : {ISD::FMINIMUM, ISD::FMAXIMUM}) {
1036 setOperationAction(Op, MVT::f32, SupportsF32MinMaxNaN ? Legal : Expand);
1037 setFP16OperationAction(Op, MVT::f16, Legal, Expand);
1038 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
1039 setBF16OperationAction(Op, MVT::bf16, Legal, Expand);
1040 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
1041 setOperationAction(Op, MVT::v2f32, Expand);
1042 }
1043
1044 // Custom lowering for inline asm with 128-bit operands
1047
1048 // FEXP2 support:
1049 // - f32
1050 // - f16/f16x2 (sm_70+, PTX 7.0+)
1051 // - bf16/bf16x2 (sm_90+, PTX 7.8+)
1052 // When f16/bf16 types aren't supported, they are promoted/expanded to f32.
1053 setOperationAction(ISD::FEXP2, MVT::f32, Legal);
1054 setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
1055 setFP16OperationAction(ISD::FEXP2, MVT::f16, Legal, Promote);
1056 setFP16OperationAction(ISD::FEXP2, MVT::v2f16, Legal, Expand);
1057 setBF16OperationAction(ISD::FEXP2, MVT::bf16, Legal, Promote);
1058 setBF16OperationAction(ISD::FEXP2, MVT::v2bf16, Legal, Expand);
1059
1060 // FLOG2 supports f32 only
1061 // f16/bf16 types aren't supported, but they are promoted/expanded to f32.
1062 if (UseApproxLog2F32) {
1063 setOperationAction(ISD::FLOG2, MVT::f32, Legal);
1064 setOperationPromotedToType(ISD::FLOG2, MVT::f16, MVT::f32);
1065 setOperationPromotedToType(ISD::FLOG2, MVT::bf16, MVT::f32);
1066 setOperationAction(ISD::FLOG2, {MVT::v2f16, MVT::v2bf16, MVT::v2f32},
1067 Expand);
1068 }
1069
1070 setOperationAction(ISD::ADDRSPACECAST, {MVT::i32, MVT::i64}, Custom);
1071
1072 setOperationAction(ISD::ATOMIC_LOAD_SUB, {MVT::i32, MVT::i64}, Expand);
1073
1074 // atom.b128 is legal in PTX but since we don't represent i128 as a legal
1075 // type, we need to custom lower it.
1076 setOperationAction({ISD::ATOMIC_CMP_SWAP, ISD::ATOMIC_SWAP}, MVT::i128,
1077 Custom);
1078
1079 // Now deduce the information based on the above-mentioned
1080 // actions.
1081 computeRegisterProperties(STI.getRegisterInfo());
1082
1083 // PTX support for 16-bit CAS is emulated. Only use 32+
1084 setMinCmpXchgSizeInBits(STI.getMinCmpXchgSizeInBits());
1085 setMaxAtomicSizeInBitsSupported(STI.hasAtomSwap128() ? 128 : 64);
1087
1088 // Custom lowering for tcgen05.ld vector operands
1090 {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32,
1091 MVT::v32i32, MVT::v64i32, MVT::v128i32},
1092 Custom);
1093
1094 // Custom lowering for tcgen05.st vector operands
1096 {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32,
1097 MVT::v32i32, MVT::v64i32, MVT::v128i32, MVT::Other},
1098 Custom);
1099
1100 // Enable custom lowering for the following:
1101 // * MVT::i128 - clusterlaunchcontrol
1102 // * MVT::i32 - prmt
1103 // * MVT::v4f32 - cvt_rs fp{4/6/8}x4 intrinsics
1104 // * MVT::Other - internal.addrspace.wrap
1106 {MVT::i32, MVT::i128, MVT::v4f32, MVT::Other}, Custom);
1107}
1108
1109const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
1110
1111#define MAKE_CASE(V) \
1112 case V: \
1113 return #V;
1114
1115 switch ((NVPTXISD::NodeType)Opcode) {
1117 break;
1118
1171 MAKE_CASE(
1173 MAKE_CASE(
1185 MAKE_CASE(
1187 MAKE_CASE(
1194 }
1195 return nullptr;
1196
1197#undef MAKE_CASE
1198}
1199
1200TargetLoweringBase::LegalizeTypeAction
1201NVPTXTargetLowering::getPreferredVectorAction(MVT VT) const {
1202 if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
1203 VT.getScalarType() == MVT::i1)
1204 return TypeSplitVector;
1205 return TargetLoweringBase::getPreferredVectorAction(VT);
1206}
1207
1208SDValue NVPTXTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
1209 int Enabled, int &ExtraSteps,
1210 bool &UseOneConst,
1211 bool Reciprocal) const {
1214 return SDValue();
1215
1216 if (ExtraSteps == ReciprocalEstimate::Unspecified)
1217 ExtraSteps = 0;
1218
1219 SDLoc DL(Operand);
1220 EVT VT = Operand.getValueType();
1221 bool Ftz = useF32FTZ(DAG.getMachineFunction());
1222
1223 auto MakeIntrinsicCall = [&](Intrinsic::ID IID) {
1224 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
1225 DAG.getConstant(IID, DL, MVT::i32), Operand);
1226 };
1227
1228 // The sqrt and rsqrt refinement processes assume we always start out with an
1229 // approximation of the rsqrt. Therefore, if we're going to do any refinement
1230 // (i.e. ExtraSteps > 0), we must return an rsqrt. But if we're *not* doing
1231 // any refinement, we must return a regular sqrt.
1232 if (Reciprocal || ExtraSteps > 0) {
1233 if (VT == MVT::f32)
1234 return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f
1235 : Intrinsic::nvvm_rsqrt_approx_f);
1236 else if (VT == MVT::f64)
1237 return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);
1238 else
1239 return SDValue();
1240 } else {
1241 if (VT == MVT::f32)
1242 return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f
1243 : Intrinsic::nvvm_sqrt_approx_f);
1244 else {
1245 // There's no sqrt.approx.f64 instruction, so we emit
1246 // reciprocal(rsqrt(x)). This is faster than
1247 // select(x == 0, 0, x * rsqrt(x)). (In fact, it's faster than plain
1248 // x * rsqrt(x).)
1249 return DAG.getNode(
1250 ISD::INTRINSIC_WO_CHAIN, DL, VT,
1251 DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),
1252 MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));
1253 }
1254 }
1255}
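// Example (illustrative): assuming a reciprocal-square-root query on f32 with
// ExtraSteps == 0 and FTZ enabled, this returns a single
// nvvm_rsqrt_approx_ftz_f intrinsic node; a plain sqrt of f64 is instead
// emitted as rcp.approx.ftz.f64(rsqrt.approx.f64(x)), since there is no
// sqrt.approx.f64 instruction.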
1256
1257std::string NVPTXTargetLowering::getPrototype(
1258 const DataLayout &DL, Type *RetTy, const ArgListTy &Args,
1259 const SmallVectorImpl<ISD::OutputArg> &Outs,
1260 std::optional<unsigned> FirstVAArg, const CallBase &CB,
1261 unsigned UniqueCallSite) const {
1262 auto PtrVT = getPointerTy(DL);
1263
1264 std::string Prototype;
1265 raw_string_ostream O(Prototype);
1266 O << "prototype_" << UniqueCallSite << " : .callprototype ";
1267
1268 if (RetTy->isVoidTy()) {
1269 O << "()";
1270 } else {
1271 O << "(";
1272 if (shouldPassAsArray(RetTy)) {
1273 const Align RetAlign = getArgumentAlignment(&CB, RetTy, 0, DL);
1274 O << ".param .align " << RetAlign.value() << " .b8 _["
1275 << DL.getTypeAllocSize(RetTy) << "]";
1276 } else if (RetTy->isFloatingPointTy() || RetTy->isIntegerTy()) {
1277 unsigned size = 0;
1278 if (auto *ITy = dyn_cast<IntegerType>(RetTy)) {
1279 size = ITy->getBitWidth();
1280 } else {
1281 assert(RetTy->isFloatingPointTy() &&
1282 "Floating point type expected here");
1283 size = RetTy->getPrimitiveSizeInBits();
1284 }
1285 // PTX ABI requires all scalar return values to be at least 32
1286 // bits in size. fp16 normally uses .b16 as its storage type in
1287 // PTX, so its size must be adjusted here, too.
1288 size = promoteScalarArgumentSize(size);
1289
1290 O << ".param .b" << size << " _";
1291 } else if (isa<PointerType>(RetTy)) {
1292 O << ".param .b" << PtrVT.getSizeInBits() << " _";
1293 } else {
1294 llvm_unreachable("Unknown return type");
1295 }
1296 O << ") ";
1297 }
1298 O << "_ (";
1299
1300 bool first = true;
1301
1302 const unsigned NumArgs = FirstVAArg.value_or(Args.size());
1303 auto AllOuts = ArrayRef(Outs);
1304 for (const unsigned I : llvm::seq(NumArgs)) {
1305 const auto ArgOuts =
1306 AllOuts.take_while([I](auto O) { return O.OrigArgIndex == I; });
1307 AllOuts = AllOuts.drop_front(ArgOuts.size());
1308
1309 Type *Ty = Args[I].Ty;
1310 if (!first) {
1311 O << ", ";
1312 }
1313 first = false;
1314
1315 if (ArgOuts[0].Flags.isByVal()) {
1316 // Indirect calls need strict ABI alignment so we disable optimizations by
1317 // not providing a function to optimize.
1318 Type *ETy = Args[I].IndirectType;
1319 Align InitialAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1320 Align ParamByValAlign =
1321 getFunctionByValParamAlign(/*F=*/nullptr, ETy, InitialAlign, DL);
1322
1323 O << ".param .align " << ParamByValAlign.value() << " .b8 _["
1324 << ArgOuts[0].Flags.getByValSize() << "]";
1325 } else {
1326 if (shouldPassAsArray(Ty)) {
1327 Align ParamAlign =
1328 getArgumentAlignment(&CB, Ty, I + AttributeList::FirstArgIndex, DL);
1329 O << ".param .align " << ParamAlign.value() << " .b8 _["
1330 << DL.getTypeAllocSize(Ty) << "]";
1331 continue;
1332 }
1333 // i8 types in IR will be i16 types in SDAG
1334 assert((getValueType(DL, Ty) == ArgOuts[0].VT ||
1335 (getValueType(DL, Ty) == MVT::i8 && ArgOuts[0].VT == MVT::i16)) &&
1336 "type mismatch between callee prototype and arguments");
1337 // scalar type
1338 unsigned sz = 0;
1339 if (auto *ITy = dyn_cast<IntegerType>(Ty)) {
1340 sz = promoteScalarArgumentSize(ITy->getBitWidth());
1341 } else if (isa<PointerType>(Ty)) {
1342 sz = PtrVT.getSizeInBits();
1343 } else {
1344 sz = Ty->getPrimitiveSizeInBits();
1345 }
1346 O << ".param .b" << sz << " _";
1347 }
1348 }
1349
1350 if (FirstVAArg)
1351 O << (first ? "" : ",") << " .param .align "
1352 << STI.getMaxRequiredAlignment() << " .b8 _[]";
1353 O << ")";
1354 if (shouldEmitPTXNoReturn(&CB, *nvTM))
1355 O << " .noreturn";
1356 O << ";";
1357
1358 return Prototype;
1359}
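// Example (illustrative, assuming a 64-bit target): for an indirect call to
// "i32 (float, ptr)" at unique call site 3, the string produced above is:
//   prototype_3 : .callprototype (.param .b32 _) _ (.param .b32 _, .param .b64 _);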
1360
1361Align NVPTXTargetLowering::getFunctionArgumentAlignment(
1362 const Function *F, Type *Ty, unsigned Idx, const DataLayout &DL) const {
1363 return getAlign(*F, Idx).value_or(getFunctionParamOptimizedAlign(F, Ty, DL));
1364}
1365
1366Align NVPTXTargetLowering::getArgumentAlignment(const CallBase *CB, Type *Ty,
1367 unsigned Idx,
1368 const DataLayout &DL) const {
1369 if (!CB) {
1370 // CallSite is null; fall back to ABI type alignment.
1371 return DL.getABITypeAlign(Ty);
1372 }
1373
1374 const Function *DirectCallee = CB->getCalledFunction();
1375
1376 if (!DirectCallee) {
1377 // We don't have a direct function symbol, but that may be because of
1378 // constant cast instructions in the call.
1379
1380 // With bitcast'd call targets, the instruction will be the call
1381 if (const auto *CI = dyn_cast<CallInst>(CB)) {
1382 // Check if we have call alignment metadata
1383 if (MaybeAlign StackAlign = getAlign(*CI, Idx))
1384 return StackAlign.value();
1385 }
1386 DirectCallee = getMaybeBitcastedCallee(CB);
1387 }
1388
1389 // Check for function alignment information if we found that the
1390 // ultimate target is a Function
1391 if (DirectCallee)
1392 return getFunctionArgumentAlignment(DirectCallee, Ty, Idx, DL);
1393
1394 // Call is indirect, fall back to the ABI type alignment
1395 return DL.getABITypeAlign(Ty);
1396}
1397
1398static bool shouldConvertToIndirectCall(const CallBase *CB,
1399 const GlobalAddressSDNode *Func) {
1400 if (!Func)
1401 return false;
1402 if (auto *CalleeFunc = dyn_cast<Function>(Func->getGlobal()))
1403 return CB->getFunctionType() != CalleeFunc->getFunctionType();
1404 return false;
1405}
1406
1407static MachinePointerInfo refinePtrAS(SDValue &Ptr, SelectionDAG &DAG,
1408 const DataLayout &DL,
1409 const TargetLowering &TL) {
1410 if (Ptr->getOpcode() == ISD::FrameIndex) {
1411 auto Ty = TL.getPointerTy(DL, ADDRESS_SPACE_LOCAL);
1412 Ptr = DAG.getAddrSpaceCast(SDLoc(), Ty, Ptr, ADDRESS_SPACE_GENERIC,
1413 ADDRESS_SPACE_LOCAL);
1414
1415 return MachinePointerInfo(ADDRESS_SPACE_LOCAL);
1416 }
1417
1418 // Peel off an addrspacecast to generic and load directly from the specific
1419 // address space.
1420 if (Ptr->getOpcode() == ISD::ADDRSPACECAST) {
1421 const auto *ASC = cast<AddrSpaceCastSDNode>(Ptr);
1422 if (ASC->getDestAddressSpace() == ADDRESS_SPACE_GENERIC) {
1423 Ptr = ASC->getOperand(0);
1424 return MachinePointerInfo(ASC->getSrcAddressSpace());
1425 }
1426 }
1427
1428 return MachinePointerInfo();
1429}
1430
1432 if (Flags.isSExt())
1433 return ISD::SIGN_EXTEND;
1434 if (Flags.isZExt())
1435 return ISD::ZERO_EXTEND;
1436 return ISD::ANY_EXTEND;
1437}
1438
1439static SDValue correctParamType(SDValue V, EVT ExpectedVT,
1440 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
1441 SDLoc dl) {
1442 const EVT ActualVT = V.getValueType();
1443 assert((ActualVT == ExpectedVT ||
1444 (ExpectedVT.isInteger() && ActualVT.isInteger())) &&
1445 "Non-integer argument type size mismatch");
1446 if (ExpectedVT.bitsGT(ActualVT))
1447 return DAG.getNode(getExtOpcode(Flags), dl, ExpectedVT, V);
1448 if (ExpectedVT.bitsLT(ActualVT))
1449 return DAG.getNode(ISD::TRUNCATE, dl, ExpectedVT, V);
1450
1451 return V;
1452}
1453
1454SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1455 SmallVectorImpl<SDValue> &InVals) const {
1456
1457 if (CLI.IsVarArg && (STI.getPTXVersion() < 60 || STI.getSmVersion() < 30))
1458 report_fatal_error(
1459 "Support for variadic functions (unsized array parameter) introduced "
1460 "in PTX ISA version 6.0 and requires target sm_30.");
1461
1462 SelectionDAG &DAG = CLI.DAG;
1463 SDLoc dl = CLI.DL;
1464 const SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1465 SDValue Callee = CLI.Callee;
1466 ArgListTy &Args = CLI.getArgs();
1467 Type *RetTy = CLI.RetTy;
1468 const CallBase *CB = CLI.CB;
1469 const DataLayout &DL = DAG.getDataLayout();
1470 LLVMContext &Ctx = *DAG.getContext();
1471
1472 const auto GetI32 = [&](const unsigned I) {
1473 return DAG.getConstant(I, dl, MVT::i32);
1474 };
1475
1476 const unsigned UniqueCallSite = GlobalUniqueCallSite++;
1477 const SDValue CallChain = CLI.Chain;
1478 const SDValue StartChain =
1479 DAG.getCALLSEQ_START(CallChain, UniqueCallSite, 0, dl);
1480 SDValue DeclareGlue = StartChain.getValue(1);
1481
1482 SmallVector<SDValue, 16> CallPrereqs{StartChain};
1483
1484 const auto MakeDeclareScalarParam = [&](SDValue Symbol, unsigned Size) {
1485 // PTX ABI requires integral types to be at least 32 bits in size. FP16 is
1486 // loaded/stored using i16, so it's handled here as well.
1487 const unsigned SizeBits = promoteScalarArgumentSize(Size * 8);
1488 SDValue Declare =
1489 DAG.getNode(NVPTXISD::DeclareScalarParam, dl, {MVT::Other, MVT::Glue},
1490 {StartChain, Symbol, GetI32(SizeBits), DeclareGlue});
1491 CallPrereqs.push_back(Declare);
1492 DeclareGlue = Declare.getValue(1);
1493 return Declare;
1494 };
1495
1496 const auto MakeDeclareArrayParam = [&](SDValue Symbol, Align Align,
1497 unsigned Size) {
1498 SDValue Declare = DAG.getNode(
1499 NVPTXISD::DeclareArrayParam, dl, {MVT::Other, MVT::Glue},
1500 {StartChain, Symbol, GetI32(Align.value()), GetI32(Size), DeclareGlue});
1501 CallPrereqs.push_back(Declare);
1502 DeclareGlue = Declare.getValue(1);
1503 return Declare;
1504 };
1505
1506 // Variadic arguments.
1507 //
1508 // Normally, for each argument, we declare a param scalar or a param
1509 // byte array in the .param space, and store the argument value to that
1510 // param scalar or array starting at offset 0.
1511 //
1512 // In the case of the first variadic argument, we declare a vararg byte array
1513 // with size 0. The exact size of this array isn't known at this point, so
1514 // it'll be patched later. All the variadic arguments will be stored to this
1515 // array at a certain offset (which gets tracked by 'VAOffset'). The offset is
1516 // initially set to 0, so it can be used for non-variadic arguments (which use
1517 // 0 offset) to simplify the code.
1518 //
1519 // After all vararg is processed, 'VAOffset' holds the size of the
1520 // vararg byte array.
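// Example (illustrative): assuming a variadic call f(i32 fixed, i32 va0,
// double va1), the fixed i32 gets its own .param declaration, while va0 and
// va1 are stored into the single vararg byte array at offsets 0 and 8 (after
// alignment), leaving VAOffset == 16 to be patched into the array's size
// afterwards.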
1521 assert((CLI.IsVarArg || CLI.Args.size() == CLI.NumFixedArgs) &&
1522 "Non-VarArg function with extra arguments");
1523
1524 const unsigned FirstVAArg = CLI.NumFixedArgs; // position of first variadic
1525 unsigned VAOffset = 0; // current offset in the param array
1526
1527 const SDValue VADeclareParam =
1528 CLI.Args.size() > FirstVAArg
1529 ? MakeDeclareArrayParam(getCallParamSymbol(DAG, FirstVAArg, MVT::i32),
1530 Align(STI.getMaxRequiredAlignment()), 0)
1531 : SDValue();
1532
1533 // Args.size() and Outs.size() need not match.
1534 // Outs.size() will be larger
1535 // * if there is an aggregate argument with multiple fields (each field
1536 // showing up separately in Outs)
1537 // * if there is a vector argument with more than typical vector-length
1538 // elements (generally if more than 4) where each vector element is
1539 // individually present in Outs.
1540 // So a different index should be used for indexing into Outs/OutVals.
1541 // See similar issue in LowerFormalArguments.
1542 auto AllOuts = ArrayRef(CLI.Outs);
1543 auto AllOutVals = ArrayRef(CLI.OutVals);
1544 assert(AllOuts.size() == AllOutVals.size() &&
1545 "Outs and OutVals must be the same size");
1546 // Declare the .param or .reg spaces needed to pass values
1547 // to the function.
1548 for (const auto E : llvm::enumerate(Args)) {
1549 const auto ArgI = E.index();
1550 const auto Arg = E.value();
1551 const auto ArgOuts =
1552 AllOuts.take_while([&](auto O) { return O.OrigArgIndex == ArgI; });
1553 const auto ArgOutVals = AllOutVals.take_front(ArgOuts.size());
1554 AllOuts = AllOuts.drop_front(ArgOuts.size());
1555 AllOutVals = AllOutVals.drop_front(ArgOuts.size());
1556
1557 const bool IsVAArg = (ArgI >= FirstVAArg);
1558 const bool IsByVal = Arg.IsByVal;
1559
1560 const SDValue ParamSymbol =
1561 getCallParamSymbol(DAG, IsVAArg ? FirstVAArg : ArgI, MVT::i32);
1562
1563 assert((!IsByVal || Arg.IndirectType) &&
1564 "byval arg must have indirect type");
1565 Type *ETy = (IsByVal ? Arg.IndirectType : Arg.Ty);
1566
1567 const Align ArgAlign = [&]() {
1568 if (IsByVal) {
1569 // The ByValAlign in the Outs[OIdx].Flags is always set at this point,
1570 // so we don't need to worry whether it's naturally aligned or not.
1571 // See TargetLowering::LowerCallTo().
1572 const Align InitialAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1573 return getFunctionByValParamAlign(CB->getCalledFunction(), ETy,
1574 InitialAlign, DL);
1575 }
1576 return getArgumentAlignment(CB, Arg.Ty, ArgI + 1, DL);
1577 }();
1578
1579 const unsigned TySize = DL.getTypeAllocSize(ETy);
1580 assert((!IsByVal || TySize == ArgOuts[0].Flags.getByValSize()) &&
1581 "type size mismatch");
1582
1583 const SDValue ArgDeclare = [&]() {
1584 if (IsVAArg)
1585 return VADeclareParam;
1586
1587 if (IsByVal || shouldPassAsArray(Arg.Ty))
1588 return MakeDeclareArrayParam(ParamSymbol, ArgAlign, TySize);
1589
1590 assert(ArgOuts.size() == 1 && "We must pass only one value as non-array");
1591 assert((ArgOuts[0].VT.isInteger() || ArgOuts[0].VT.isFloatingPoint()) &&
1592 "Only int and float types are supported as non-array arguments");
1593
1594 return MakeDeclareScalarParam(ParamSymbol, TySize);
1595 }();
1596
1597 if (IsByVal) {
1598 assert(ArgOutVals.size() == 1 && "We must pass only one value as byval");
1599 SDValue SrcPtr = ArgOutVals[0];
1600 const auto PointerInfo = refinePtrAS(SrcPtr, DAG, DL, *this);
1601 const Align BaseSrcAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1602
1603 if (IsVAArg)
1604 VAOffset = alignTo(VAOffset, ArgAlign);
1605
1606 SmallVector<EVT, 4> ValueVTs, MemVTs;
1607 SmallVector<TypeSize, 4> Offsets;
1608 ComputeValueVTs(*this, DL, ETy, ValueVTs, &MemVTs, &Offsets);
1609
1610 unsigned J = 0;
1611 const auto VI = VectorizePTXValueVTs(MemVTs, Offsets, ArgAlign, IsVAArg);
1612 for (const unsigned NumElts : VI) {
1613 EVT LoadVT = getVectorizedVT(MemVTs[J], NumElts, Ctx);
1614 Align SrcAlign = commonAlignment(BaseSrcAlign, Offsets[J]);
1615 SDValue SrcAddr = DAG.getObjectPtrOffset(dl, SrcPtr, Offsets[J]);
1616 SDValue SrcLoad =
1617 DAG.getLoad(LoadVT, dl, CallChain, SrcAddr, PointerInfo, SrcAlign);
1618
1619 TypeSize ParamOffset = Offsets[J].getWithIncrement(VAOffset);
1620 Align ParamAlign = commonAlignment(ArgAlign, ParamOffset);
1621 SDValue ParamAddr =
1622 DAG.getObjectPtrOffset(dl, ParamSymbol, ParamOffset);
1623 SDValue StoreParam =
1624 DAG.getStore(ArgDeclare, dl, SrcLoad, ParamAddr,
1626 CallPrereqs.push_back(StoreParam);
1627
1628 J += NumElts;
1629 }
1630 if (IsVAArg)
1631 VAOffset += TySize;
1632 } else {
1633 SmallVector<EVT, 16> VTs;
1634 SmallVector<uint64_t, 16> Offsets;
1635 ComputePTXValueVTs(*this, DL, Ctx, CLI.CallConv, Arg.Ty, VTs, Offsets,
1636 VAOffset);
1637 assert(VTs.size() == Offsets.size() && "Size mismatch");
1638 assert(VTs.size() == ArgOuts.size() && "Size mismatch");
1639
1640 // PTX Interoperability Guide 3.3(A): [Integer] Values shorter
1641 // than 32-bits are sign extended or zero extended, depending on
1642 // whether they are signed or unsigned types. This case applies
1643 // only to scalar parameters and not to aggregate values.
1644 const bool ExtendIntegerParam =
1645 Arg.Ty->isIntegerTy() && DL.getTypeAllocSizeInBits(Arg.Ty) < 32;
1646
1647 const auto GetStoredValue = [&](const unsigned I) {
1648 SDValue StVal = ArgOutVals[I];
1649 assert(promoteScalarIntegerPTX(StVal.getValueType()) ==
1650 StVal.getValueType() &&
1651 "OutVal type should always be legal");
1652
1653 const EVT VTI = promoteScalarIntegerPTX(VTs[I]);
1654 const EVT StoreVT =
1655 ExtendIntegerParam ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
1656
1657 return correctParamType(StVal, StoreVT, ArgOuts[I].Flags, DAG, dl);
1658 };
1659
1660 unsigned J = 0;
1661 const auto VI = VectorizePTXValueVTs(VTs, Offsets, ArgAlign, IsVAArg);
1662 for (const unsigned NumElts : VI) {
1663 const EVT EltVT = promoteScalarIntegerPTX(VTs[J]);
1664
1665 unsigned Offset;
1666 if (IsVAArg) {
1667 // TODO: We may need to support vector types that can be passed
1668 // as scalars in variadic arguments.
1669 assert(NumElts == 1 &&
1670 "Vectorization should be disabled for vaargs.");
1671
1672 // Align each part of the variadic argument to its type.
1673 VAOffset = alignTo(VAOffset, DAG.getEVTAlign(EltVT));
1674 Offset = VAOffset;
1675
1676 const EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : EltVT;
1677 VAOffset += DL.getTypeAllocSize(TheStoreType.getTypeForEVT(Ctx));
1678 } else {
1679 assert(VAOffset == 0 && "VAOffset must be 0 for non-VA args");
1680 Offset = Offsets[J];
1681 }
1682
1683 SDValue Ptr =
1684 DAG.getObjectPtrOffset(dl, ParamSymbol, TypeSize::getFixed(Offset));
1685
1686 const MaybeAlign CurrentAlign = ExtendIntegerParam
1687 ? MaybeAlign(std::nullopt)
1688 : commonAlignment(ArgAlign, Offset);
1689
1690 SDValue Val =
1691 getBuildVectorizedValue(NumElts, dl, DAG, [&](unsigned K) {
1692 return GetStoredValue(J + K);
1693 });
1694
1695 SDValue StoreParam =
1696 DAG.getStore(ArgDeclare, dl, Val, Ptr,
1698 CallPrereqs.push_back(StoreParam);
1699
1700 J += NumElts;
1701 }
1702 }
1703 }
1704
1705 // Handle Result
1706 if (!Ins.empty()) {
1707 const SDValue RetSymbol = DAG.getExternalSymbol("retval0", MVT::i32);
1708 const unsigned ResultSize = DL.getTypeAllocSize(RetTy);
1709 if (shouldPassAsArray(RetTy)) {
1710 const Align RetAlign = getArgumentAlignment(CB, RetTy, 0, DL);
1711 MakeDeclareArrayParam(RetSymbol, RetAlign, ResultSize);
1712 } else {
1713 MakeDeclareScalarParam(RetSymbol, ResultSize);
1714 }
1715 }
1716
1717 // Set the size of the vararg param byte array if the callee is a variadic
1718 // function and the variadic part is not empty.
1719 if (VADeclareParam) {
1720 SDValue DeclareParamOps[] = {VADeclareParam.getOperand(0),
1721 VADeclareParam.getOperand(1),
1722 VADeclareParam.getOperand(2), GetI32(VAOffset),
1723 VADeclareParam.getOperand(4)};
1724 DAG.MorphNodeTo(VADeclareParam.getNode(), VADeclareParam.getOpcode(),
1725 VADeclareParam->getVTList(), DeclareParamOps);
1726 }
1727
1728 const auto *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
1729 // If the type of the callsite does not match that of the function, convert
1730 // the callsite to an indirect call.
1731 const bool ConvertToIndirectCall = shouldConvertToIndirectCall(CB, Func);
1732
1733 // Both indirect calls and libcalls have nullptr Func. In order to distinguish
1734 // between them we must rely on the call site value which is valid for
1735 // indirect calls but is always null for libcalls.
1736 const bool IsIndirectCall = (!Func && CB) || ConvertToIndirectCall;
1737
1738 if (isa<ExternalSymbolSDNode>(Callee)) {
1739 Function* CalleeFunc = nullptr;
1740
1741 // Try to find the callee in the current module.
1742 Callee = DAG.getSymbolFunctionGlobalAddress(Callee, &CalleeFunc);
1743 assert(CalleeFunc != nullptr && "Libcall callee must be set.");
1744
1745 // Set the "libcall callee" attribute to indicate that the function
1746 // must always have a declaration.
1747 CalleeFunc->addFnAttr("nvptx-libcall-callee", "true");
1748 }
1749
1750 if (IsIndirectCall) {
1751 // This is indirect function call case : PTX requires a prototype of the
1752 // form
1753 // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
1754 // to be emitted, and the label has to be used as the last arg of the call
1755 // instruction.
1756 // The prototype is embedded in a string and put as the operand for a
1757 // CallPrototype SDNode which will print out to the value of the string.
1758 const bool HasVAArgs = CLI.IsVarArg && (CLI.Args.size() > CLI.NumFixedArgs);
1759 std::string Proto =
1760 getPrototype(DL, RetTy, Args, CLI.Outs,
1761 HasVAArgs ? std::optional(FirstVAArg) : std::nullopt, *CB,
1762 UniqueCallSite);
1763 const char *ProtoStr = nvTM->getStrPool().save(Proto).data();
1764 const SDValue PrototypeDeclare = DAG.getNode(
1765 NVPTXISD::CallPrototype, dl, MVT::Other,
1766 {StartChain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32)});
1767 CallPrereqs.push_back(PrototypeDeclare);
1768 }
1769
1770 const unsigned Proto = IsIndirectCall ? UniqueCallSite : 0;
1771 const unsigned NumArgs =
1772 std::min<unsigned>(CLI.NumFixedArgs + 1, Args.size());
1773 /// CALL(Chain, IsConvergent, IsIndirectCall/IsUniform, NumReturns,
1774 /// NumParams, Callee, Proto)
1775 const SDValue CallToken = DAG.getTokenFactor(dl, CallPrereqs);
1776 const SDValue Call = DAG.getNode(
1777 NVPTXISD::CALL, dl, MVT::Other,
1778 {CallToken, GetI32(CLI.IsConvergent), GetI32(IsIndirectCall),
1779 GetI32(Ins.empty() ? 0 : 1), GetI32(NumArgs), Callee, GetI32(Proto)});
1780
1781 SmallVector<SDValue, 16> LoadChains{Call};
1782 SmallVector<SDValue, 16> ProxyRegOps;
1783 if (!Ins.empty()) {
1784 SmallVector<EVT, 16> VTs;
1785 SmallVector<uint64_t, 16> Offsets;
1786 ComputePTXValueVTs(*this, DL, Ctx, CLI.CallConv, RetTy, VTs, Offsets);
1787 assert(VTs.size() == Ins.size() && "Bad value decomposition");
1788
1789 const Align RetAlign = getArgumentAlignment(CB, RetTy, 0, DL);
1790 const SDValue RetSymbol = DAG.getExternalSymbol("retval0", MVT::i32);
1791
1792 // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
1793 // 32-bits are sign extended or zero extended, depending on whether
1794 // they are signed or unsigned types.
1795 const bool ExtendIntegerRetVal =
1796 RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
1797
1798 unsigned I = 0;
1799 const auto VI = VectorizePTXValueVTs(VTs, Offsets, RetAlign);
1800 for (const unsigned NumElts : VI) {
1801 const MaybeAlign CurrentAlign =
1802 ExtendIntegerRetVal ? MaybeAlign(std::nullopt)
1803 : commonAlignment(RetAlign, Offsets[I]);
1804
1805 const EVT VTI = promoteScalarIntegerPTX(VTs[I]);
1806 const EVT LoadVT =
1807 ExtendIntegerRetVal ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
1808 const EVT VecVT = getVectorizedVT(LoadVT, NumElts, Ctx);
1809 SDValue Ptr =
1810 DAG.getObjectPtrOffset(dl, RetSymbol, TypeSize::getFixed(Offsets[I]));
1811
1812 SDValue R =
1813 DAG.getLoad(VecVT, dl, Call, Ptr,
1815
1816 LoadChains.push_back(R.getValue(1));
1817 for (const unsigned J : llvm::seq(NumElts))
1818 ProxyRegOps.push_back(getExtractVectorizedValue(R, J, LoadVT, dl, DAG));
1819 I += NumElts;
1820 }
1821 }
1822
1823 const SDValue EndToken = DAG.getTokenFactor(dl, LoadChains);
1824 const SDValue CallEnd = DAG.getCALLSEQ_END(EndToken, UniqueCallSite,
1825 UniqueCallSite + 1, SDValue(), dl);
1826
1827 // Append ProxyReg instructions to the chain to make sure that `callseq_end`
1828 // will not get lost. Otherwise, during libcalls expansion, the nodes can become
1829 // dangling.
1830 for (const auto [I, Reg] : llvm::enumerate(ProxyRegOps)) {
1831 SDValue Proxy =
1832 DAG.getNode(NVPTXISD::ProxyReg, dl, Reg.getValueType(), {CallEnd, Reg});
1833 SDValue Ret = correctParamType(Proxy, Ins[I].VT, Ins[I].Flags, DAG, dl);
1834 InVals.push_back(Ret);
1835 }
1836
1837 // set IsTailCall to false for now, until we figure out how to express
1838 // tail call optimization in PTX
1839 CLI.IsTailCall = false;
1840 return CallEnd;
1841}
1842
1843SDValue NVPTXTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
1844                                                      SelectionDAG &DAG) const {
1845
1846 if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
1847 const Function &Fn = DAG.getMachineFunction().getFunction();
1848
1849    DAG.getContext()->diagnose(DiagnosticInfoUnsupported(
1850        Fn,
1851        "Support for dynamic alloca requires PTX ISA version >= 7.3 and "
1852        "target >= sm_52.",
1853 SDLoc(Op).getDebugLoc()));
1854 auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()),
1855 Op.getOperand(0)};
1856 return DAG.getMergeValues(Ops, SDLoc());
1857 }
1858
1859 SDLoc DL(Op.getNode());
1860 SDValue Chain = Op.getOperand(0);
1861 SDValue Size = Op.getOperand(1);
1862 uint64_t Align = Op.getConstantOperandVal(2);
1863
1864  // The alignment on an ISD::DYNAMIC_STACKALLOC node may be 0 to indicate that
1865 // the default stack alignment should be used.
1866 if (Align == 0)
1868
1869  // The size operand of the PTX alloca instruction is 64-bit for m64 and 32-bit for m32.
1870 const MVT LocalVT = getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_LOCAL);
1871
1872 SDValue Alloc =
1873 DAG.getNode(NVPTXISD::DYNAMIC_STACKALLOC, DL, {LocalVT, MVT::Other},
1874 {Chain, DAG.getZExtOrTrunc(Size, DL, LocalVT),
1875 DAG.getTargetConstant(Align, DL, MVT::i32)});
1876
1877 SDValue ASC = DAG.getAddrSpaceCast(
1878      DL, Op.getValueType(), Alloc, ADDRESS_SPACE_LOCAL, ADDRESS_SPACE_GENERIC);
1879
1880 return DAG.getMergeValues({ASC, SDValue(Alloc.getNode(), 1)}, DL);
1881}
1882
1883SDValue NVPTXTargetLowering::LowerSTACKRESTORE(SDValue Op,
1884                                               SelectionDAG &DAG) const {
1885 SDLoc DL(Op.getNode());
1886 if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
1887 const Function &Fn = DAG.getMachineFunction().getFunction();
1888
1889    DAG.getContext()->diagnose(DiagnosticInfoUnsupported(
1890        Fn,
1891 "Support for stackrestore requires PTX ISA version >= 7.3 and target "
1892 ">= sm_52.",
1893 DL.getDebugLoc()));
1894 return Op.getOperand(0);
1895 }
1896
1897 const MVT LocalVT = getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_LOCAL);
1898 SDValue Chain = Op.getOperand(0);
1899 SDValue Ptr = Op.getOperand(1);
1900  SDValue ASC = DAG.getAddrSpaceCast(DL, LocalVT, Ptr, ADDRESS_SPACE_GENERIC,
1901                                     ADDRESS_SPACE_LOCAL);
1902  return DAG.getNode(NVPTXISD::STACKRESTORE, DL, MVT::Other, {Chain, ASC});
1903}
1904
1905SDValue NVPTXTargetLowering::LowerSTACKSAVE(SDValue Op,
1906                                            SelectionDAG &DAG) const {
1907 SDLoc DL(Op.getNode());
1908 if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
1909 const Function &Fn = DAG.getMachineFunction().getFunction();
1910
1911    DAG.getContext()->diagnose(DiagnosticInfoUnsupported(
1912        Fn,
1913 "Support for stacksave requires PTX ISA version >= 7.3 and target >= "
1914 "sm_52.",
1915 DL.getDebugLoc()));
1916 auto Ops = {DAG.getConstant(0, DL, Op.getValueType()), Op.getOperand(0)};
1917 return DAG.getMergeValues(Ops, DL);
1918 }
1919
1920 const MVT LocalVT = getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_LOCAL);
1921 SDValue Chain = Op.getOperand(0);
1922 SDValue SS =
1923 DAG.getNode(NVPTXISD::STACKSAVE, DL, {LocalVT, MVT::Other}, Chain);
1924 SDValue ASC = DAG.getAddrSpaceCast(
1925 DL, Op.getValueType(), SS, ADDRESS_SPACE_LOCAL, ADDRESS_SPACE_GENERIC);
1926 return DAG.getMergeValues({ASC, SDValue(SS.getNode(), 1)}, DL);
1927}
1928
1929// By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
1930// (see LegalizeDAG.cpp). This is slow and uses local memory.
1931// We use extract/insert/build_vector nodes instead, just as LegalizeOp() did in LLVM 2.5.
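// For example, (v4f16 concat_vectors (v2f16 A), (v2f16 B)) becomes a
// build_vector of the four f16 elements extracted from A and B, avoiding the
// stack round-trip.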
1932SDValue
1933NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
1934 SDNode *Node = Op.getNode();
1935 SDLoc dl(Node);
1937 unsigned NumOperands = Node->getNumOperands();
1938 for (unsigned i = 0; i < NumOperands; ++i) {
1939 SDValue SubOp = Node->getOperand(i);
1940 EVT VVT = SubOp.getNode()->getValueType(0);
1941 EVT EltVT = VVT.getVectorElementType();
1942 unsigned NumSubElem = VVT.getVectorNumElements();
1943 for (unsigned j = 0; j < NumSubElem; ++j) {
1944 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
1945 DAG.getIntPtrConstant(j, dl)));
1946 }
1947 }
1948 return DAG.getBuildVector(Node->getValueType(0), dl, Ops);
1949}
1950
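// PRMT (PTX prmt.b32) treats its two i32 operands as one eight-byte source,
// with A supplying bytes 0-3 and B bytes 4-7; each 4-bit nibble of the
// selector (lowest nibble first) chooses which source byte lands in the
// corresponding result byte.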
1951static SDValue getPRMT(SDValue A, SDValue B, SDValue Selector, SDLoc DL,
1952                       SelectionDAG &DAG,
1953 unsigned Mode = NVPTX::PTXPrmtMode::NONE) {
1954 assert(A.getValueType() == MVT::i32 && B.getValueType() == MVT::i32 &&
1955 Selector.getValueType() == MVT::i32 && "PRMT must have i32 operands");
1956 return DAG.getNode(NVPTXISD::PRMT, DL, MVT::i32,
1957 {A, B, Selector, DAG.getConstant(Mode, DL, MVT::i32)});
1958}
1959
1960static SDValue getPRMT(SDValue A, SDValue B, uint64_t Selector, SDLoc DL,
1961                       SelectionDAG &DAG,
1962 unsigned Mode = NVPTX::PTXPrmtMode::NONE) {
1963 return getPRMT(A, B, DAG.getConstant(Selector, DL, MVT::i32), DL, DAG, Mode);
1964}
1965
1966/// Reduces the elements using the scalar operations provided. The operations
1967/// are sorted descending in number of inputs they take. The flags on the
1968/// original reduction operation will be propagated to each scalar operation.
1969/// Nearby elements are grouped in tree reduction, unlike the shuffle reduction
1970/// used in ExpandReductions and SelectionDAG.
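///
/// For example, given seven elements e0..e6 and operations {(max3, 3), (max, 2)},
/// the first level yields max3(e0, e1, e2) and max3(e3, e4, e5) and carries e6
/// forward, and the next level combines the three remaining values into
/// max3(max3(e0, e1, e2), max3(e3, e4, e5), e6).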
1971static SDValue buildTreeReduction(
1972    const SmallVector<SDValue> &Elements, EVT EltTy,
1973 ArrayRef<std::pair<unsigned /*NodeType*/, unsigned /*NumInputs*/>> Ops,
1974 const SDLoc &DL, const SDNodeFlags Flags, SelectionDAG &DAG) {
1975 // Build the reduction tree at each level, starting with all the elements.
1976 SmallVector<SDValue> Level = Elements;
1977
1978 unsigned OpIdx = 0;
1979 while (Level.size() > 1) {
1980 // Try to reduce this level using the current operator.
1981 const auto [Op, NumInputs] = Ops[OpIdx];
1982
1983 // Build the next level by partially reducing all elements.
1984 SmallVector<SDValue> ReducedLevel;
1985 unsigned I = 0, E = Level.size();
1986 for (; I + NumInputs <= E; I += NumInputs) {
1987 // Reduce elements in groups of [NumInputs], as much as possible.
1988 ReducedLevel.push_back(DAG.getNode(
1989 Op, DL, EltTy, ArrayRef<SDValue>(Level).slice(I, NumInputs), Flags));
1990 }
1991
1992 if (I < E) {
1993 // Handle leftover elements.
1994
1995 if (ReducedLevel.empty()) {
1996 // We didn't reduce anything at this level. We need to pick a smaller
1997 // operator.
1998 ++OpIdx;
1999 assert(OpIdx < Ops.size() && "no smaller operators for reduction");
2000 continue;
2001 }
2002
2003 // We reduced some things but there's still more left, meaning the
2004 // operator's number of inputs doesn't evenly divide this level size. Move
2005 // these elements to the next level.
2006 for (; I < E; ++I)
2007 ReducedLevel.push_back(Level[I]);
2008 }
2009
2010 // Process the next level.
2011 Level = ReducedLevel;
2012 }
2013
2014 return *Level.begin();
2015}
2016
2017// Get scalar reduction opcode
2018static ISD::NodeType getScalarOpcodeForReduction(unsigned ReductionOpcode) {
2019 switch (ReductionOpcode) {
2020 case ISD::VECREDUCE_FMAX:
2021 return ISD::FMAXNUM;
2022 case ISD::VECREDUCE_FMIN:
2023 return ISD::FMINNUM;
2024 case ISD::VECREDUCE_FMAXIMUM:
2025 return ISD::FMAXIMUM;
2026 case ISD::VECREDUCE_FMINIMUM:
2027 return ISD::FMINIMUM;
2028 default:
2029 llvm_unreachable("unhandled reduction opcode");
2030 }
2031}
2032
2033/// Get 3-input scalar reduction opcode
2034static std::optional<NVPTXISD::NodeType>
2035getScalar3OpcodeForReduction(unsigned ReductionOpcode) {
2036 switch (ReductionOpcode) {
2037 case ISD::VECREDUCE_FMAX:
2038 return NVPTXISD::FMAXNUM3;
2039 case ISD::VECREDUCE_FMIN:
2040 return NVPTXISD::FMINNUM3;
2041 case ISD::VECREDUCE_FMAXIMUM:
2042 return NVPTXISD::FMAXIMUM3;
2043 case ISD::VECREDUCE_FMINIMUM:
2044 return NVPTXISD::FMINIMUM3;
2045 default:
2046 return std::nullopt;
2047 }
2048}
2049
2050/// Lower reductions to either a sequence of operations or a tree if
2051/// reassociations are allowed. This method will use larger operations like
2052/// max3/min3 when the target supports them.
2053SDValue NVPTXTargetLowering::LowerVECREDUCE(SDValue Op,
2054 SelectionDAG &DAG) const {
2055 SDLoc DL(Op);
2056 const SDNodeFlags Flags = Op->getFlags();
2057 SDValue Vector = Op.getOperand(0);
2058
2059 const unsigned Opcode = Op->getOpcode();
2060 const EVT EltTy = Vector.getValueType().getVectorElementType();
2061
2062 // Whether we can use 3-input min/max when expanding the reduction.
2063 const bool CanUseMinMax3 =
2064 EltTy == MVT::f32 && STI.getSmVersion() >= 100 &&
2065 STI.getPTXVersion() >= 88 &&
2066 (Opcode == ISD::VECREDUCE_FMAX || Opcode == ISD::VECREDUCE_FMIN ||
2067 Opcode == ISD::VECREDUCE_FMAXIMUM || Opcode == ISD::VECREDUCE_FMINIMUM);
2068
2069 // A list of SDNode opcodes with equivalent semantics, sorted descending by
2070 // number of inputs they take.
2071 SmallVector<std::pair<unsigned /*Op*/, unsigned /*NumIn*/>, 2> ScalarOps;
2072
2073 if (auto Opcode3Elem = getScalar3OpcodeForReduction(Opcode);
2074 CanUseMinMax3 && Opcode3Elem)
2075 ScalarOps.push_back({*Opcode3Elem, 3});
2076 ScalarOps.push_back({getScalarOpcodeForReduction(Opcode), 2});
2077
2079 DAG.ExtractVectorElements(Vector, Elements);
2080
2081 return buildTreeReduction(Elements, EltTy, ScalarOps, DL, Flags, DAG);
2082}
2083
2084SDValue NVPTXTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
2085 // Handle bitcasting from v2i8 without hitting the default promotion
2086 // strategy which goes through stack memory.
2087 EVT FromVT = Op->getOperand(0)->getValueType(0);
2088 if (FromVT != MVT::v2i8) {
2089 return Op;
2090 }
2091
2092 // Pack vector elements into i16 and bitcast to final type
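  // For example, a v2i8 <0x12, 0x34> is packed into the i16 0x3412 (element 1
  // in the high byte) before the final bitcast.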
2093 SDLoc DL(Op);
2094 SDValue Vec0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i8,
2095 Op->getOperand(0), DAG.getIntPtrConstant(0, DL));
2096 SDValue Vec1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i8,
2097 Op->getOperand(0), DAG.getIntPtrConstant(1, DL));
2098 SDValue Extend0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i16, Vec0);
2099 SDValue Extend1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i16, Vec1);
2100 SDValue Const8 = DAG.getConstant(8, DL, MVT::i16);
2101 SDValue AsInt = DAG.getNode(
2102 ISD::OR, DL, MVT::i16,
2103 {Extend0, DAG.getNode(ISD::SHL, DL, MVT::i16, {Extend1, Const8})});
2104 EVT ToVT = Op->getValueType(0);
2105 return DAG.getBitcast(ToVT, AsInt);
2106}
2107
2108// We can initialize a constant f16x2/v2i16/v4i8 with a single .b32 move. Normally
2109// it would get lowered as two constant loads and a vector-packing move.
2110// Instead we want just a constant move:
2111// mov.b32 %r2, 0x40003C00
2112SDValue NVPTXTargetLowering::LowerBUILD_VECTOR(SDValue Op,
2113 SelectionDAG &DAG) const {
2114 EVT VT = Op->getValueType(0);
2115 if (!(NVPTX::isPackedVectorTy(VT) && VT.is32BitVector()))
2116 return Op;
2117 SDLoc DL(Op);
2118
2119 if (!llvm::all_of(Op->ops(), [](SDValue Operand) {
2120 return Operand->isUndef() || isa<ConstantSDNode>(Operand) ||
2121 isa<ConstantFPSDNode>(Operand);
2122 })) {
2123 if (VT != MVT::v4i8)
2124 return Op;
2125    // Lower a non-constant v4i8 vector as a byte-wise constructed i32, which
2126    // allows us to optimize the calculation of the constant parts.
2127 auto GetPRMT = [&](const SDValue Left, const SDValue Right, bool Cast,
2128 uint64_t SelectionValue) -> SDValue {
2129 SDValue L = Left;
2130 SDValue R = Right;
2131 if (Cast) {
2132 L = DAG.getAnyExtOrTrunc(L, DL, MVT::i32);
2133 R = DAG.getAnyExtOrTrunc(R, DL, MVT::i32);
2134 }
2135 return getPRMT(L, R, SelectionValue, DL, DAG);
2136 };
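    // Bytes 0-3 of the PRMT source come from the left operand and bytes 4-7
    // from the right one. Selector 0x3340 packs the low byte of each operand
    // into result bytes 0 and 1 (its upper nibbles are don't-cares), and 0x5410
    // then merges the two partial results so result bytes 0-3 hold op0..op3.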
2137 auto PRMT__10 = GetPRMT(Op->getOperand(0), Op->getOperand(1), true, 0x3340);
2138 auto PRMT__32 = GetPRMT(Op->getOperand(2), Op->getOperand(3), true, 0x3340);
2139 auto PRMT3210 = GetPRMT(PRMT__10, PRMT__32, false, 0x5410);
2140 return DAG.getBitcast(VT, PRMT3210);
2141 }
2142
2143  // Get the value of the Nth operand as an APInt(32). Undef values are treated as 0.
2144 auto GetOperand = [](SDValue Op, int N) -> APInt {
2145 const SDValue &Operand = Op->getOperand(N);
2146 EVT VT = Op->getValueType(0);
2147 if (Operand->isUndef())
2148 return APInt(32, 0);
2149 APInt Value;
2150 if (VT == MVT::v2f16 || VT == MVT::v2bf16)
2151 Value = cast<ConstantFPSDNode>(Operand)->getValueAPF().bitcastToAPInt();
2152 else if (VT == MVT::v2i16 || VT == MVT::v4i8)
2153 Value = Operand->getAsAPIntVal();
2154 else
2155 llvm_unreachable("Unsupported type");
2156    // i8 values are carried around as i16, so we need to zero out the upper bits
2157    // so that they do not get in the way of combining the individual byte values.
2158 if (VT == MVT::v4i8)
2159 Value = Value.trunc(8);
2160 return Value.zext(32);
2161 };
2162
2163 // Construct a 32-bit constant by shifting into place smaller values
2164 // (elements of the vector type VT).
2165 // For example, if VT has 2 elements, then N == 2:
2166 // ShiftAmount = 32 / N = 16
2167 // Value |= Op0 (b16) << 0
2168 // Value |= Op1 (b16) << 16
2169 // If N == 4:
2170 // ShiftAmount = 32 / N = 8
2171 // Value |= Op0 (b8) << 0
2172 // Value |= Op1 (b8) << 8
2173 // Value |= Op2 (b8) << 16
2174 // Value |= Op3 (b8) << 24
2175 // ...etc
2176 APInt Value(32, 0);
2177 const unsigned NumElements = VT.getVectorNumElements();
2178 assert(32 % NumElements == 0 && "must evenly divide bit length");
2179 const unsigned ShiftAmount = 32 / NumElements;
2180 for (unsigned ElementNo : seq(NumElements))
2181 Value |= GetOperand(Op, ElementNo).shl(ElementNo * ShiftAmount);
2182 SDValue Const = DAG.getConstant(Value, DL, MVT::i32);
2183 return DAG.getNode(ISD::BITCAST, DL, Op->getValueType(0), Const);
2184}
2185
2186SDValue NVPTXTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
2187 SelectionDAG &DAG) const {
2188 SDValue Index = Op->getOperand(1);
2189 SDValue Vector = Op->getOperand(0);
2190 SDLoc DL(Op);
2191 EVT VectorVT = Vector.getValueType();
2192
2193 if (VectorVT == MVT::v4i8) {
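    // Use a PRMT whose selector places byte [Index] of the vector into result
    // byte 0 and fills the remaining bytes from the zero second operand, so the
    // selected i8 is effectively zero-extended to i32.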
2194 SDValue Selector = DAG.getNode(ISD::OR, DL, MVT::i32,
2195 DAG.getZExtOrTrunc(Index, DL, MVT::i32),
2196 DAG.getConstant(0x7770, DL, MVT::i32));
2197 SDValue PRMT = getPRMT(DAG.getBitcast(MVT::i32, Vector),
2198 DAG.getConstant(0, DL, MVT::i32), Selector, DL, DAG);
2199 SDValue Ext = DAG.getAnyExtOrTrunc(PRMT, DL, Op->getValueType(0));
2200 SDNodeFlags Flags;
2201 Flags.setNoSignedWrap(Ext.getScalarValueSizeInBits() > 8);
2202 Flags.setNoUnsignedWrap(Ext.getScalarValueSizeInBits() >= 8);
2203 Ext->setFlags(Flags);
2204 return Ext;
2205 }
2206
2207 // Constant index will be matched by tablegen.
2208 if (isa<ConstantSDNode>(Index.getNode()))
2209 return Op;
2210
2211 // Extract individual elements and select one of them.
2212 assert(NVPTX::isPackedVectorTy(VectorVT) &&
2213 VectorVT.getVectorNumElements() == 2 && "Unexpected vector type.");
2214 EVT EltVT = VectorVT.getVectorElementType();
2215
2216 SDLoc dl(Op.getNode());
2217 SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
2218 DAG.getIntPtrConstant(0, dl));
2219 SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
2220 DAG.getIntPtrConstant(1, dl));
2221 return DAG.getSelectCC(dl, Index, DAG.getIntPtrConstant(0, dl), E0, E1,
2222                         ISD::SETEQ);
2223}
2224
2225SDValue NVPTXTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
2226 SelectionDAG &DAG) const {
2227 SDValue Vector = Op->getOperand(0);
2228 EVT VectorVT = Vector.getValueType();
2229
2230 if (VectorVT != MVT::v4i8)
2231 return Op;
2232 SDLoc DL(Op);
2233 SDValue Value = Op->getOperand(1);
2234 if (Value->isUndef())
2235 return Vector;
2236
2237 SDValue Index = Op->getOperand(2);
2238
2239 SDValue BFI =
2240 DAG.getNode(NVPTXISD::BFI, DL, MVT::i32,
2241 {DAG.getZExtOrTrunc(Value, DL, MVT::i32), Vector,
2242 DAG.getNode(ISD::MUL, DL, MVT::i32,
2243 DAG.getZExtOrTrunc(Index, DL, MVT::i32),
2244 DAG.getConstant(8, DL, MVT::i32)),
2245 DAG.getConstant(8, DL, MVT::i32)});
2246 return DAG.getNode(ISD::BITCAST, DL, Op->getValueType(0), BFI);
2247}
2248
2249SDValue NVPTXTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
2250 SelectionDAG &DAG) const {
2251 SDValue V1 = Op.getOperand(0);
2252 EVT VectorVT = V1.getValueType();
2253 if (VectorVT != MVT::v4i8 || Op.getValueType() != MVT::v4i8)
2254 return Op;
2255
2256 // Lower shuffle to PRMT instruction.
2257 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2258 SDValue V2 = Op.getOperand(1);
2259 uint32_t Selector = 0;
2260 for (auto I : llvm::enumerate(SVN->getMask())) {
2261 if (I.value() != -1) // -1 is a placeholder for undef.
2262 Selector |= (I.value() << (I.index() * 4));
2263 }
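  // Each mask element becomes one selector nibble; e.g. the mask <0, 4, 1, 5>
  // yields selector 0x5140, which picks bytes 0 and 4, then bytes 1 and 5, of
  // the concatenated {V2 : V1} source.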
2264
2265 SDLoc DL(Op);
2266 SDValue PRMT = getPRMT(DAG.getBitcast(MVT::i32, V1),
2267 DAG.getBitcast(MVT::i32, V2), Selector, DL, DAG);
2268 return DAG.getBitcast(Op.getValueType(), PRMT);
2269}
2270/// LowerShiftRightParts - Lower SRL_PARTS, SRA_PARTS, which
2271/// 1) returns two i32 values and takes a 2 x i32 value to shift plus a shift
2272/// amount, or
2273/// 2) returns two i64 values and takes a 2 x i64 value to shift plus a shift
2274/// amount.
2275SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,
2276 SelectionDAG &DAG) const {
2277 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
2278 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
2279
2280 EVT VT = Op.getValueType();
2281 unsigned VTBits = VT.getSizeInBits();
2282 SDLoc dl(Op);
2283 SDValue ShOpLo = Op.getOperand(0);
2284 SDValue ShOpHi = Op.getOperand(1);
2285 SDValue ShAmt = Op.getOperand(2);
2286 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
2287
2288 if (VTBits == 32 && STI.getSmVersion() >= 35) {
2289 // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
2290 // {dHi, dLo} = {aHi, aLo} >> Amt
2291 // dHi = aHi >> Amt
2292 // dLo = shf.r.clamp aLo, aHi, Amt
2293
2294 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
2295 SDValue Lo =
2296 DAG.getNode(NVPTXISD::FSHR_CLAMP, dl, VT, ShOpHi, ShOpLo, ShAmt);
2297
2298 SDValue Ops[2] = { Lo, Hi };
2299 return DAG.getMergeValues(Ops, dl);
2300 }
2301 else {
2302 // {dHi, dLo} = {aHi, aLo} >> Amt
2303 // - if (Amt>=size) then
2304 // dLo = aHi >> (Amt-size)
2305 // dHi = aHi >> Amt (this is either all 0 or all 1)
2306 // else
2307 // dLo = (aLo >>logic Amt) | (aHi << (size-Amt))
2308 // dHi = aHi >> Amt
2309
2310 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
2311 DAG.getConstant(VTBits, dl, MVT::i32),
2312 ShAmt);
2313 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
2314 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
2315 DAG.getConstant(VTBits, dl, MVT::i32));
2316 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
2317 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
2318 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
2319
2320 SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
2321 DAG.getConstant(VTBits, dl, MVT::i32),
2322 ISD::SETGE);
2323 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
2324 SDValue Lo = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
2325
2326 SDValue Ops[2] = { Lo, Hi };
2327 return DAG.getMergeValues(Ops, dl);
2328 }
2329}
2330
2331/// LowerShiftLeftParts - Lower SHL_PARTS, which
2332/// 1) returns two i32 values and takes a 2 x i32 value to shift plus a shift
2333/// amount, or
2334/// 2) returns two i64 values and takes a 2 x i64 value to shift plus a shift
2335/// amount.
2336SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op,
2337 SelectionDAG &DAG) const {
2338 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
2339 assert(Op.getOpcode() == ISD::SHL_PARTS);
2340
2341 EVT VT = Op.getValueType();
2342 unsigned VTBits = VT.getSizeInBits();
2343 SDLoc dl(Op);
2344 SDValue ShOpLo = Op.getOperand(0);
2345 SDValue ShOpHi = Op.getOperand(1);
2346 SDValue ShAmt = Op.getOperand(2);
2347
2348 if (VTBits == 32 && STI.getSmVersion() >= 35) {
2349 // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
2350 // {dHi, dLo} = {aHi, aLo} << Amt
2351 // dHi = shf.l.clamp aLo, aHi, Amt
2352 // dLo = aLo << Amt
2353
2354 SDValue Hi =
2355 DAG.getNode(NVPTXISD::FSHL_CLAMP, dl, VT, ShOpHi, ShOpLo, ShAmt);
2356 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
2357
2358 SDValue Ops[2] = { Lo, Hi };
2359 return DAG.getMergeValues(Ops, dl);
2360 }
2361 else {
2362 // {dHi, dLo} = {aHi, aLo} << Amt
2363 // - if (Amt>=size) then
2364 // dLo = aLo << Amt (all 0)
2365 // dLo = aLo << (Amt-size)
2366 // else
2367 // dLo = aLo << Amt
2368 // dHi = (aHi << Amt) | (aLo >> (size-Amt))
2369
2370 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
2371 DAG.getConstant(VTBits, dl, MVT::i32),
2372 ShAmt);
2373 SDValue Tmp1 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
2374 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
2375 DAG.getConstant(VTBits, dl, MVT::i32));
2376 SDValue Tmp2 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
2377 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
2378 SDValue TrueVal = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
2379
2380 SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
2381 DAG.getConstant(VTBits, dl, MVT::i32),
2382 ISD::SETGE);
2383 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
2384 SDValue Hi = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
2385
2386 SDValue Ops[2] = { Lo, Hi };
2387 return DAG.getMergeValues(Ops, dl);
2388 }
2389}
2390
2391/// If the types match, convert the generic copysign to the NVPTXISD version,
2392/// otherwise bail, ensuring that mismatched cases are properly expanded.
2393SDValue NVPTXTargetLowering::LowerFCOPYSIGN(SDValue Op,
2394 SelectionDAG &DAG) const {
2395 EVT VT = Op.getValueType();
2396 SDLoc DL(Op);
2397
2398 SDValue In1 = Op.getOperand(0);
2399 SDValue In2 = Op.getOperand(1);
2400 EVT SrcVT = In2.getValueType();
2401
2402 if (!SrcVT.bitsEq(VT))
2403 return SDValue();
2404
2405 return DAG.getNode(NVPTXISD::FCOPYSIGN, DL, VT, In1, In2);
2406}
2407
2408SDValue NVPTXTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
2409 EVT VT = Op.getValueType();
2410
2411 if (VT == MVT::f32)
2412 return LowerFROUND32(Op, DAG);
2413
2414 if (VT == MVT::f64)
2415 return LowerFROUND64(Op, DAG);
2416
2417 llvm_unreachable("unhandled type");
2418}
2419
2420// This is the rounding method used in CUDA libdevice, expressed in C-like code:
2421// float roundf(float A)
2422// {
2423// float RoundedA = (float) (int) ( A > 0 ? (A + 0.5f) : (A - 0.5f));
2424// RoundedA = abs(A) > 0x1.0p23 ? A : RoundedA;
2425// return abs(A) < 0.5 ? (float)(int)A : RoundedA;
2426// }
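// For example, this rounds 2.5f to 3.0f and -2.5f to -3.0f (halfway cases go
// away from zero, matching llvm.round), while any value with magnitude above
// 2^23 is already an integer and is returned unchanged.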
2427SDValue NVPTXTargetLowering::LowerFROUND32(SDValue Op,
2428 SelectionDAG &DAG) const {
2429 SDLoc SL(Op);
2430 SDValue A = Op.getOperand(0);
2431 EVT VT = Op.getValueType();
2432
2433 SDValue AbsA = DAG.getNode(ISD::FABS, SL, VT, A);
2434
2435 // RoundedA = (float) (int) ( A > 0 ? (A + 0.5f) : (A - 0.5f))
2436 SDValue Bitcast = DAG.getNode(ISD::BITCAST, SL, MVT::i32, A);
2437 const unsigned SignBitMask = 0x80000000;
2438 SDValue Sign = DAG.getNode(ISD::AND, SL, MVT::i32, Bitcast,
2439 DAG.getConstant(SignBitMask, SL, MVT::i32));
2440 const unsigned PointFiveInBits = 0x3F000000;
2441 SDValue PointFiveWithSignRaw =
2442 DAG.getNode(ISD::OR, SL, MVT::i32, Sign,
2443 DAG.getConstant(PointFiveInBits, SL, MVT::i32));
2444 SDValue PointFiveWithSign =
2445 DAG.getNode(ISD::BITCAST, SL, VT, PointFiveWithSignRaw);
2446 SDValue AdjustedA = DAG.getNode(ISD::FADD, SL, VT, A, PointFiveWithSign);
2447 SDValue RoundedA = DAG.getNode(ISD::FTRUNC, SL, VT, AdjustedA);
2448
2449 // RoundedA = abs(A) > 0x1.0p23 ? A : RoundedA;
2450 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2451 SDValue IsLarge =
2452 DAG.getSetCC(SL, SetCCVT, AbsA, DAG.getConstantFP(pow(2.0, 23.0), SL, VT),
2453 ISD::SETOGT);
2454 RoundedA = DAG.getNode(ISD::SELECT, SL, VT, IsLarge, A, RoundedA);
2455
2456 // return abs(A) < 0.5 ? (float)(int)A : RoundedA;
2457 SDValue IsSmall =DAG.getSetCC(SL, SetCCVT, AbsA,
2458 DAG.getConstantFP(0.5, SL, VT), ISD::SETOLT);
2459 SDValue RoundedAForSmallA = DAG.getNode(ISD::FTRUNC, SL, VT, A);
2460 return DAG.getNode(ISD::SELECT, SL, VT, IsSmall, RoundedAForSmallA, RoundedA);
2461}
2462
2463// The implementation of round(double) is similar to that of round(float) in
2464// that they both separate the value range into three regions and use a method
2465// specific to the region to round the values. However, round(double) first
2466// rounds the absolute value and then adds the sign back, while round(float)
2467// directly rounds the signed value.
2468SDValue NVPTXTargetLowering::LowerFROUND64(SDValue Op,
2469 SelectionDAG &DAG) const {
2470 SDLoc SL(Op);
2471 SDValue A = Op.getOperand(0);
2472 EVT VT = Op.getValueType();
2473
2474 SDValue AbsA = DAG.getNode(ISD::FABS, SL, VT, A);
2475
2476 // double RoundedA = (double) (int) (abs(A) + 0.5f);
2477 SDValue AdjustedA = DAG.getNode(ISD::FADD, SL, VT, AbsA,
2478 DAG.getConstantFP(0.5, SL, VT));
2479 SDValue RoundedA = DAG.getNode(ISD::FTRUNC, SL, VT, AdjustedA);
2480
2481 // RoundedA = abs(A) < 0.5 ? (double)0 : RoundedA;
2482 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2483 SDValue IsSmall =DAG.getSetCC(SL, SetCCVT, AbsA,
2484 DAG.getConstantFP(0.5, SL, VT), ISD::SETOLT);
2485 RoundedA = DAG.getNode(ISD::SELECT, SL, VT, IsSmall,
2486 DAG.getConstantFP(0, SL, VT),
2487 RoundedA);
2488
2489 // Add sign to rounded_A
2490 RoundedA = DAG.getNode(ISD::FCOPYSIGN, SL, VT, RoundedA, A);
2491 DAG.getNode(ISD::FTRUNC, SL, VT, A);
2492
2493 // RoundedA = abs(A) > 0x1.0p52 ? A : RoundedA;
2494 SDValue IsLarge =
2495 DAG.getSetCC(SL, SetCCVT, AbsA, DAG.getConstantFP(pow(2.0, 52.0), SL, VT),
2496 ISD::SETOGT);
2497 return DAG.getNode(ISD::SELECT, SL, VT, IsLarge, A, RoundedA);
2498}
2499
2500static SDValue PromoteBinOpToF32(SDNode *N, SelectionDAG &DAG) {
2501  EVT VT = N->getValueType(0);
2502 EVT NVT = MVT::f32;
2503 if (VT.isVector()) {
2504 NVT = EVT::getVectorVT(*DAG.getContext(), NVT, VT.getVectorElementCount());
2505 }
2506 SDLoc DL(N);
2507 SDValue Tmp0 = DAG.getFPExtendOrRound(N->getOperand(0), DL, NVT);
2508 SDValue Tmp1 = DAG.getFPExtendOrRound(N->getOperand(1), DL, NVT);
2509 SDValue Res = DAG.getNode(N->getOpcode(), DL, NVT, Tmp0, Tmp1, N->getFlags());
2510 return DAG.getFPExtendOrRound(Res, DL, VT);
2511}
2512
2513SDValue NVPTXTargetLowering::PromoteBinOpIfF32FTZ(SDValue Op,
2514 SelectionDAG &DAG) const {
2515 if (useF32FTZ(DAG.getMachineFunction())) {
2516 return PromoteBinOpToF32(Op.getNode(), DAG);
2517 }
2518 return Op;
2519}
2520
2521SDValue NVPTXTargetLowering::LowerINT_TO_FP(SDValue Op,
2522 SelectionDAG &DAG) const {
2523 assert(STI.getSmVersion() < 90 || STI.getPTXVersion() < 78);
2524
2525 if (Op.getValueType() == MVT::bf16) {
2526 SDLoc Loc(Op);
2527 return DAG.getNode(
2528 ISD::FP_ROUND, Loc, MVT::bf16,
2529 DAG.getNode(Op.getOpcode(), Loc, MVT::f32, Op.getOperand(0)),
2530 DAG.getIntPtrConstant(0, Loc, /*isTarget=*/true));
2531 }
2532
2533 // Everything else is considered legal.
2534 return Op;
2535}
2536
2537SDValue NVPTXTargetLowering::LowerFP_TO_INT(SDValue Op,
2538 SelectionDAG &DAG) const {
2539 assert(STI.getSmVersion() < 90 || STI.getPTXVersion() < 78);
2540
2541 if (Op.getOperand(0).getValueType() == MVT::bf16) {
2542 SDLoc Loc(Op);
2543 return DAG.getNode(
2544 Op.getOpcode(), Loc, Op.getValueType(),
2545 DAG.getNode(ISD::FP_EXTEND, Loc, MVT::f32, Op.getOperand(0)));
2546 }
2547
2548 // Everything else is considered legal.
2549 return Op;
2550}
2551
2552SDValue NVPTXTargetLowering::LowerFP_ROUND(SDValue Op,
2553 SelectionDAG &DAG) const {
2554 EVT NarrowVT = Op.getValueType();
2555 SDValue Wide = Op.getOperand(0);
2556 EVT WideVT = Wide.getValueType();
2557 if (NarrowVT.getScalarType() == MVT::bf16) {
2558 const TargetLowering *TLI = STI.getTargetLowering();
2559 if (STI.getSmVersion() < 80 || STI.getPTXVersion() < 70) {
2560 return TLI->expandFP_ROUND(Op.getNode(), DAG);
2561 }
2562 if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
2563 // This combination was the first to support f32 -> bf16.
2564 if (STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70) {
2565 if (WideVT.getScalarType() == MVT::f32) {
2566 return Op;
2567 }
2568 if (WideVT.getScalarType() == MVT::f64) {
2569 SDLoc Loc(Op);
2570 // Round-inexact-to-odd f64 to f32, then do the final rounding using
2571 // the hardware f32 -> bf16 instruction.
2573 WideVT.isVector() ? WideVT.changeVectorElementType(MVT::f32)
2574 : MVT::f32,
2575 Wide, Loc, DAG);
2576 return DAG.getFPExtendOrRound(rod, Loc, NarrowVT);
2577 }
2578 }
2579 return TLI->expandFP_ROUND(Op.getNode(), DAG);
2580 }
2581 }
2582
2583 // Everything else is considered legal.
2584 return Op;
2585}
2586
2587SDValue NVPTXTargetLowering::LowerFP_EXTEND(SDValue Op,
2588 SelectionDAG &DAG) const {
2589 SDValue Narrow = Op.getOperand(0);
2590 EVT NarrowVT = Narrow.getValueType();
2591 EVT WideVT = Op.getValueType();
2592 if (NarrowVT.getScalarType() == MVT::bf16) {
2593 if (WideVT.getScalarType() == MVT::f32 &&
2594 (STI.getSmVersion() < 80 || STI.getPTXVersion() < 71)) {
2595 SDLoc Loc(Op);
2596 return DAG.getNode(ISD::BF16_TO_FP, Loc, WideVT, Narrow);
2597 }
2598 if (WideVT.getScalarType() == MVT::f64 &&
2599 (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78)) {
2600 EVT F32 = NarrowVT.isVector() ? NarrowVT.changeVectorElementType(MVT::f32)
2601 : MVT::f32;
2602 SDLoc Loc(Op);
2603 if (STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 71) {
2604 Op = DAG.getNode(ISD::FP_EXTEND, Loc, F32, Narrow);
2605 } else {
2606 Op = DAG.getNode(ISD::BF16_TO_FP, Loc, F32, Narrow);
2607 }
2608 return DAG.getNode(ISD::FP_EXTEND, Loc, WideVT, Op);
2609 }
2610 }
2611
2612 // Everything else is considered legal.
2613 return Op;
2614}
2615
2617 SDLoc DL(Op);
2618 if (Op.getValueType() != MVT::v2i16)
2619 return Op;
2620 EVT EltVT = Op.getValueType().getVectorElementType();
2621 SmallVector<SDValue> VecElements;
2622 for (int I = 0, E = Op.getValueType().getVectorNumElements(); I < E; I++) {
2623 SmallVector<SDValue> ScalarArgs;
2624 llvm::transform(Op->ops(), std::back_inserter(ScalarArgs),
2625 [&](const SDUse &O) {
2626 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
2627 O.get(), DAG.getIntPtrConstant(I, DL));
2628 });
2629 VecElements.push_back(DAG.getNode(Op.getOpcode(), DL, EltVT, ScalarArgs));
2630 }
2631 SDValue V =
2632 DAG.getNode(ISD::BUILD_VECTOR, DL, Op.getValueType(), VecElements);
2633 return V;
2634}
2635
2637 SDNode *N = Op.getNode();
2638 SDLoc DL(N);
2640
2641 // split the vector argument
2642 for (size_t I = 0; I < N->getNumOperands(); I++) {
2643 SDValue Val = N->getOperand(I);
2644 EVT ValVT = Val.getValueType();
2645 if (ValVT.isVector()) {
2646 EVT EltVT = ValVT.getVectorElementType();
2647 for (unsigned J = 0, NElts = ValVT.getVectorNumElements(); J < NElts; J++)
2648 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
2649 DAG.getIntPtrConstant(J, DL)));
2650 } else
2651 Ops.push_back(Val);
2652 }
2653
2655 SDValue Tcgen05StNode =
2656 DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, N->getVTList(), Ops,
2657 MemSD->getMemoryVT(), MemSD->getMemOperand());
2658
2659 return Tcgen05StNode;
2660}
2661
2662static unsigned getTcgen05MMADisableOutputLane(unsigned IID) {
2663 switch (IID) {
2664 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
2666 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
2668 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
2670 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
2672 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
2674 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
2676 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
2678 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
2680 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
2682 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
2684 case Intrinsic::
2685 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
2687 case Intrinsic::
2688 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
2690 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
2692 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
2694 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
2696 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
2698 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
2700 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
2702 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
2704 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
2706 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
2708 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
2710 case Intrinsic::
2711 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift:
2712 return NVPTXISD::
2713 TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2714 case Intrinsic::
2715 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift:
2716 return NVPTXISD::
2717 TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
2718 };
2719 llvm_unreachable("unhandled tcgen05.mma.disable_output_lane intrinsic");
2720}
2721
2723 SDNode *N = Op.getNode();
2724 SDLoc DL(N);
2725 unsigned IID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2726
2728 // split the vector argument
2729 for (size_t I = 0; I < N->getNumOperands(); I++) {
2730 if (I == 1)
2731 continue; // skip IID
2732 SDValue Val = N->getOperand(I);
2733 EVT ValVT = Val.getValueType();
2734 if (ValVT.isVector()) {
2735 EVT EltVT = ValVT.getVectorElementType();
2736 for (unsigned J = 0, NElts = ValVT.getVectorNumElements(); J < NElts; J++)
2737 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
2738 DAG.getIntPtrConstant(J, DL)));
2739 } else
2740 Ops.push_back(Val);
2741 }
2742
2744 SDValue Tcgen05MMANode = DAG.getMemIntrinsicNode(
2745 getTcgen05MMADisableOutputLane(IID), DL, N->getVTList(), Ops,
2746 MemSD->getMemoryVT(), MemSD->getMemOperand());
2747
2748 return Tcgen05MMANode;
2749}
2750
2751// Lower vector return type of tcgen05.ld intrinsics
2752static std::optional<std::pair<SDValue, SDValue>>
2753lowerTcgen05Ld(SDNode *N, SelectionDAG &DAG, bool HasOffset = false) {
2754 SDLoc DL(N);
2755 EVT ResVT = N->getValueType(0);
2756 if (!ResVT.isVector())
2757 return {}; // already legalized.
2758
2759 const unsigned NumElts = ResVT.getVectorNumElements();
2760
2761 // Create the return type of the instructions
2762 SmallVector<EVT, 5> ListVTs;
2763 for (unsigned i = 0; i < NumElts; ++i)
2764 ListVTs.push_back(MVT::i32);
2765
2766 ListVTs.push_back(N->getValueType(1)); // Chain
2767
2768 SDVTList ResVTs = DAG.getVTList(ListVTs);
2769
2770 SmallVector<SDValue, 8> Ops{N->getOperand(0), N->getOperand(1),
2771 N->getOperand(2)};
2772
2773 if (HasOffset) {
2774 Ops.push_back(N->getOperand(3)); // offset
2775 Ops.push_back(N->getOperand(4)); // Pack flag
2776 } else
2777 Ops.push_back(N->getOperand(3)); // Pack flag
2778
2780 SDValue NewNode =
2782 MemSD->getMemoryVT(), MemSD->getMemOperand());
2783
2784 // split the vector result
2785 SmallVector<SDValue, 4> ScalarRes;
2786 for (unsigned i = 0; i < NumElts; ++i) {
2787 SDValue Res = NewNode.getValue(i);
2788 ScalarRes.push_back(Res);
2789 }
2790
2791 SDValue Chain = NewNode.getValue(NumElts);
2792 SDValue BuildVector = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
2793 return {{BuildVector, Chain}};
2794}
2795
2797 SDNode *N = Op.getNode();
2798 SDValue Intrin = N->getOperand(1);
2799
2800 // Get the intrinsic ID
2801 unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
2802 switch (IntrinNo) {
2803 default:
2804 break;
2805 case Intrinsic::nvvm_tcgen05_st_16x64b_x1:
2806 case Intrinsic::nvvm_tcgen05_st_16x64b_x2:
2807 case Intrinsic::nvvm_tcgen05_st_16x64b_x4:
2808 case Intrinsic::nvvm_tcgen05_st_16x64b_x8:
2809 case Intrinsic::nvvm_tcgen05_st_16x64b_x16:
2810 case Intrinsic::nvvm_tcgen05_st_16x64b_x32:
2811 case Intrinsic::nvvm_tcgen05_st_16x64b_x128:
2812 case Intrinsic::nvvm_tcgen05_st_16x128b_x1:
2813 case Intrinsic::nvvm_tcgen05_st_16x128b_x2:
2814 case Intrinsic::nvvm_tcgen05_st_16x128b_x4:
2815 case Intrinsic::nvvm_tcgen05_st_16x128b_x8:
2816 case Intrinsic::nvvm_tcgen05_st_16x128b_x16:
2817 case Intrinsic::nvvm_tcgen05_st_16x128b_x32:
2818 case Intrinsic::nvvm_tcgen05_st_16x128b_x64:
2819 case Intrinsic::nvvm_tcgen05_st_16x256b_x1:
2820 case Intrinsic::nvvm_tcgen05_st_16x256b_x2:
2821 case Intrinsic::nvvm_tcgen05_st_16x256b_x4:
2822 case Intrinsic::nvvm_tcgen05_st_16x256b_x8:
2823 case Intrinsic::nvvm_tcgen05_st_16x256b_x16:
2824 case Intrinsic::nvvm_tcgen05_st_16x256b_x32:
2825 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x1:
2826 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x2:
2827 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x4:
2828 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x8:
2829 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x16:
2830 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x32:
2831 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x64:
2832 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x128:
2833 case Intrinsic::nvvm_tcgen05_st_32x32b_x1:
2834 case Intrinsic::nvvm_tcgen05_st_32x32b_x2:
2835 case Intrinsic::nvvm_tcgen05_st_32x32b_x4:
2836 case Intrinsic::nvvm_tcgen05_st_32x32b_x8:
2837 case Intrinsic::nvvm_tcgen05_st_32x32b_x16:
2838 case Intrinsic::nvvm_tcgen05_st_32x32b_x32:
2839 case Intrinsic::nvvm_tcgen05_st_16x64b_x64:
2840 case Intrinsic::nvvm_tcgen05_st_32x32b_x64:
2841 case Intrinsic::nvvm_tcgen05_st_32x32b_x128:
2842 return lowerTcgen05St(Op, DAG);
2843 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
2844 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
2845 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
2846 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
2847 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
2848 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
2849 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
2850 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
2851 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
2852 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
2853 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
2854 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
2855 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
2856 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
2857 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
2858 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
2859 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
2860 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
2861 case Intrinsic::
2862 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
2863 case Intrinsic::
2864 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
2865 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
2866 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
2867 case Intrinsic::
2868 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift:
2869 case Intrinsic::
2870 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift:
2872 }
2873 return Op;
2874}
2875
2877 SelectionDAG &DAG) {
2878
2879 SDNode *N = Op.getNode();
2880 if (N->getOperand(1).getValueType() != MVT::i128) {
2881    // Return if the operand has already been lowered.
2882 return SDValue();
2883 }
2884
2885 unsigned IID =
2886 cast<ConstantSDNode>(N->getOperand(0).getNode())->getZExtValue();
2887 auto Opcode = [&]() {
2888 switch (IID) {
2889 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_is_canceled:
2891 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_x:
2893 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y:
2895 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z:
2897 default:
2898 llvm_unreachable("unsupported/unhandled intrinsic");
2899 }
2900 }();
2901
2902 SDLoc DL(N);
2903 SDValue TryCancelResponse = N->getOperand(1);
2904 SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TryCancelResponse);
2905 SDValue TryCancelResponse0 =
2906 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Cast,
2907 DAG.getIntPtrConstant(0, DL));
2908 SDValue TryCancelResponse1 =
2909 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Cast,
2910 DAG.getIntPtrConstant(1, DL));
2911
2912 return DAG.getNode(Opcode, DL, N->getVTList(),
2913 {TryCancelResponse0, TryCancelResponse1});
2914}
2915
2917 SDNode *N = Op.getNode();
2918 SDLoc DL(N);
2919 SDValue F32Vec = N->getOperand(1);
2920 SDValue RBits = N->getOperand(2);
2921
2922 unsigned IntrinsicID = N->getConstantOperandVal(0);
2923
2924 // Extract the 4 float elements from the vector
2926 for (unsigned i = 0; i < 4; ++i)
2927 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, F32Vec,
2928 DAG.getIntPtrConstant(i, DL)));
2929
2931
2932 auto [OpCode, RetTy, CvtModeFlag] =
2933 [&]() -> std::tuple<NVPTXISD::NodeType, MVT::SimpleValueType, uint32_t> {
2934 switch (IntrinsicID) {
2935 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite:
2936 return {NVPTXISD::CVT_E4M3X4_F32X4_RS_SF, MVT::v4i8,
2937 CvtMode::RS | CvtMode::RELU_FLAG};
2938 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite:
2939 return {NVPTXISD::CVT_E4M3X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2940 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite:
2941 return {NVPTXISD::CVT_E5M2X4_F32X4_RS_SF, MVT::v4i8,
2942 CvtMode::RS | CvtMode::RELU_FLAG};
2943 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite:
2944 return {NVPTXISD::CVT_E5M2X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2945 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite:
2946 return {NVPTXISD::CVT_E2M3X4_F32X4_RS_SF, MVT::v4i8,
2947 CvtMode::RS | CvtMode::RELU_FLAG};
2948 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite:
2949 return {NVPTXISD::CVT_E2M3X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2950 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite:
2951 return {NVPTXISD::CVT_E3M2X4_F32X4_RS_SF, MVT::v4i8,
2952 CvtMode::RS | CvtMode::RELU_FLAG};
2953 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite:
2954 return {NVPTXISD::CVT_E3M2X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2955 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite:
2956 return {NVPTXISD::CVT_E2M1X4_F32X4_RS_SF, MVT::i16,
2957 CvtMode::RS | CvtMode::RELU_FLAG};
2958 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite:
2959 return {NVPTXISD::CVT_E2M1X4_F32X4_RS_SF, MVT::i16, CvtMode::RS};
2960 default:
2961 llvm_unreachable("unsupported/unhandled intrinsic");
2962 }
2963 }();
2964
2965 Ops.push_back(RBits);
2966 Ops.push_back(DAG.getConstant(CvtModeFlag, DL, MVT::i32));
2967
2968 return DAG.getNode(OpCode, DL, RetTy, Ops);
2969}
2970
2972 const unsigned Mode = [&]() {
2973 switch (Op->getConstantOperandVal(0)) {
2974 case Intrinsic::nvvm_prmt:
2976 case Intrinsic::nvvm_prmt_b4e:
2978 case Intrinsic::nvvm_prmt_ecl:
2980 case Intrinsic::nvvm_prmt_ecr:
2982 case Intrinsic::nvvm_prmt_f4e:
2984 case Intrinsic::nvvm_prmt_rc16:
2986 case Intrinsic::nvvm_prmt_rc8:
2988 default:
2989 llvm_unreachable("unsupported/unhandled intrinsic");
2990 }
2991 }();
2992 SDLoc DL(Op);
2993 SDValue A = Op->getOperand(1);
2994 SDValue B = Op.getNumOperands() == 4 ? Op.getOperand(2)
2995 : DAG.getConstant(0, DL, MVT::i32);
2996 SDValue Selector = (Op->op_end() - 1)->get();
2997 return getPRMT(A, B, Selector, DL, DAG, Mode);
2998}
2999
3001 switch (Op->getConstantOperandVal(1)) {
3002 default:
3003 return Op;
3004
3005 // These tcgen05 intrinsics return a v2i32, which is legal, so we have to
3006 // lower them through LowerOperation() instead of ReplaceNodeResults().
3007 case Intrinsic::nvvm_tcgen05_ld_16x64b_x2:
3008 case Intrinsic::nvvm_tcgen05_ld_16x128b_x1:
3009 case Intrinsic::nvvm_tcgen05_ld_32x32b_x2:
3010 if (auto Res = lowerTcgen05Ld(Op.getNode(), DAG))
3011 return DAG.getMergeValues({Res->first, Res->second}, SDLoc(Op));
3012 return SDValue();
3013
3014 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2:
3015 if (auto Res = lowerTcgen05Ld(Op.getNode(), DAG, /*HasOffset=*/true))
3016 return DAG.getMergeValues({Res->first, Res->second}, SDLoc(Op));
3017 return SDValue();
3018 }
3019}
3020
3022 switch (Op->getConstantOperandVal(0)) {
3023 default:
3024 return Op;
3025 case Intrinsic::nvvm_prmt:
3026 case Intrinsic::nvvm_prmt_b4e:
3027 case Intrinsic::nvvm_prmt_ecl:
3028 case Intrinsic::nvvm_prmt_ecr:
3029 case Intrinsic::nvvm_prmt_f4e:
3030 case Intrinsic::nvvm_prmt_rc16:
3031 case Intrinsic::nvvm_prmt_rc8:
3032 return lowerPrmtIntrinsic(Op, DAG);
3033 case Intrinsic::nvvm_internal_addrspace_wrap:
3034 return Op.getOperand(1);
3035 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_is_canceled:
3036 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_x:
3037 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y:
3038 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z:
3040 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite:
3041 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite:
3042 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite:
3043 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite:
3044 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite:
3045 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite:
3046 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite:
3047 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite:
3048 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite:
3049 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite:
3050 return lowerCvtRSIntrinsics(Op, DAG);
3051 }
3052}
3053
3054// In PTX 64-bit CTLZ and CTPOP are supported, but they return a 32-bit value.
3055// Lower these into an i32-result node whose value is then zero-extended
3056// back to the original 64-bit type.
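// For example, (i64 ctlz %x) is selected to the 32-bit-result PTX clz.b64 and
// then zero-extended: (i64 zext (i32 ctlz %x)).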
3057static SDValue lowerCTLZCTPOP(SDValue Op, SelectionDAG &DAG) {
3058  SDValue V = Op->getOperand(0);
3059 assert(V.getValueType() == MVT::i64 &&
3060 "Unexpected CTLZ/CTPOP type to legalize");
3061
3062 SDLoc DL(Op);
3063 SDValue CT = DAG.getNode(Op->getOpcode(), DL, MVT::i32, V);
3064 return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, CT, SDNodeFlags::NonNeg);
3065}
3066
3067static SDValue expandFSH64(SDValue A, SDValue B, SDValue ShiftAmount, SDLoc DL,
3068                           unsigned Opcode, SelectionDAG &DAG) {
3069 assert(A.getValueType() == MVT::i64 && B.getValueType() == MVT::i64);
3070
3071 const auto *AmtConst = dyn_cast<ConstantSDNode>(ShiftAmount);
3072 if (!AmtConst)
3073 return SDValue();
3074 const auto Amt = AmtConst->getZExtValue() & 63;
3075
3076 SDValue UnpackA =
3077 DAG.getNode(NVPTXISD::UNPACK_VECTOR, DL, {MVT::i32, MVT::i32}, A);
3078 SDValue UnpackB =
3079 DAG.getNode(NVPTXISD::UNPACK_VECTOR, DL, {MVT::i32, MVT::i32}, B);
3080
3081  // The architecture is little-endian: 0 = low bits, 1 = high bits
3082 SDValue ALo = UnpackA.getValue(0);
3083 SDValue AHi = UnpackA.getValue(1);
3084 SDValue BLo = UnpackB.getValue(0);
3085 SDValue BHi = UnpackB.getValue(1);
3086
3087  // The bitfield consists of { AHi : ALo : BHi : BLo }
3088 //
3089 // * FSHL, Amt < 32 - The window will contain { AHi : ALo : BHi }
3090 // * FSHL, Amt >= 32 - The window will contain { ALo : BHi : BLo }
3091 // * FSHR, Amt < 32 - The window will contain { ALo : BHi : BLo }
3092 // * FSHR, Amt >= 32 - The window will contain { AHi : ALo : BHi }
3093 //
3094 // Note that Amt = 0 and Amt = 32 are special cases where 32-bit funnel shifts
3095 // are not needed at all. Amt = 0 is a no-op producing either A or B depending
3096 // on the direction. Amt = 32 can be implemented by a packing and unpacking
3097  // move to select and arrange the 32-bit values. For simplicity, these cases
3098 // are not handled here explicitly and instead we rely on DAGCombiner to
3099 // remove the no-op funnel shifts we insert.
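  // For example, an FSHL of {A, B} by 8 uses the window {AHi : ALo : BHi} and
  // produces RHi = fshl(AHi, ALo, 8) and RLo = fshl(ALo, BHi, 8), which are
  // then packed back into a 64-bit result.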
3100 auto [High, Mid, Low] = ((Opcode == ISD::FSHL) == (Amt < 32))
3101 ? std::make_tuple(AHi, ALo, BHi)
3102 : std::make_tuple(ALo, BHi, BLo);
3103
3104 SDValue NewAmt = DAG.getConstant(Amt & 31, DL, MVT::i32);
3105 SDValue RHi = DAG.getNode(Opcode, DL, MVT::i32, {High, Mid, NewAmt});
3106 SDValue RLo = DAG.getNode(Opcode, DL, MVT::i32, {Mid, Low, NewAmt});
3107
3108 return DAG.getNode(NVPTXISD::BUILD_VECTOR, DL, MVT::i64, {RLo, RHi});
3109}
3110
3112 return expandFSH64(Op->getOperand(0), Op->getOperand(1), Op->getOperand(2),
3113 SDLoc(Op), Op->getOpcode(), DAG);
3114}
3115
3117 unsigned Opcode = Op->getOpcode() == ISD::ROTL ? ISD::FSHL : ISD::FSHR;
3118 return expandFSH64(Op->getOperand(0), Op->getOperand(0), Op->getOperand(1),
3119 SDLoc(Op), Opcode, DAG);
3120}
3121
3122static SDValue lowerFREM(SDValue Op, SelectionDAG &DAG) {
3123  // Lower (frem x, y) into (sub x, (mul (ftrunc (div x, y)) y)),
3124 // i.e. "poor man's fmod()". When y is infinite, x is returned. This matches
3125 // the semantics of LLVM's frem.
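  // For example, frem(5.5, 2.0) computes 5.5 - ftrunc(5.5 / 2.0) * 2.0
  // = 5.5 - 2.0 * 2.0 = 1.5.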
3126 SDLoc DL(Op);
3127 SDValue X = Op->getOperand(0);
3128 SDValue Y = Op->getOperand(1);
3129 EVT Ty = Op.getValueType();
3130 SDNodeFlags Flags = Op->getFlags();
3131
3132 SDValue Div = DAG.getNode(ISD::FDIV, DL, Ty, X, Y, Flags);
3133 SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, Ty, Div, Flags);
3134 SDValue Mul = DAG.getNode(ISD::FMUL, DL, Ty, Trunc, Y,
3136 SDValue Sub = DAG.getNode(ISD::FSUB, DL, Ty, X, Mul,
3138
3139 if (Flags.hasNoInfs())
3140 return Sub;
3141
3142 // If Y is infinite, return X
3143 SDValue AbsY = DAG.getNode(ISD::FABS, DL, Ty, Y);
3144 SDValue Inf =
3145 DAG.getConstantFP(APFloat::getInf(Ty.getFltSemantics()), DL, Ty);
3146 SDValue IsInf = DAG.getSetCC(DL, MVT::i1, AbsY, Inf, ISD::SETEQ);
3147 return DAG.getSelect(DL, Ty, IsInf, X, Sub);
3148}
3149
3150static SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) {
3151  assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");
3152
3153 SDValue Cond = Op->getOperand(0);
3154 SDValue TrueVal = Op->getOperand(1);
3155 SDValue FalseVal = Op->getOperand(2);
3156 SDLoc DL(Op);
3157
3158 // If both operands are truncated, we push the select through the truncates.
3159 if (TrueVal.getOpcode() == ISD::TRUNCATE &&
3160 FalseVal.getOpcode() == ISD::TRUNCATE) {
3161 TrueVal = TrueVal.getOperand(0);
3162 FalseVal = FalseVal.getOperand(0);
3163
3164 EVT VT = TrueVal.getSimpleValueType().bitsLE(FalseVal.getSimpleValueType())
3165 ? TrueVal.getValueType()
3166 : FalseVal.getValueType();
3167 TrueVal = DAG.getAnyExtOrTrunc(TrueVal, DL, VT);
3168 FalseVal = DAG.getAnyExtOrTrunc(FalseVal, DL, VT);
3169 SDValue Select = DAG.getSelect(DL, VT, Cond, TrueVal, FalseVal);
3170 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Select);
3171 }
3172
3173 // Otherwise, expand the select into a series of logical operations. These
3174  // can often be folded into other operations, either by us or by ptxas.
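  // i.e. select(Cond, T, F) is emitted as (Cond & T) | (~Cond & F) on i1, with
  // both values frozen first so that a poison or undef unselected value cannot
  // corrupt the result.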
3175 TrueVal = DAG.getFreeze(TrueVal);
3176 FalseVal = DAG.getFreeze(FalseVal);
3177 SDValue And1 = DAG.getNode(ISD::AND, DL, MVT::i1, Cond, TrueVal);
3178 SDValue NotCond = DAG.getNOT(DL, Cond, MVT::i1);
3179 SDValue And2 = DAG.getNode(ISD::AND, DL, MVT::i1, NotCond, FalseVal);
3180 SDValue Or = DAG.getNode(ISD::OR, DL, MVT::i1, And1, And2);
3181 return Or;
3182}
3183
3184SDValue
3185NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
3186  switch (Op.getOpcode()) {
3187 case ISD::RETURNADDR:
3188 return SDValue();
3189 case ISD::FRAMEADDR:
3190 return SDValue();
3191 case ISD::ADDRSPACECAST:
3192 return LowerADDRSPACECAST(Op, DAG);
3194 return lowerIntrinsicWChain(Op, DAG);
3196 return lowerIntrinsicWOChain(Op, DAG);
3198 return lowerIntrinsicVoid(Op, DAG);
3199 case ISD::BUILD_VECTOR:
3200 return LowerBUILD_VECTOR(Op, DAG);
3201 case ISD::BITCAST:
3202 return LowerBITCAST(Op, DAG);
3204 return Op;
3206 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
3208 return LowerINSERT_VECTOR_ELT(Op, DAG);
3210 return LowerVECTOR_SHUFFLE(Op, DAG);
3212 return LowerCONCAT_VECTORS(Op, DAG);
3213 case ISD::VECREDUCE_FMAX:
3214 case ISD::VECREDUCE_FMIN:
3215 case ISD::VECREDUCE_FMAXIMUM:
3216 case ISD::VECREDUCE_FMINIMUM:
3217 return LowerVECREDUCE(Op, DAG);
3218 case ISD::STORE:
3219 return LowerSTORE(Op, DAG);
3220 case ISD::LOAD:
3221 return LowerLOAD(Op, DAG);
3222 case ISD::SHL_PARTS:
3223 return LowerShiftLeftParts(Op, DAG);
3224 case ISD::SRA_PARTS:
3225 case ISD::SRL_PARTS:
3226 return LowerShiftRightParts(Op, DAG);
3227 case ISD::SELECT:
3228 return lowerSELECT(Op, DAG);
3229 case ISD::FROUND:
3230 return LowerFROUND(Op, DAG);
3231 case ISD::FCOPYSIGN:
3232 return LowerFCOPYSIGN(Op, DAG);
3233 case ISD::SINT_TO_FP:
3234 case ISD::UINT_TO_FP:
3235 return LowerINT_TO_FP(Op, DAG);
3236 case ISD::FP_TO_SINT:
3237 case ISD::FP_TO_UINT:
3238 return LowerFP_TO_INT(Op, DAG);
3239 case ISD::FP_ROUND:
3240 return LowerFP_ROUND(Op, DAG);
3241 case ISD::FP_EXTEND:
3242 return LowerFP_EXTEND(Op, DAG);
3243 case ISD::BR_JT:
3244 return LowerBR_JT(Op, DAG);
3245 case ISD::VAARG:
3246 return LowerVAARG(Op, DAG);
3247 case ISD::VASTART:
3248 return LowerVASTART(Op, DAG);
3249 case ISD::FSHL:
3250 case ISD::FSHR:
3251 return lowerFSH(Op, DAG);
3252 case ISD::ROTL:
3253 case ISD::ROTR:
3254 return lowerROT(Op, DAG);
3255 case ISD::ABS:
3256 case ISD::SMIN:
3257 case ISD::SMAX:
3258 case ISD::UMIN:
3259 case ISD::UMAX:
3260 case ISD::ADD:
3261 case ISD::SUB:
3262 case ISD::MUL:
3263 case ISD::SHL:
3264 case ISD::SREM:
3265 case ISD::UREM:
3266 return LowerVectorArith(Op, DAG);
3267 case ISD::DYNAMIC_STACKALLOC:
3268 return LowerDYNAMIC_STACKALLOC(Op, DAG);
3269 case ISD::STACKRESTORE:
3270 return LowerSTACKRESTORE(Op, DAG);
3271 case ISD::STACKSAVE:
3272 return LowerSTACKSAVE(Op, DAG);
3273 case ISD::CopyToReg:
3274 return LowerCopyToReg_128(Op, DAG);
3275 case ISD::FADD:
3276 case ISD::FSUB:
3277 case ISD::FMUL:
3278 // Used only for bf16 on SM80, where we select fma for non-ftz operation
3279 return PromoteBinOpIfF32FTZ(Op, DAG);
3280 case ISD::CTPOP:
3281 case ISD::CTLZ:
3282 return lowerCTLZCTPOP(Op, DAG);
3283 case ISD::FREM:
3284 return lowerFREM(Op, DAG);
3285
3286 default:
3287 llvm_unreachable("Custom lowering not defined for operation");
3288 }
3289}
3290
3291SDValue NVPTXTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
3292 SDLoc DL(Op);
3293 SDValue Chain = Op.getOperand(0);
3294 const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
3295 SDValue Index = Op.getOperand(2);
3296
3297 unsigned JId = JT->getIndex();
3299 ArrayRef<MachineBasicBlock *> MBBs = MJTI->getJumpTables()[JId].MBBs;
3300
3301 SDValue IdV = DAG.getConstant(JId, DL, MVT::i32);
3302
3303 // Generate BrxStart node
3304 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
3305 Chain = DAG.getNode(NVPTXISD::BrxStart, DL, VTs, Chain, IdV);
3306
3307 // Generate BrxItem nodes
3308 assert(!MBBs.empty());
3309 for (MachineBasicBlock *MBB : MBBs.drop_back())
3310 Chain = DAG.getNode(NVPTXISD::BrxItem, DL, VTs, Chain.getValue(0),
3311 DAG.getBasicBlock(MBB), Chain.getValue(1));
3312
3313 // Generate BrxEnd nodes
3314 SDValue EndOps[] = {Chain.getValue(0), DAG.getBasicBlock(MBBs.back()), Index,
3315 IdV, Chain.getValue(1)};
3316 SDValue BrxEnd = DAG.getNode(NVPTXISD::BrxEnd, DL, VTs, EndOps);
3317
3318 return BrxEnd;
3319}
3320
3321// This will prevent AsmPrinter from trying to print the jump tables itself.
3325
3326SDValue NVPTXTargetLowering::LowerADDRSPACECAST(SDValue Op,
3327 SelectionDAG &DAG) const {
3329 unsigned SrcAS = N->getSrcAddressSpace();
3330 unsigned DestAS = N->getDestAddressSpace();
3331 if (SrcAS != llvm::ADDRESS_SPACE_GENERIC &&
3332 DestAS != llvm::ADDRESS_SPACE_GENERIC) {
3333 // Shared and SharedCluster can be converted to each other through generic
3334 // space
3335 if ((SrcAS == llvm::ADDRESS_SPACE_SHARED &&
3338 DestAS == llvm::ADDRESS_SPACE_SHARED)) {
3339 SDLoc DL(Op.getNode());
3340 const MVT GenerictVT =
3341        getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_GENERIC);
3342    SDValue GenericConversion = DAG.getAddrSpaceCast(
3343 DL, GenerictVT, Op.getOperand(0), SrcAS, ADDRESS_SPACE_GENERIC);
3344 SDValue SharedClusterConversion =
3345 DAG.getAddrSpaceCast(DL, Op.getValueType(), GenericConversion,
3346 ADDRESS_SPACE_GENERIC, DestAS);
3347 return SharedClusterConversion;
3348 }
3349
3350 return DAG.getUNDEF(Op.getValueType());
3351 }
3352
3353 return Op;
3354}
3355
3356// This function is almost a copy of SelectionDAG::expandVAArg().
3357// The only difference is that this one produces loads from the local address space.
3358SDValue NVPTXTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3359 const TargetLowering *TLI = STI.getTargetLowering();
3360 SDLoc DL(Op);
3361
3362 SDNode *Node = Op.getNode();
3363 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3364 EVT VT = Node->getValueType(0);
3365 auto *Ty = VT.getTypeForEVT(*DAG.getContext());
3366 SDValue Tmp1 = Node->getOperand(0);
3367 SDValue Tmp2 = Node->getOperand(1);
3368 const MaybeAlign MA(Node->getConstantOperandVal(3));
3369
3370 SDValue VAListLoad = DAG.getLoad(TLI->getPointerTy(DAG.getDataLayout()), DL,
3371 Tmp1, Tmp2, MachinePointerInfo(V));
3372 SDValue VAList = VAListLoad;
3373
3374 if (MA && *MA > TLI->getMinStackArgumentAlignment()) {
3375 VAList = DAG.getNode(
3376 ISD::ADD, DL, VAList.getValueType(), VAList,
3377 DAG.getConstant(MA->value() - 1, DL, VAList.getValueType()));
3378
3379 VAList = DAG.getNode(ISD::AND, DL, VAList.getValueType(), VAList,
3380 DAG.getSignedConstant(-(int64_t)MA->value(), DL,
3381 VAList.getValueType()));
3382 }
3383
3384 // Increment the pointer, VAList, to the next vaarg
3385 Tmp1 = DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
3386 DAG.getConstant(DAG.getDataLayout().getTypeAllocSize(Ty),
3387 DL, VAList.getValueType()));
3388
3389 // Store the incremented VAList to the legalized pointer
3390 Tmp1 = DAG.getStore(VAListLoad.getValue(1), DL, Tmp1, Tmp2,
3391 MachinePointerInfo(V));
3392
3393 const Value *SrcV = Constant::getNullValue(
3394 PointerType::get(Ty, ADDRESS_SPACE_LOCAL));
3395
3396 // Load the actual argument out of the pointer VAList
3397 return DAG.getLoad(VT, DL, Tmp1, VAList, MachinePointerInfo(SrcV));
3398}
3399
3400SDValue NVPTXTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3401 const TargetLowering *TLI = STI.getTargetLowering();
3402 SDLoc DL(Op);
3403 EVT PtrVT = TLI->getPointerTy(DAG.getDataLayout());
3404
3405 // Store the address of unsized array <function>_vararg[] in the ap object.
3406 SDValue VAReg = getParamSymbol(DAG, /* vararg */ -1, PtrVT);
3407
3408 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3409 return DAG.getStore(Op.getOperand(0), DL, VAReg, Op.getOperand(1),
3410 MachinePointerInfo(SV));
3411}
3412
3413/// replaceLoadVector - Convert vector loads into multi-output scalar loads.
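/// For example, a sufficiently aligned load of <4 x float> becomes an
/// NVPTXISD::LoadV4 with four scalar results plus a chain; the scalars are
/// then reassembled with BUILD_VECTOR (and a bitcast when sub-vectors such as
/// v2f16 or v4i8 are involved) to produce the original vector value.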
3414static std::optional<std::pair<SDValue, SDValue>>
3415 replaceLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI) {
3416 LoadSDNode *LD = cast<LoadSDNode>(N);
3417 const EVT ResVT = LD->getValueType(0);
3418 const EVT MemVT = LD->getMemoryVT();
3419
3420 // If we're doing sign/zero extension as part of the load, avoid lowering to
3421 // a LoadV node. TODO: consider relaxing this restriction.
3422 if (ResVT != MemVT)
3423 return std::nullopt;
3424
3425 const auto NumEltsAndEltVT =
3426 getVectorLoweringShape(ResVT, STI, LD->getAddressSpace());
3427 if (!NumEltsAndEltVT)
3428 return std::nullopt;
3429 const auto [NumElts, EltVT] = NumEltsAndEltVT.value();
3430
3431 Align Alignment = LD->getAlign();
3432 const auto &TD = DAG.getDataLayout();
3433 Align PrefAlign = TD.getPrefTypeAlign(MemVT.getTypeForEVT(*DAG.getContext()));
3434 if (Alignment < PrefAlign) {
3435 // This load is not sufficiently aligned, so bail out and let this vector
3436 // load be scalarized. Note that we may still be able to emit smaller
3437 // vector loads. For example, if we are loading a <4 x float> with an
3438 // alignment of 8, this check will fail but the legalizer will try again
3439 // with 2 x <2 x float>, which will succeed with an alignment of 8.
3440 return std::nullopt;
3441 }
3442
3443 // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
3444 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
3445 // loaded type to i16 and propagate the "real" type as the memory type.
3446 const MVT LoadEltVT = (EltVT.getSizeInBits() < 16) ? MVT::i16 : EltVT;
3447
3448 unsigned Opcode;
3449 switch (NumElts) {
3450 default:
3451 return std::nullopt;
3452 case 2:
3453 Opcode = NVPTXISD::LoadV2;
3454 break;
3455 case 4:
3456 Opcode = NVPTXISD::LoadV4;
3457 break;
3458 case 8:
3459 Opcode = NVPTXISD::LoadV8;
3460 break;
3461 }
3462 auto ListVTs = SmallVector<EVT, 9>(NumElts, LoadEltVT);
3463 ListVTs.push_back(MVT::Other);
3464 SDVTList LdResVTs = DAG.getVTList(ListVTs);
3465
3466 SDLoc DL(LD);
3467
3468 // Copy regular operands
3469 SmallVector<SDValue, 8> OtherOps(LD->ops());
3470
3471 // The select routine does not have access to the LoadSDNode instance, so
3472 // pass along the extension information
3473 OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType(), DL));
3474
3475 SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps, MemVT,
3476 LD->getMemOperand());
3477
3478 SmallVector<SDValue> ScalarRes;
3479 if (EltVT.isVector()) {
3481 assert(NumElts * EltVT.getVectorNumElements() ==
3482 ResVT.getVectorNumElements());
3483 // Generate EXTRACT_VECTOR_ELTs to split v2[i,f,bf]16/v4i8 subvectors back
3484 // into individual elements.
3485 for (const unsigned I : llvm::seq(NumElts)) {
3486 SDValue SubVector = NewLD.getValue(I);
3487 DAG.ExtractVectorElements(SubVector, ScalarRes);
3488 }
3489 } else {
3490 for (const unsigned I : llvm::seq(NumElts)) {
3491 SDValue Res = NewLD.getValue(I);
3492 if (LoadEltVT != EltVT)
3493 Res = DAG.getNode(ISD::TRUNCATE, DL, EltVT, Res);
3494 ScalarRes.push_back(Res);
3495 }
3496 }
3497
3498 SDValue LoadChain = NewLD.getValue(NumElts);
3499
3500 const MVT BuildVecVT =
3501 MVT::getVectorVT(EltVT.getScalarType(), ScalarRes.size());
3502 SDValue BuildVec = DAG.getBuildVector(BuildVecVT, DL, ScalarRes);
3503 SDValue LoadValue = DAG.getBitcast(ResVT, BuildVec);
3504
3505 return {{LoadValue, LoadChain}};
3506}
3507
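// Two thin wrappers over replaceLoadVector: the first appends the replacement
// value and chain to a results list, the second merges them into a single
// MERGE_VALUES node and returns it (or an empty SDValue on failure).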
3510 const NVPTXSubtarget &STI) {
3511 if (auto Res = replaceLoadVector(N, DAG, STI))
3512 Results.append({Res->first, Res->second});
3513}
3514
3516 const NVPTXSubtarget &STI) {
3517 if (auto Res = replaceLoadVector(N, DAG, STI))
3518 return DAG.getMergeValues({Res->first, Res->second}, SDLoc(N));
3519 return SDValue();
3520}
3521
3522// v = ld i1* addr
3523// =>
3524// v1 = ld i8* addr (-> i16)
3525// v = trunc i16 to i1
3526 static SDValue lowerLOADi1(LoadSDNode *LD, SelectionDAG &DAG) {
3527 SDLoc dl(LD);
3528 assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
3529 assert(LD->getValueType(0) == MVT::i1 && "Custom lowering for i1 load only");
3530 SDValue newLD = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i16, LD->getChain(),
3531 LD->getBasePtr(), LD->getPointerInfo(),
3532 MVT::i8, LD->getAlign(),
3533 LD->getMemOperand()->getFlags());
3534 SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
3535 // The legalizer (the caller) is expecting two values from the legalized
3536 // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
3537 // in LegalizeDAG.cpp which also uses MergeValues.
3538 return DAG.getMergeValues({result, LD->getChain()}, dl);
3539}
3540
3541SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
3542 LoadSDNode *LD = cast<LoadSDNode>(Op);
3543
3544 if (Op.getValueType() == MVT::i1)
3545 return lowerLOADi1(LD, DAG);
3546
3547 // To improve CodeGen we'll legalize any-extend loads to zext loads. This is
3548 // how they'll be lowered in ISel anyway, and by doing this a little earlier
3549 // we allow for more DAG combine opportunities.
3550 if (LD->getExtensionType() == ISD::EXTLOAD) {
3551 assert(LD->getValueType(0).isInteger() && LD->getMemoryVT().isInteger() &&
3552 "Unexpected fpext-load");
3553 return DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(Op), Op.getValueType(),
3554 LD->getChain(), LD->getBasePtr(), LD->getMemoryVT(),
3555 LD->getMemOperand());
3556 }
3557
3558 llvm_unreachable("Unexpected custom lowering for load");
3559}
3560
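// Mirror of replaceLoadVector for stores: split a sufficiently aligned vector
// store into an NVPTXISD::StoreV2/V4/V8 node. Sub-vector elements (v2f16,
// v4i8, ...) are rebuilt as b32-sized operands, and i1/i8 elements are
// any-extended to i16 while the real type is kept as the memory type.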
3561 static SDValue lowerSTOREVector(SDValue Op, SelectionDAG &DAG,
3562 const NVPTXSubtarget &STI) {
3563 MemSDNode *N = cast<MemSDNode>(Op.getNode());
3564 SDValue Val = N->getOperand(1);
3565 SDLoc DL(N);
3566 const EVT ValVT = Val.getValueType();
3567 const EVT MemVT = N->getMemoryVT();
3568
3569 // If we're truncating as part of the store, avoid lowering to a StoreV node.
3570 // TODO: consider relaxing this restriction.
3571 if (ValVT != MemVT)
3572 return SDValue();
3573
3574 const auto NumEltsAndEltVT =
3575 getVectorLoweringShape(ValVT, STI, N->getAddressSpace());
3576 if (!NumEltsAndEltVT)
3577 return SDValue();
3578 const auto [NumElts, EltVT] = NumEltsAndEltVT.value();
3579
3580 const DataLayout &TD = DAG.getDataLayout();
3581
3582 Align Alignment = N->getAlign();
3583 Align PrefAlign = TD.getPrefTypeAlign(ValVT.getTypeForEVT(*DAG.getContext()));
3584 if (Alignment < PrefAlign) {
3585 // This store is not sufficiently aligned, so bail out and let this vector
3586 // store be scalarized. Note that we may still be able to emit smaller
3587 // vector stores. For example, if we are storing a <4 x float> with an
3588 // alignment of 8, this check will fail but the legalizer will try again
3589 // with 2 x <2 x float>, which will succeed with an alignment of 8.
3590 return SDValue();
3591 }
3592
3593 unsigned Opcode;
3594 switch (NumElts) {
3595 default:
3596 return SDValue();
3597 case 2:
3598 Opcode = NVPTXISD::StoreV2;
3599 break;
3600 case 4:
3601 Opcode = NVPTXISD::StoreV4;
3602 break;
3603 case 8:
3604 Opcode = NVPTXISD::StoreV8;
3605 break;
3606 }
3607
3608 SmallVector<SDValue> Ops;
3609 
3610 // First is the chain
3611 Ops.push_back(N->getOperand(0));
3612
3613 // Then the split values
3614 if (EltVT.isVector()) {
3616 assert(NumElts * EltVT.getVectorNumElements() ==
3617 ValVT.getVectorNumElements());
3618 // Combine individual elements into v2[i,f,bf]16/v4i8 subvectors to be
3619 // stored as b32s
3620 const unsigned NumEltsPerSubVector = EltVT.getVectorNumElements();
3621 for (const unsigned I : llvm::seq(NumElts)) {
3622 SmallVector<SDValue, 4> SubVectorElts;
3623 DAG.ExtractVectorElements(Val, SubVectorElts, I * NumEltsPerSubVector,
3624 NumEltsPerSubVector);
3625 Ops.push_back(DAG.getBuildVector(EltVT, DL, SubVectorElts));
3626 }
3627 } else {
3628 SDValue V = DAG.getBitcast(MVT::getVectorVT(EltVT, NumElts), Val);
3629 for (const unsigned I : llvm::seq(NumElts)) {
3630 SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, V,
3631 DAG.getIntPtrConstant(I, DL));
3632
3633 // Since StoreV2 is a target node, we cannot rely on DAG type
3634 // legalization. Therefore, we must ensure the type is legal. For i1 and
3635 // i8, we set the stored type to i16 and propagate the "real" type as the
3636 // memory type.
3637 if (EltVT.getSizeInBits() < 16)
3638 ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
3639 Ops.push_back(ExtVal);
3640 }
3641 }
3642
3643 // Then any remaining arguments
3644 Ops.append(N->op_begin() + 2, N->op_end());
3645
3646 SDValue NewSt =
3647 DAG.getMemIntrinsicNode(Opcode, DL, DAG.getVTList(MVT::Other), Ops,
3648 N->getMemoryVT(), N->getMemOperand());
3649
3650 // return DCI.CombineTo(N, NewSt, true);
3651 return NewSt;
3652}
3653
3654SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
3655 StoreSDNode *Store = cast<StoreSDNode>(Op);
3656 EVT VT = Store->getMemoryVT();
3657
3658 if (VT == MVT::i1)
3659 return LowerSTOREi1(Op, DAG);
3660
3661 // Lower stores of any other vector type, including v2f32, which we break
3662 // apart since it is not a widely supported type.
3663 return lowerSTOREVector(Op, DAG, STI);
3664}
3665
3666// st i1 v, addr
3667// =>
3668// v1 = zxt v to i16
3669// st.u8 i16, addr
3670SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
3671 SDNode *Node = Op.getNode();
3672 SDLoc dl(Node);
3673 StoreSDNode *ST = cast<StoreSDNode>(Node);
3674 SDValue Tmp1 = ST->getChain();
3675 SDValue Tmp2 = ST->getBasePtr();
3676 SDValue Tmp3 = ST->getValue();
3677 assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
3678 Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
3679 SDValue Result =
3680 DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,
3681 ST->getAlign(), ST->getMemOperand()->getFlags());
3682 return Result;
3683}
3684
3685SDValue NVPTXTargetLowering::LowerCopyToReg_128(SDValue Op,
3686 SelectionDAG &DAG) const {
3687 // Change the CopyToReg to take in two 64-bit operands instead of a 128-bit
3688 // operand so that it can pass the legalization.
3689
3690 assert(Op.getOperand(1).getValueType() == MVT::i128 &&
3691 "Custom lowering for 128-bit CopyToReg only");
3692
3693 SDNode *Node = Op.getNode();
3694 SDLoc DL(Node);
3695
3696 SDValue Cast = DAG.getBitcast(MVT::v2i64, Op->getOperand(2));
3697 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Cast,
3698 DAG.getIntPtrConstant(0, DL));
3699 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Cast,
3700 DAG.getIntPtrConstant(1, DL));
3701
3702 SmallVector<SDValue, 5> NewOps(Op.getNumOperands() + 1);
3703 SmallVector<EVT, 3> ResultsType(Node->values());
3704
3705 NewOps[0] = Op->getOperand(0); // Chain
3706 NewOps[1] = Op->getOperand(1); // Dst Reg
3707 NewOps[2] = Lo; // Lower 64-bit
3708 NewOps[3] = Hi; // Higher 64-bit
3709 if (Op.getNumOperands() == 4)
3710 NewOps[4] = Op->getOperand(3); // Glue if exists
3711
3712 return DAG.getNode(ISD::CopyToReg, DL, ResultsType, NewOps);
3713}
3714
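// i128 values are kept in a single virtual register when the caller explicitly
// asks for an i128 register type, and splitValueIntoRegisterParts cooperates by
// passing such values through unsplit. Everything else falls back to the
// default TargetLoweringBase behaviour.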
3715unsigned NVPTXTargetLowering::getNumRegisters(
3716 LLVMContext &Context, EVT VT,
3717 std::optional<MVT> RegisterVT = std::nullopt) const {
3718 if (VT == MVT::i128 && RegisterVT == MVT::i128)
3719 return 1;
3720 return TargetLoweringBase::getNumRegisters(Context, VT, RegisterVT);
3721}
3722
3723bool NVPTXTargetLowering::splitValueIntoRegisterParts(
3724 SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
3725 unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
3726 if (Val.getValueType() == MVT::i128 && NumParts == 1) {
3727 Parts[0] = Val;
3728 return true;
3729 }
3730 return false;
3731}
3732
3733 // This creates a target external symbol for a function parameter.
3734// Name of the symbol is composed from its index and the function name.
3735// Negative index corresponds to special parameter (unsized array) used for
3736// passing variable arguments.
3737SDValue NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int I,
3738 EVT T) const {
3739 StringRef SavedStr = nvTM->getStrPool().save(
3741 return DAG.getExternalSymbol(SavedStr.data(), T);
3742}
3743
3744SDValue NVPTXTargetLowering::getCallParamSymbol(SelectionDAG &DAG, int I,
3745 EVT T) const {
3746 const StringRef SavedStr = nvTM->getStrPool().save("param" + Twine(I));
3747 return DAG.getExternalSymbol(SavedStr.data(), T);
3748}
3749
3750 SDValue NVPTXTargetLowering::LowerFormalArguments(
3751 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3752 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3753 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3754 const DataLayout &DL = DAG.getDataLayout();
3755 LLVMContext &Ctx = *DAG.getContext();
3756 auto PtrVT = getPointerTy(DAG.getDataLayout());
3757
3758 const Function &F = DAG.getMachineFunction().getFunction();
3759
3760 SDValue Root = DAG.getRoot();
3761 SmallVector<SDValue, 16> OutChains;
3762
3763 // argTypes.size() (or theArgs.size()) and Ins.size() need not match.
3764 // Ins.size() will be larger
3765 // * if there is an aggregate argument with multiple fields (each field
3766 // showing up separately in Ins)
3767 // * if there is a vector argument with more than typical vector-length
3768 // elements (generally if more than 4) where each vector element is
3769 // individually present in Ins.
3770 // So a different index should be used for indexing into Ins.
3771 // See similar issue in LowerCall.
3772
3773 auto AllIns = ArrayRef(Ins);
3774 for (const auto &Arg : F.args()) {
3775 const auto ArgIns = AllIns.take_while(
3776 [&](auto I) { return I.OrigArgIndex == Arg.getArgNo(); });
3777 AllIns = AllIns.drop_front(ArgIns.size());
3778
3779 Type *Ty = Arg.getType();
3780
3781 if (ArgIns.empty())
3782 report_fatal_error("Empty parameter types are not supported");
3783
3784 if (Arg.use_empty()) {
3785 // argument is dead
3786 for (const auto &In : ArgIns) {
3787 assert(!In.Used && "Arg.use_empty() is true but Arg is used?");
3788 InVals.push_back(DAG.getUNDEF(In.VT));
3789 }
3790 continue;
3791 }
3792
3793 SDValue ArgSymbol = getParamSymbol(DAG, Arg.getArgNo(), PtrVT);
3794
3795 // In the following cases, assign a node order of "i+1"
3796 // to newly created nodes. The SDNodes for params have to
3797 // appear in the same order as their order of appearance
3798 // in the original function. "i+1" holds that order.
3799 if (Arg.hasByValAttr()) {
3800 // Param has ByVal attribute
3801 // Return MoveParam(param symbol).
3802 // Ideally, the param symbol can be returned directly,
3803 // but when SDNode builder decides to use it in a CopyToReg(),
3804 // machine instruction fails because TargetExternalSymbol
3805 // (not lowered) is target dependent, and CopyToReg assumes
3806 // the source is lowered.
3807 assert(ArgIns.size() == 1 && "ByVal argument must be a pointer");
3808 const auto &ByvalIn = ArgIns[0];
3809 assert(getValueType(DL, Ty) == ByvalIn.VT &&
3810 "Ins type did not match function type");
3811 assert(ByvalIn.VT == PtrVT && "ByVal argument must be a pointer");
3812
3813 SDValue P;
3814 if (isKernelFunction(F)) {
3815 P = ArgSymbol;
3816 P.getNode()->setIROrder(Arg.getArgNo() + 1);
3817 } else {
3818 P = DAG.getNode(NVPTXISD::MoveParam, dl, ByvalIn.VT, ArgSymbol);
3819 P.getNode()->setIROrder(Arg.getArgNo() + 1);
3820 P = DAG.getAddrSpaceCast(dl, ByvalIn.VT, P, ADDRESS_SPACE_LOCAL,
3822 }
3823 InVals.push_back(P);
3824 } else {
3825 SmallVector<EVT, 16> VTs;
3826 SmallVector<uint64_t, 16> Offsets;
3827 ComputePTXValueVTs(*this, DL, Ctx, CallConv, Ty, VTs, Offsets);
3828 assert(VTs.size() == ArgIns.size() && "Size mismatch");
3829 assert(VTs.size() == Offsets.size() && "Size mismatch");
3830
3831 const Align ArgAlign = getFunctionArgumentAlignment(
3832 &F, Ty, Arg.getArgNo() + AttributeList::FirstArgIndex, DL);
3833
3834 unsigned I = 0;
3835 const auto VI = VectorizePTXValueVTs(VTs, Offsets, ArgAlign);
3836 for (const unsigned NumElts : VI) {
3837 // i1 is loaded/stored as i8
3838 const EVT LoadVT = VTs[I] == MVT::i1 ? MVT::i8 : VTs[I];
3839 const EVT VecVT = getVectorizedVT(LoadVT, NumElts, Ctx);
3840
3841 SDValue VecAddr = DAG.getObjectPtrOffset(
3842 dl, ArgSymbol, TypeSize::getFixed(Offsets[I]));
3843
3844 const Align PartAlign = commonAlignment(ArgAlign, Offsets[I]);
3845 SDValue P =
3846 DAG.getLoad(VecVT, dl, Root, VecAddr,
3847 MachinePointerInfo(ADDRESS_SPACE_PARAM), PartAlign,
3848 MachineMemOperand::MODereferenceable |
3849 MachineMemOperand::MOInvariant);
3850 P.getNode()->setIROrder(Arg.getArgNo() + 1);
3851 for (const unsigned J : llvm::seq(NumElts)) {
3852 SDValue Elt = getExtractVectorizedValue(P, J, LoadVT, dl, DAG);
3853
3854 Elt = correctParamType(Elt, ArgIns[I + J].VT, ArgIns[I + J].Flags,
3855 DAG, dl);
3856 InVals.push_back(Elt);
3857 }
3858 I += NumElts;
3859 }
3860 }
3861 }
3862
3863 if (!OutChains.empty())
3864 DAG.setRoot(DAG.getTokenFactor(dl, OutChains));
3865
3866 return Chain;
3867}
3868
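// Return values are written to the implicit "func_retval0" parameter symbol.
// Integer values narrower than 32 bits are widened to i32 first, matching the
// PTX interoperability rules quoted below, and larger values are stored as
// vectorized chunks at their computed offsets.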
3869SDValue
3870 NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
3871 bool isVarArg,
3872 const SmallVectorImpl<ISD::OutputArg> &Outs,
3873 const SmallVectorImpl<SDValue> &OutVals,
3874 const SDLoc &dl, SelectionDAG &DAG) const {
3875 const Function &F = DAG.getMachineFunction().getFunction();
3876 Type *RetTy = F.getReturnType();
3877
3878 if (RetTy->isVoidTy()) {
3879 assert(OutVals.empty() && Outs.empty() && "Return value expected for void");
3880 return DAG.getNode(NVPTXISD::RET_GLUE, dl, MVT::Other, Chain);
3881 }
3882
3883 const DataLayout &DL = DAG.getDataLayout();
3884 LLVMContext &Ctx = *DAG.getContext();
3885
3886 const SDValue RetSymbol = DAG.getExternalSymbol("func_retval0", MVT::i32);
3887 const auto RetAlign = getFunctionParamOptimizedAlign(&F, RetTy, DL);
3888
3889 // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
3890 // 32-bits are sign extended or zero extended, depending on whether
3891 // they are signed or unsigned types.
3892 const bool ExtendIntegerRetVal =
3893 RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
3894
3895 SmallVector<EVT, 16> VTs;
3896 SmallVector<uint64_t, 16> Offsets;
3897 ComputePTXValueVTs(*this, DL, Ctx, CallConv, RetTy, VTs, Offsets);
3898 assert(VTs.size() == OutVals.size() && "Bad return value decomposition");
3899
3900 const auto GetRetVal = [&](unsigned I) -> SDValue {
3901 SDValue RetVal = OutVals[I];
3902 assert(promoteScalarIntegerPTX(RetVal.getValueType()) ==
3903 RetVal.getValueType() &&
3904 "OutVal type should always be legal");
3905
3906 const EVT VTI = promoteScalarIntegerPTX(VTs[I]);
3907 const EVT StoreVT =
3908 ExtendIntegerRetVal ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
3909 return correctParamType(RetVal, StoreVT, Outs[I].Flags, DAG, dl);
3910 };
3911
3912 unsigned I = 0;
3913 const auto VI = VectorizePTXValueVTs(VTs, Offsets, RetAlign);
3914 for (const unsigned NumElts : VI) {
3915 const MaybeAlign CurrentAlign = ExtendIntegerRetVal
3916 ? MaybeAlign(std::nullopt)
3917 : commonAlignment(RetAlign, Offsets[I]);
3918
3920 NumElts, dl, DAG, [&](unsigned K) { return GetRetVal(I + K); });
3921
3922 SDValue Ptr =
3923 DAG.getObjectPtrOffset(dl, RetSymbol, TypeSize::getFixed(Offsets[I]));
3924
3925 Chain = DAG.getStore(Chain, dl, Val, Ptr,
3926 MachinePointerInfo(ADDRESS_SPACE_PARAM), CurrentAlign);
3927
3928 I += NumElts;
3929 }
3930
3931 return DAG.getNode(NVPTXISD::RET_GLUE, dl, MVT::Other, Chain);
3932}
3933
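// Only multi-character inline asm operand constraints are rejected here;
// single-character constraints are handled by the generic TargetLowering code.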
3934 void NVPTXTargetLowering::LowerAsmOperandForConstraint(
3935 SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
3936 SelectionDAG &DAG) const {
3937 if (Constraint.size() > 1)
3938 return;
3940}
3941
3942// llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
3943// TgtMemIntrinsic
3944// because we need the information that is only available in the "Value" type
3945// of destination
3946// pointer. In particular, the address space information.
3947 bool NVPTXTargetLowering::getTgtMemIntrinsic(
3948 IntrinsicInfo &Info, const CallInst &I,
3949 MachineFunction &MF, unsigned Intrinsic) const {
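// Each recognized memory intrinsic fills in Info (opcode, memory VT, base
// pointer, flags, and alignment) so that a MachineMemOperand can be attached
// to the resulting node; unrecognized intrinsics simply return false.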
3950 switch (Intrinsic) {
3951 default:
3952 return false;
3953 case Intrinsic::nvvm_match_all_sync_i32p:
3954 case Intrinsic::nvvm_match_all_sync_i64p:
3955 Info.opc = ISD::INTRINSIC_W_CHAIN;
3956 // memVT is bogus. These intrinsics have IntrInaccessibleMemOnly attribute
3957 // in order to model data exchange with other threads, but perform no real
3958 // memory accesses.
3959 Info.memVT = MVT::i1;
3960
3961 // Our result depends on both our and other thread's arguments.
3962 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
3963 return true;
3964 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col:
3965 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row:
3966 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride:
3967 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride:
3968 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col:
3969 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row:
3970 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col_stride:
3971 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row_stride:
3972 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col:
3973 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row:
3974 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col_stride:
3975 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row_stride:
3976 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col:
3977 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row:
3978 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col_stride:
3979 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row_stride:
3980 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col:
3981 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row:
3982 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col_stride:
3983 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row_stride:
3984 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col:
3985 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row:
3986 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col_stride:
3987 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row_stride: {
3988 Info.opc = ISD::INTRINSIC_W_CHAIN;
3989 Info.memVT = MVT::v8f16;
3990 Info.ptrVal = I.getArgOperand(0);
3991 Info.offset = 0;
3992 Info.flags = MachineMemOperand::MOLoad;
3993 Info.align = Align(16);
3994 return true;
3995 }
3996 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col:
3997 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col_stride:
3998 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col_stride:
3999 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col:
4000 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row:
4001 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row_stride:
4002 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row_stride:
4003 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row:
4004 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col:
4005 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col_stride:
4006 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row:
4007 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row_stride:
4008 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col:
4009 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col_stride:
4010 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col_stride:
4011 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col:
4012 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row:
4013 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row_stride:
4014 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row_stride:
4015 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row:
4016 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col:
4017 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col_stride:
4018 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row:
4019 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row_stride: {
4020 Info.opc = ISD::INTRINSIC_W_CHAIN;
4021 Info.memVT = MVT::v2i32;
4022 Info.ptrVal = I.getArgOperand(0);
4023 Info.offset = 0;
4024 Info.flags = MachineMemOperand::MOLoad;
4025 Info.align = Align(8);
4026 return true;
4027 }
4028
4029 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col:
4030 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col_stride:
4031 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col_stride:
4032 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col:
4033 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row:
4034 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row_stride:
4035 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row_stride:
4036 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row:
4037 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col:
4038 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col_stride:
4039 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row:
4040 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row_stride:
4041 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col:
4042 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col_stride:
4043 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row:
4044 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row_stride:
4045
4046 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col:
4047 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col_stride:
4048 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col_stride:
4049 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col:
4050 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row:
4051 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row_stride:
4052 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row_stride:
4053 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row:
4054 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col:
4055 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col_stride:
4056 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row:
4057 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row_stride:
4058 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col:
4059 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col_stride:
4060 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row:
4061 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row_stride:
4062 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_b16:
4063 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_trans_b16:
4064 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8:
4065 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8x16_b4x16_p64:
4066 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8x16_b6x16_p32:
4067 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x4_b8x16_b4x16_p64:
4068 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x4_b8x16_b6x16_p32: {
4069 Info.opc = ISD::INTRINSIC_W_CHAIN;
4070 Info.memVT = MVT::v4i32;
4071 Info.ptrVal = I.getArgOperand(0);
4072 Info.offset = 0;
4073 Info.flags = MachineMemOperand::MOLoad;
4074 Info.align = Align(16);
4075 return true;
4076 }
4077
4078 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col:
4079 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col_stride:
4080 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col_stride:
4081 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col:
4082 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row:
4083 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row_stride:
4084 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row_stride:
4085 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row:
4086
4087 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col:
4088 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col_stride:
4089 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col_stride:
4090 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col:
4091 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row:
4092 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row_stride:
4093 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row_stride:
4094 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row:
4095 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row:
4096 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row_stride:
4097 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col:
4098 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col_stride:
4099 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row:
4100 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row_stride:
4101 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row_stride:
4102 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row:
4103 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col:
4104 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col_stride:
4105 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col_stride:
4106 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col:
4107 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_b16:
4108 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_trans_b16:
4109 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x1_b8x16_b4x16_p64:
4110 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x1_b8x16_b6x16_p32: {
4111 Info.opc = ISD::INTRINSIC_W_CHAIN;
4112 Info.memVT = MVT::i32;
4113 Info.ptrVal = I.getArgOperand(0);
4114 Info.offset = 0;
4115 Info.flags = MachineMemOperand::MOLoad;
4116 Info.align = Align(4);
4117 return true;
4118 }
4119
4120 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col:
4121 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row:
4122 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col_stride:
4123 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row_stride:
4124 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col:
4125 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row:
4126 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col_stride:
4127 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row_stride:
4128 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col:
4129 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row:
4130 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col_stride:
4131 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row_stride: {
4132 Info.opc = ISD::INTRINSIC_W_CHAIN;
4133 Info.memVT = MVT::v4f16;
4134 Info.ptrVal = I.getArgOperand(0);
4135 Info.offset = 0;
4136 Info.flags = MachineMemOperand::MOLoad;
4137 Info.align = Align(16);
4138 return true;
4139 }
4140
4141 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col:
4142 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row:
4143 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col_stride:
4144 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row_stride:
4145 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col:
4146 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row:
4147 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col_stride:
4148 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row_stride:
4149 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col:
4150 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row:
4151 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col_stride:
4152 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row_stride:
4153 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col:
4154 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row:
4155 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col_stride:
4156 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row_stride: {
4157 Info.opc = ISD::INTRINSIC_W_CHAIN;
4158 Info.memVT = MVT::v8f32;
4159 Info.ptrVal = I.getArgOperand(0);
4160 Info.offset = 0;
4161 Info.flags = MachineMemOperand::MOLoad;
4162 Info.align = Align(16);
4163 return true;
4164 }
4165
4166 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col:
4167 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col_stride:
4168 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row:
4169 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row_stride:
4170
4171 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col:
4172 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col_stride:
4173 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row:
4174 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row_stride:
4175
4176 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col:
4177 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col_stride:
4178 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row:
4179 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row_stride:
4180 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col:
4181 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col_stride:
4182 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row:
4183 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row_stride:
4184 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col:
4185 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col_stride:
4186 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row:
4187 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row_stride: {
4188 Info.opc = ISD::INTRINSIC_W_CHAIN;
4189 Info.memVT = MVT::v8i32;
4190 Info.ptrVal = I.getArgOperand(0);
4191 Info.offset = 0;
4192 Info.flags = MachineMemOperand::MOLoad;
4193 Info.align = Align(16);
4194 return true;
4195 }
4196
4197 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col:
4198 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col_stride:
4199 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row:
4200 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row_stride:
4201 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col:
4202 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col_stride:
4203 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row:
4204 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row_stride:
4205 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_b16:
4206 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_trans_b16:
4207 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8:
4208 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8x16_b4x16_p64:
4209 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8x16_b6x16_p32:
4210 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x2_b8x16_b4x16_p64:
4211 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x2_b8x16_b6x16_p32: {
4212 Info.opc = ISD::INTRINSIC_W_CHAIN;
4213 Info.memVT = MVT::v2i32;
4214 Info.ptrVal = I.getArgOperand(0);
4215 Info.offset = 0;
4216 Info.flags = MachineMemOperand::MOLoad;
4217 Info.align = Align(8);
4218 return true;
4219 }
4220
4221 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col:
4222 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col_stride:
4223 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row:
4224 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row_stride:
4225
4226 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col:
4227 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col_stride:
4228 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row:
4229 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row_stride: {
4230 Info.opc = ISD::INTRINSIC_W_CHAIN;
4231 Info.memVT = MVT::f64;
4232 Info.ptrVal = I.getArgOperand(0);
4233 Info.offset = 0;
4234 Info.flags = MachineMemOperand::MOLoad;
4235 Info.align = Align(8);
4236 return true;
4237 }
4238
4239 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col:
4240 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col_stride:
4241 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row:
4242 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row_stride: {
4243 Info.opc = ISD::INTRINSIC_W_CHAIN;
4244 Info.memVT = MVT::v2f64;
4245 Info.ptrVal = I.getArgOperand(0);
4246 Info.offset = 0;
4247 Info.flags = MachineMemOperand::MOLoad;
4248 Info.align = Align(16);
4249 return true;
4250 }
4251
4252 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col:
4253 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row:
4254 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col_stride:
4255 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row_stride:
4256 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col:
4257 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row:
4258 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col_stride:
4259 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row_stride:
4260 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col:
4261 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row:
4262 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col_stride:
4263 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row_stride: {
4264 Info.opc = ISD::INTRINSIC_VOID;
4265 Info.memVT = MVT::v4f16;
4266 Info.ptrVal = I.getArgOperand(0);
4267 Info.offset = 0;
4268 Info.flags = MachineMemOperand::MOStore;
4269 Info.align = Align(16);
4270 return true;
4271 }
4272
4273 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col:
4274 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row:
4275 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col_stride:
4276 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row_stride:
4277 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col:
4278 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row:
4279 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col_stride:
4280 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row_stride:
4281 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col:
4282 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row:
4283 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col_stride:
4284 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row_stride:
4285 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col:
4286 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row:
4287 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col_stride:
4288 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row_stride: {
4289 Info.opc = ISD::INTRINSIC_VOID;
4290 Info.memVT = MVT::v8f32;
4291 Info.ptrVal = I.getArgOperand(0);
4292 Info.offset = 0;
4293 Info.flags = MachineMemOperand::MOStore;
4294 Info.align = Align(16);
4295 return true;
4296 }
4297
4298 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col:
4299 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col_stride:
4300 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row:
4301 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row_stride:
4302 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col:
4303 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col_stride:
4304 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row:
4305 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row_stride:
4306 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col:
4307 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col_stride:
4308 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row:
4309 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row_stride: {
4310 Info.opc = ISD::INTRINSIC_VOID;
4311 Info.memVT = MVT::v8i32;
4312 Info.ptrVal = I.getArgOperand(0);
4313 Info.offset = 0;
4314 Info.flags = MachineMemOperand::MOStore;
4315 Info.align = Align(16);
4316 return true;
4317 }
4318
4319 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col:
4320 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col_stride:
4321 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row:
4322 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row_stride:
4323 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col:
4324 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col_stride:
4325 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row:
4326 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row_stride:
4327 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x2_b16:
4328 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x2_trans_b16:
4329 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x2_trans_b8: {
4330 Info.opc = ISD::INTRINSIC_VOID;
4331 Info.memVT = MVT::v2i32;
4332 Info.ptrVal = I.getArgOperand(0);
4333 Info.offset = 0;
4334 Info.flags = MachineMemOperand::MOStore;
4335 Info.align = Align(8);
4336 return true;
4337 }
4338
4339 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col:
4340 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col_stride:
4341 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row:
4342 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row_stride: {
4343 Info.opc = ISD::INTRINSIC_VOID;
4344 Info.memVT = MVT::v2f64;
4345 Info.ptrVal = I.getArgOperand(0);
4346 Info.offset = 0;
4347 Info.flags = MachineMemOperand::MOStore;
4348 Info.align = Align(16);
4349 return true;
4350 }
4351
4352 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x1_b16:
4353 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x1_trans_b16:
4354 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x1_trans_b8: {
4355 Info.opc = ISD::INTRINSIC_VOID;
4356 Info.memVT = MVT::i32;
4357 Info.ptrVal = I.getArgOperand(0);
4358 Info.offset = 0;
4359 Info.flags = MachineMemOperand::MOStore;
4360 Info.align = Align(4);
4361 return true;
4362 }
4363
4364 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x4_b16:
4365 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x4_trans_b16:
4366 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x4_trans_b8: {
4367 Info.opc = ISD::INTRINSIC_VOID;
4368 Info.memVT = MVT::v4i32;
4369 Info.ptrVal = I.getArgOperand(0);
4370 Info.offset = 0;
4371 Info.flags = MachineMemOperand::MOStore;
4372 Info.align = Align(16);
4373 return true;
4374 }
4375
4376 case Intrinsic::nvvm_atomic_add_gen_f_cta:
4377 case Intrinsic::nvvm_atomic_add_gen_f_sys:
4378 case Intrinsic::nvvm_atomic_add_gen_i_cta:
4379 case Intrinsic::nvvm_atomic_add_gen_i_sys:
4380 case Intrinsic::nvvm_atomic_and_gen_i_cta:
4381 case Intrinsic::nvvm_atomic_and_gen_i_sys:
4382 case Intrinsic::nvvm_atomic_cas_gen_i_cta:
4383 case Intrinsic::nvvm_atomic_cas_gen_i_sys:
4384 case Intrinsic::nvvm_atomic_dec_gen_i_cta:
4385 case Intrinsic::nvvm_atomic_dec_gen_i_sys:
4386 case Intrinsic::nvvm_atomic_inc_gen_i_cta:
4387 case Intrinsic::nvvm_atomic_inc_gen_i_sys:
4388 case Intrinsic::nvvm_atomic_max_gen_i_cta:
4389 case Intrinsic::nvvm_atomic_max_gen_i_sys:
4390 case Intrinsic::nvvm_atomic_min_gen_i_cta:
4391 case Intrinsic::nvvm_atomic_min_gen_i_sys:
4392 case Intrinsic::nvvm_atomic_or_gen_i_cta:
4393 case Intrinsic::nvvm_atomic_or_gen_i_sys:
4394 case Intrinsic::nvvm_atomic_exch_gen_i_cta:
4395 case Intrinsic::nvvm_atomic_exch_gen_i_sys:
4396 case Intrinsic::nvvm_atomic_xor_gen_i_cta:
4397 case Intrinsic::nvvm_atomic_xor_gen_i_sys: {
4398 auto &DL = I.getDataLayout();
4399 Info.opc = ISD::INTRINSIC_W_CHAIN;
4400 Info.memVT = getValueType(DL, I.getType());
4401 Info.ptrVal = I.getArgOperand(0);
4402 Info.offset = 0;
4403 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
4404 Info.align.reset();
4405 return true;
4406 }
4407
4408 case Intrinsic::nvvm_prefetch_tensormap: {
4409 auto &DL = I.getDataLayout();
4410 Info.opc = ISD::INTRINSIC_VOID;
4411 Info.memVT = getPointerTy(DL);
4412 Info.ptrVal = I.getArgOperand(0);
4413 Info.offset = 0;
4414 Info.flags =
4416 Info.align.reset();
4417 return true;
4418 }
4419
4420 case Intrinsic::nvvm_ldu_global_i:
4421 case Intrinsic::nvvm_ldu_global_f:
4422 case Intrinsic::nvvm_ldu_global_p: {
4423 Info.opc = ISD::INTRINSIC_W_CHAIN;
4424 Info.memVT = getValueType(I.getDataLayout(), I.getType());
4425 Info.ptrVal = I.getArgOperand(0);
4426 Info.offset = 0;
4427 Info.flags = MachineMemOperand::MOLoad;
4428 Info.align = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
4429
4430 return true;
4431 }
4432 case Intrinsic::nvvm_tex_1d_v4f32_s32:
4433 case Intrinsic::nvvm_tex_1d_v4f32_f32:
4434 case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
4435 case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
4436 case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
4437 case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
4438 case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
4439 case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
4440 case Intrinsic::nvvm_tex_2d_v4f32_s32:
4441 case Intrinsic::nvvm_tex_2d_v4f32_f32:
4442 case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
4443 case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
4444 case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
4445 case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
4446 case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
4447 case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
4448 case Intrinsic::nvvm_tex_3d_v4f32_s32:
4449 case Intrinsic::nvvm_tex_3d_v4f32_f32:
4450 case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
4451 case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
4452 case Intrinsic::nvvm_tex_cube_v4f32_f32:
4453 case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
4454 case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
4455 case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
4456 case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
4457 case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
4458 case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
4459 case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
4460 case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
4461 case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
4462 case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
4463 case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
4464 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
4465 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
4466 case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
4467 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
4468 case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
4469 case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
4470 case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
4471 case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
4472 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
4473 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
4474 case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
4475 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
4476 case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
4477 case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
4478 case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
4479 case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
4480 case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
4481 case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
4482 case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
4483 case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
4484 case Intrinsic::nvvm_tex_unified_cube_grad_v4f32_f32:
4485 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4f32_f32:
4486 case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
4487 case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
4488 case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
4489 case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
4490 Info.opc = ISD::INTRINSIC_W_CHAIN;
4491 Info.memVT = MVT::v4f32;
4492 Info.ptrVal = nullptr;
4493 Info.offset = 0;
4494 Info.flags = MachineMemOperand::MOLoad;
4495 Info.align = Align(16);
4496 return true;
4497
4498 case Intrinsic::nvvm_tex_1d_v4s32_s32:
4499 case Intrinsic::nvvm_tex_1d_v4s32_f32:
4500 case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
4501 case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
4502 case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
4503 case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
4504 case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
4505 case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
4506 case Intrinsic::nvvm_tex_2d_v4s32_s32:
4507 case Intrinsic::nvvm_tex_2d_v4s32_f32:
4508 case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
4509 case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
4510 case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
4511 case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
4512 case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
4513 case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
4514 case Intrinsic::nvvm_tex_3d_v4s32_s32:
4515 case Intrinsic::nvvm_tex_3d_v4s32_f32:
4516 case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
4517 case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
4518 case Intrinsic::nvvm_tex_cube_v4s32_f32:
4519 case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
4520 case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
4521 case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
4522 case Intrinsic::nvvm_tex_cube_v4u32_f32:
4523 case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
4524 case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
4525 case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
4526 case Intrinsic::nvvm_tex_1d_v4u32_s32:
4527 case Intrinsic::nvvm_tex_1d_v4u32_f32:
4528 case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
4529 case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
4530 case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
4531 case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
4532 case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
4533 case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
4534 case Intrinsic::nvvm_tex_2d_v4u32_s32:
4535 case Intrinsic::nvvm_tex_2d_v4u32_f32:
4536 case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
4537 case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
4538 case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
4539 case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
4540 case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
4541 case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
4542 case Intrinsic::nvvm_tex_3d_v4u32_s32:
4543 case Intrinsic::nvvm_tex_3d_v4u32_f32:
4544 case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
4545 case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
4546 case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
4547 case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
4548 case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
4549 case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
4550 case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
4551 case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
4552 case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
4553 case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
4554 case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
4555 case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
4556 case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
4557 case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
4558 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
4559 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
4560 case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
4561 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
4562 case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
4563 case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
4564 case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
4565 case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
4566 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
4567 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
4568 case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
4569 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
4570 case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
4571 case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
4572 case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
4573 case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
4574 case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
4575 case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
4576 case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
4577 case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
4578 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
4579 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
4580 case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
4581 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
4582 case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
4583 case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
4584 case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
4585 case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
4586 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
4587 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
4588 case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
4589 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
4590 case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
4591 case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
4592 case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
4593 case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
4594 case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
4595 case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
4596 case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
4597 case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
4598 case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
4599 case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
4600 case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
4601 case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
4602 case Intrinsic::nvvm_tex_unified_cube_grad_v4s32_f32:
4603 case Intrinsic::nvvm_tex_unified_cube_grad_v4u32_f32:
4604 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4s32_f32:
4605 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4u32_f32:
4606 case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
4607 case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
4608 case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
4609 case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
4610 case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
4611 case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
4612 case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
4613 case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
4614 Info.opc = ISD::INTRINSIC_W_CHAIN;
4615 Info.memVT = MVT::v4i32;
4616 Info.ptrVal = nullptr;
4617 Info.offset = 0;
4618 Info.flags = MachineMemOperand::MOLoad;
4619 Info.align = Align(16);
4620 return true;
4621
4622 case Intrinsic::nvvm_suld_1d_i8_clamp:
4623 case Intrinsic::nvvm_suld_1d_v2i8_clamp:
4624 case Intrinsic::nvvm_suld_1d_v4i8_clamp:
4625 case Intrinsic::nvvm_suld_1d_array_i8_clamp:
4626 case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
4627 case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
4628 case Intrinsic::nvvm_suld_2d_i8_clamp:
4629 case Intrinsic::nvvm_suld_2d_v2i8_clamp:
4630 case Intrinsic::nvvm_suld_2d_v4i8_clamp:
4631 case Intrinsic::nvvm_suld_2d_array_i8_clamp:
4632 case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
4633 case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
4634 case Intrinsic::nvvm_suld_3d_i8_clamp:
4635 case Intrinsic::nvvm_suld_3d_v2i8_clamp:
4636 case Intrinsic::nvvm_suld_3d_v4i8_clamp:
4637 case Intrinsic::nvvm_suld_1d_i8_trap:
4638 case Intrinsic::nvvm_suld_1d_v2i8_trap:
4639 case Intrinsic::nvvm_suld_1d_v4i8_trap:
4640 case Intrinsic::nvvm_suld_1d_array_i8_trap:
4641 case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
4642 case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
4643 case Intrinsic::nvvm_suld_2d_i8_trap:
4644 case Intrinsic::nvvm_suld_2d_v2i8_trap:
4645 case Intrinsic::nvvm_suld_2d_v4i8_trap:
4646 case Intrinsic::nvvm_suld_2d_array_i8_trap:
4647 case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
4648 case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
4649 case Intrinsic::nvvm_suld_3d_i8_trap:
4650 case Intrinsic::nvvm_suld_3d_v2i8_trap:
4651 case Intrinsic::nvvm_suld_3d_v4i8_trap:
4652 case Intrinsic::nvvm_suld_1d_i8_zero:
4653 case Intrinsic::nvvm_suld_1d_v2i8_zero:
4654 case Intrinsic::nvvm_suld_1d_v4i8_zero:
4655 case Intrinsic::nvvm_suld_1d_array_i8_zero:
4656 case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
4657 case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
4658 case Intrinsic::nvvm_suld_2d_i8_zero:
4659 case Intrinsic::nvvm_suld_2d_v2i8_zero:
4660 case Intrinsic::nvvm_suld_2d_v4i8_zero:
4661 case Intrinsic::nvvm_suld_2d_array_i8_zero:
4662 case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
4663 case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
4664 case Intrinsic::nvvm_suld_3d_i8_zero:
4665 case Intrinsic::nvvm_suld_3d_v2i8_zero:
4666 case Intrinsic::nvvm_suld_3d_v4i8_zero:
4667 Info.opc = ISD::INTRINSIC_W_CHAIN;
4668 Info.memVT = MVT::i8;
4669 Info.ptrVal = nullptr;
4670 Info.offset = 0;
4671 Info.flags = MachineMemOperand::MOLoad;
4672 Info.align = Align(16);
4673 return true;
4674
4675 case Intrinsic::nvvm_suld_1d_i16_clamp:
4676 case Intrinsic::nvvm_suld_1d_v2i16_clamp:
4677 case Intrinsic::nvvm_suld_1d_v4i16_clamp:
4678 case Intrinsic::nvvm_suld_1d_array_i16_clamp:
4679 case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
4680 case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
4681 case Intrinsic::nvvm_suld_2d_i16_clamp:
4682 case Intrinsic::nvvm_suld_2d_v2i16_clamp:
4683 case Intrinsic::nvvm_suld_2d_v4i16_clamp:
4684 case Intrinsic::nvvm_suld_2d_array_i16_clamp:
4685 case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
4686 case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
4687 case Intrinsic::nvvm_suld_3d_i16_clamp:
4688 case Intrinsic::nvvm_suld_3d_v2i16_clamp:
4689 case Intrinsic::nvvm_suld_3d_v4i16_clamp:
4690 case Intrinsic::nvvm_suld_1d_i16_trap:
4691 case Intrinsic::nvvm_suld_1d_v2i16_trap:
4692 case Intrinsic::nvvm_suld_1d_v4i16_trap:
4693 case Intrinsic::nvvm_suld_1d_array_i16_trap:
4694 case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
4695 case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
4696 case Intrinsic::nvvm_suld_2d_i16_trap:
4697 case Intrinsic::nvvm_suld_2d_v2i16_trap:
4698 case Intrinsic::nvvm_suld_2d_v4i16_trap:
4699 case Intrinsic::nvvm_suld_2d_array_i16_trap:
4700 case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
4701 case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
4702 case Intrinsic::nvvm_suld_3d_i16_trap:
4703 case Intrinsic::nvvm_suld_3d_v2i16_trap:
4704 case Intrinsic::nvvm_suld_3d_v4i16_trap:
4705 case Intrinsic::nvvm_suld_1d_i16_zero:
4706 case Intrinsic::nvvm_suld_1d_v2i16_zero:
4707 case Intrinsic::nvvm_suld_1d_v4i16_zero:
4708 case Intrinsic::nvvm_suld_1d_array_i16_zero:
4709 case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
4710 case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
4711 case Intrinsic::nvvm_suld_2d_i16_zero:
4712 case Intrinsic::nvvm_suld_2d_v2i16_zero:
4713 case Intrinsic::nvvm_suld_2d_v4i16_zero:
4714 case Intrinsic::nvvm_suld_2d_array_i16_zero:
4715 case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
4716 case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
4717 case Intrinsic::nvvm_suld_3d_i16_zero:
4718 case Intrinsic::nvvm_suld_3d_v2i16_zero:
4719 case Intrinsic::nvvm_suld_3d_v4i16_zero:
4720 Info.opc = ISD::INTRINSIC_W_CHAIN;
4721 Info.memVT = MVT::i16;
4722 Info.ptrVal = nullptr;
4723 Info.offset = 0;
4724 Info.flags = MachineMemOperand::MOLoad;
4725 Info.align = Align(16);
4726 return true;
4727
4728 case Intrinsic::nvvm_suld_1d_i32_clamp:
4729 case Intrinsic::nvvm_suld_1d_v2i32_clamp:
4730 case Intrinsic::nvvm_suld_1d_v4i32_clamp:
4731 case Intrinsic::nvvm_suld_1d_array_i32_clamp:
4732 case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
4733 case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
4734 case Intrinsic::nvvm_suld_2d_i32_clamp:
4735 case Intrinsic::nvvm_suld_2d_v2i32_clamp:
4736 case Intrinsic::nvvm_suld_2d_v4i32_clamp:
4737 case Intrinsic::nvvm_suld_2d_array_i32_clamp:
4738 case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
4739 case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
4740 case Intrinsic::nvvm_suld_3d_i32_clamp:
4741 case Intrinsic::nvvm_suld_3d_v2i32_clamp:
4742 case Intrinsic::nvvm_suld_3d_v4i32_clamp:
4743 case Intrinsic::nvvm_suld_1d_i32_trap:
4744 case Intrinsic::nvvm_suld_1d_v2i32_trap:
4745 case Intrinsic::nvvm_suld_1d_v4i32_trap:
4746 case Intrinsic::nvvm_suld_1d_array_i32_trap:
4747 case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
4748 case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
4749 case Intrinsic::nvvm_suld_2d_i32_trap:
4750 case Intrinsic::nvvm_suld_2d_v2i32_trap:
4751 case Intrinsic::nvvm_suld_2d_v4i32_trap:
4752 case Intrinsic::nvvm_suld_2d_array_i32_trap:
4753 case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
4754 case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
4755 case Intrinsic::nvvm_suld_3d_i32_trap:
4756 case Intrinsic::nvvm_suld_3d_v2i32_trap:
4757 case Intrinsic::nvvm_suld_3d_v4i32_trap:
4758 case Intrinsic::nvvm_suld_1d_i32_zero:
4759 case Intrinsic::nvvm_suld_1d_v2i32_zero:
4760 case Intrinsic::nvvm_suld_1d_v4i32_zero:
4761 case Intrinsic::nvvm_suld_1d_array_i32_zero:
4762 case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
4763 case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
4764 case Intrinsic::nvvm_suld_2d_i32_zero:
4765 case Intrinsic::nvvm_suld_2d_v2i32_zero:
4766 case Intrinsic::nvvm_suld_2d_v4i32_zero:
4767 case Intrinsic::nvvm_suld_2d_array_i32_zero:
4768 case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
4769 case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
4770 case Intrinsic::nvvm_suld_3d_i32_zero:
4771 case Intrinsic::nvvm_suld_3d_v2i32_zero:
4772 case Intrinsic::nvvm_suld_3d_v4i32_zero:
4773 Info.opc = ISD::INTRINSIC_W_CHAIN;
4774 Info.memVT = MVT::i32;
4775 Info.ptrVal = nullptr;
4776 Info.offset = 0;
4777 Info.flags = MachineMemOperand::MOLoad;
4778 Info.align = Align(16);
4779 return true;
4780
4781 case Intrinsic::nvvm_suld_1d_i64_clamp:
4782 case Intrinsic::nvvm_suld_1d_v2i64_clamp:
4783 case Intrinsic::nvvm_suld_1d_array_i64_clamp:
4784 case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
4785 case Intrinsic::nvvm_suld_2d_i64_clamp:
4786 case Intrinsic::nvvm_suld_2d_v2i64_clamp:
4787 case Intrinsic::nvvm_suld_2d_array_i64_clamp:
4788 case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
4789 case Intrinsic::nvvm_suld_3d_i64_clamp:
4790 case Intrinsic::nvvm_suld_3d_v2i64_clamp:
4791 case Intrinsic::nvvm_suld_1d_i64_trap:
4792 case Intrinsic::nvvm_suld_1d_v2i64_trap:
4793 case Intrinsic::nvvm_suld_1d_array_i64_trap:
4794 case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
4795 case Intrinsic::nvvm_suld_2d_i64_trap:
4796 case Intrinsic::nvvm_suld_2d_v2i64_trap:
4797 case Intrinsic::nvvm_suld_2d_array_i64_trap:
4798 case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
4799 case Intrinsic::nvvm_suld_3d_i64_trap:
4800 case Intrinsic::nvvm_suld_3d_v2i64_trap:
4801 case Intrinsic::nvvm_suld_1d_i64_zero:
4802 case Intrinsic::nvvm_suld_1d_v2i64_zero:
4803 case Intrinsic::nvvm_suld_1d_array_i64_zero:
4804 case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
4805 case Intrinsic::nvvm_suld_2d_i64_zero:
4806 case Intrinsic::nvvm_suld_2d_v2i64_zero:
4807 case Intrinsic::nvvm_suld_2d_array_i64_zero:
4808 case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
4809 case Intrinsic::nvvm_suld_3d_i64_zero:
4810 case Intrinsic::nvvm_suld_3d_v2i64_zero:
4811 Info.opc = ISD::INTRINSIC_W_CHAIN;
4812 Info.memVT = MVT::i64;
4813 Info.ptrVal = nullptr;
4814 Info.offset = 0;
4815 Info.flags = MachineMemOperand::MOLoad;
4816 Info.align = Align(16);
4817 return true;
4818
4819 case Intrinsic::nvvm_tcgen05_ld_16x64b_x1:
4820 case Intrinsic::nvvm_tcgen05_ld_32x32b_x1:
4821 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x1: {
4822 Info.opc = ISD::INTRINSIC_W_CHAIN;
4823 Info.memVT = MVT::v1i32;
4824 Info.ptrVal = I.getArgOperand(0);
4825 Info.offset = 0;
4826 Info.flags = MachineMemOperand::MOLoad;
4827 Info.align.reset();
4828 return true;
4829 }
4830
4831 case Intrinsic::nvvm_tcgen05_ld_16x64b_x2:
4832 case Intrinsic::nvvm_tcgen05_ld_16x128b_x1:
4833 case Intrinsic::nvvm_tcgen05_ld_32x32b_x2:
4834 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2: {
4835 Info.opc = ISD::INTRINSIC_W_CHAIN;
4836 Info.memVT = MVT::v2i32;
4837 Info.ptrVal = I.getArgOperand(0);
4838 Info.offset = 0;
4839 Info.flags = MachineMemOperand::MOLoad;
4840 Info.align.reset();
4841 return true;
4842 }
4843
4844 case Intrinsic::nvvm_tcgen05_ld_16x64b_x4:
4845 case Intrinsic::nvvm_tcgen05_ld_16x128b_x2:
4846 case Intrinsic::nvvm_tcgen05_ld_32x32b_x4:
4847 case Intrinsic::nvvm_tcgen05_ld_16x256b_x1:
4848 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4: {
4849 Info.opc = ISD::INTRINSIC_W_CHAIN;
4850 Info.memVT = MVT::v4i32;
4851 Info.ptrVal = I.getArgOperand(0);
4852 Info.offset = 0;
4853 Info.flags = MachineMemOperand::MOLoad;
4854 Info.align.reset();
4855 return true;
4856 }
4857
4858 case Intrinsic::nvvm_tcgen05_ld_16x64b_x8:
4859 case Intrinsic::nvvm_tcgen05_ld_16x128b_x4:
4860 case Intrinsic::nvvm_tcgen05_ld_16x256b_x2:
4861 case Intrinsic::nvvm_tcgen05_ld_32x32b_x8:
4862 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8: {
4863 Info.opc = ISD::INTRINSIC_W_CHAIN;
4864 Info.memVT = MVT::v8i32;
4865 Info.ptrVal = I.getArgOperand(0);
4866 Info.offset = 0;
4867 Info.flags = MachineMemOperand::MOLoad;
4868 Info.align.reset();
4869 return true;
4870 }
4871
4872 case Intrinsic::nvvm_tcgen05_ld_16x64b_x16:
4873 case Intrinsic::nvvm_tcgen05_ld_16x128b_x8:
4874 case Intrinsic::nvvm_tcgen05_ld_16x256b_x4:
4875 case Intrinsic::nvvm_tcgen05_ld_32x32b_x16:
4876 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16: {
4877 Info.opc = ISD::INTRINSIC_W_CHAIN;
4878 Info.memVT = MVT::v16i32;
4879 Info.ptrVal = I.getArgOperand(0);
4880 Info.offset = 0;
4881 Info.flags = MachineMemOperand::MOLoad;
4882 Info.align.reset();
4883 return true;
4884 }
4885
4886 case Intrinsic::nvvm_tcgen05_ld_16x64b_x32:
4887 case Intrinsic::nvvm_tcgen05_ld_16x128b_x16:
4888 case Intrinsic::nvvm_tcgen05_ld_16x256b_x8:
4889 case Intrinsic::nvvm_tcgen05_ld_32x32b_x32:
4890 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32: {
4891 Info.opc = ISD::INTRINSIC_W_CHAIN;
4892 Info.memVT = MVT::v32i32;
4893 Info.ptrVal = I.getArgOperand(0);
4894 Info.offset = 0;
4895 Info.flags = MachineMemOperand::MOLoad;
4896 Info.align.reset();
4897 return true;
4898 }
4899
4900 case Intrinsic::nvvm_tcgen05_ld_16x64b_x64:
4901 case Intrinsic::nvvm_tcgen05_ld_16x128b_x32:
4902 case Intrinsic::nvvm_tcgen05_ld_16x256b_x16:
4903 case Intrinsic::nvvm_tcgen05_ld_32x32b_x64:
4904 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64: {
4905 Info.opc = ISD::INTRINSIC_W_CHAIN;
4906 Info.memVT = MVT::v64i32;
4907 Info.ptrVal = I.getArgOperand(0);
4908 Info.offset = 0;
4909 Info.flags = MachineMemOperand::MOLoad;
4910 Info.align.reset();
4911 return true;
4912 }
4913
4914 case Intrinsic::nvvm_tcgen05_ld_16x64b_x128:
4915 case Intrinsic::nvvm_tcgen05_ld_16x128b_x64:
4916 case Intrinsic::nvvm_tcgen05_ld_16x256b_x32:
4917 case Intrinsic::nvvm_tcgen05_ld_32x32b_x128:
4918 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128: {
4919 Info.opc = ISD::INTRINSIC_W_CHAIN;
4920 Info.memVT = MVT::v128i32;
4921 Info.ptrVal = I.getArgOperand(0);
4922 Info.offset = 0;
4923 Info.flags = MachineMemOperand::MOLoad;
4924 Info.align.reset();
4925 return true;
4926 }
4927
4928 case Intrinsic::nvvm_tcgen05_st_16x64b_x1:
4929 case Intrinsic::nvvm_tcgen05_st_32x32b_x1:
4930 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x1: {
4931 Info.opc = ISD::INTRINSIC_VOID;
4932 Info.memVT = MVT::i32;
4933 Info.ptrVal = I.getArgOperand(0);
4934 Info.offset = 0;
4935 Info.flags = MachineMemOperand::MOStore;
4936 Info.align.reset();
4937 return true;
4938 }
4939
4940 case Intrinsic::nvvm_tcgen05_st_16x64b_x2:
4941 case Intrinsic::nvvm_tcgen05_st_16x128b_x1:
4942 case Intrinsic::nvvm_tcgen05_st_32x32b_x2:
4943 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x2: {
4944 Info.opc = ISD::INTRINSIC_VOID;
4945 Info.memVT = MVT::v2i32;
4946 Info.ptrVal = I.getArgOperand(0);
4947 Info.offset = 0;
4948 Info.flags = MachineMemOperand::MOStore;
4949 Info.align.reset();
4950 return true;
4951 }
4952
4953 case Intrinsic::nvvm_tcgen05_st_16x64b_x4:
4954 case Intrinsic::nvvm_tcgen05_st_16x128b_x2:
4955 case Intrinsic::nvvm_tcgen05_st_16x256b_x1:
4956 case Intrinsic::nvvm_tcgen05_st_32x32b_x4:
4957 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x4: {
4958 Info.opc = ISD::INTRINSIC_VOID;
4959 Info.memVT = MVT::v4i32;
4960 Info.ptrVal = I.getArgOperand(0);
4961 Info.offset = 0;
4962 Info.flags = MachineMemOperand::MOStore;
4963 Info.align.reset();
4964 return true;
4965 }
4966
4967 case Intrinsic::nvvm_tcgen05_st_16x64b_x8:
4968 case Intrinsic::nvvm_tcgen05_st_16x128b_x4:
4969 case Intrinsic::nvvm_tcgen05_st_16x256b_x2:
4970 case Intrinsic::nvvm_tcgen05_st_32x32b_x8:
4971 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x8: {
4972 Info.opc = ISD::INTRINSIC_VOID;
4973 Info.memVT = MVT::v8i32;
4974 Info.ptrVal = I.getArgOperand(0);
4975 Info.offset = 0;
4976 Info.flags = MachineMemOperand::MOStore;
4977 Info.align.reset();
4978 return true;
4979 }
4980
4981 case Intrinsic::nvvm_tcgen05_st_16x64b_x16:
4982 case Intrinsic::nvvm_tcgen05_st_16x128b_x8:
4983 case Intrinsic::nvvm_tcgen05_st_16x256b_x4:
4984 case Intrinsic::nvvm_tcgen05_st_32x32b_x16:
4985 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x16: {
4986 Info.opc = ISD::INTRINSIC_VOID;
4987 Info.memVT = MVT::v16i32;
4988 Info.ptrVal = I.getArgOperand(0);
4989 Info.offset = 0;
4990 Info.flags = MachineMemOperand::MOStore;
4991 Info.align.reset();
4992 return true;
4993 }
4994
4995 case Intrinsic::nvvm_tcgen05_st_16x64b_x32:
4996 case Intrinsic::nvvm_tcgen05_st_16x128b_x16:
4997 case Intrinsic::nvvm_tcgen05_st_16x256b_x8:
4998 case Intrinsic::nvvm_tcgen05_st_32x32b_x32:
4999 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x32: {
5000 Info.opc = ISD::INTRINSIC_VOID;
5001 Info.memVT = MVT::v32i32;
5002 Info.ptrVal = I.getArgOperand(0);
5003 Info.offset = 0;
5004 Info.flags = MachineMemOperand::MOStore;
5005 Info.align.reset();
5006 return true;
5007 }
5008
5009 case Intrinsic::nvvm_tcgen05_st_16x64b_x64:
5010 case Intrinsic::nvvm_tcgen05_st_16x128b_x32:
5011 case Intrinsic::nvvm_tcgen05_st_16x256b_x16:
5012 case Intrinsic::nvvm_tcgen05_st_32x32b_x64:
5013 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x64: {
5014 Info.opc = ISD::INTRINSIC_VOID;
5015 Info.memVT = MVT::v64i32;
5016 Info.ptrVal = I.getArgOperand(0);
5017 Info.offset = 0;
5018 Info.flags = MachineMemOperand::MOStore;
5019 Info.align.reset();
5020 return true;
5021 }
5022
5023 case Intrinsic::nvvm_tcgen05_st_16x64b_x128:
5024 case Intrinsic::nvvm_tcgen05_st_16x128b_x64:
5025 case Intrinsic::nvvm_tcgen05_st_16x256b_x32:
5026 case Intrinsic::nvvm_tcgen05_st_32x32b_x128:
5027 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x128: {
5028 Info.opc = ISD::INTRINSIC_VOID;
5029 Info.memVT = MVT::v128i32;
5030 Info.ptrVal = I.getArgOperand(0);
5031 Info.offset = 0;
5032 Info.flags = MachineMemOperand::MOStore;
5033 Info.align.reset();
5034 return true;
5035 }
5036 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
5037 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
5038 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
5039 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
5040 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
5041 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
5042 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
5043 case Intrinsic::
5044 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
5045 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
5046 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
5047 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
5048 case Intrinsic::
5049 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift: {
5050 // We are reading and writing back to TMem
5051 Info.opc = ISD::INTRINSIC_VOID;
5052 Info.memVT = MVT::v4i32;
5053 Info.ptrVal = I.getArgOperand(0);
5054 Info.offset = 0;
5055 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
5056 Info.align = Align(16);
5057 return true;
5058 }
5059
5060 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
5061 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
5062 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
5063 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
5064 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
5065 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
5066 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
5067 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
5068 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
5069 case Intrinsic::
5070 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
5071 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
5072 case Intrinsic::
5073 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift: {
5074 // We are reading and writing back to TMem
5075 Info.opc = ISD::INTRINSIC_VOID;
5076 Info.memVT = MVT::v8i32;
5077 Info.ptrVal = I.getArgOperand(0);
5078 Info.offset = 0;
5079 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
5080 Info.align = Align(16);
5081 return true;
5082 }
5083 }
5084 return false;
5085}
5086
5087/// getFunctionParamOptimizedAlign - since function arguments are passed via
5088/// .param space, we may want to increase their alignment in a way that
5089/// ensures that we can effectively vectorize their loads & stores. We can
5090 /// increase alignment only if the function has internal or private
5091 /// linkage, as for other linkage types callers may already rely on the default
5092/// alignment. To allow using 128-bit vectorized loads/stores, this function
5093 /// ensures that alignment is 16 or greater.
5094 Align NVPTXTargetLowering::getFunctionParamOptimizedAlign(
5095 const Function *F, Type *ArgTy, const DataLayout &DL) const {
5096 // Capping the alignment to 128 bytes as that is the maximum alignment
5097 // supported by PTX.
5098 const Align ABITypeAlign = std::min(Align(128), DL.getABITypeAlign(ArgTy));
5099
5100 // If a function has linkage different from internal or private, we
5101 // must use default ABI alignment as external users rely on it. Same
5102 // for a function that may be called from a function pointer.
5103 if (!F || !F->hasLocalLinkage() ||
5104 F->hasAddressTaken(/*Users=*/nullptr,
5105 /*IgnoreCallbackUses=*/false,
5106 /*IgnoreAssumeLikeCalls=*/true,
5107 /*IgnoreLLVMUsed=*/true))
5108 return ABITypeAlign;
5109
5110 assert(!isKernelFunction(*F) && "Expect kernels to have non-local linkage");
5111 return std::max(Align(16), ABITypeAlign);
5112}
5113
5114 /// Helper for computing alignment of a device function byval parameter.
5115 Align NVPTXTargetLowering::getFunctionByValParamAlign(
5116 const Function *F, Type *ArgTy, Align InitialAlign,
5117 const DataLayout &DL) const {
5118 Align ArgAlign = InitialAlign;
5119 // Try to increase alignment to enhance vectorization options.
5120 if (F)
5121 ArgAlign = std::max(ArgAlign, getFunctionParamOptimizedAlign(F, ArgTy, DL));
5122
5123 // Old ptx versions have a bug. When PTX code takes address of
5124 // byval parameter with alignment < 4, ptxas generates code to
5125 // spill argument into memory. Alas on sm_50+ ptxas generates
5126 // SASS code that fails with misaligned access. To work around
5127 // the problem, make sure that we align byval parameters by at
5128 // least 4. This bug seems to be fixed at least starting from
5129 // ptxas > 9.0.
5130 // TODO: remove this after verifying the bug is not reproduced
5131 // on non-deprecated ptxas versions.
5132 if (ForceMinByValParamAlign)
5133 ArgAlign = std::max(ArgAlign, Align(4));
5134
5135 return ArgAlign;
5136}
5137
5138// Helper for getting a function parameter name. Name is composed from
5139// its index and the function name. Negative index corresponds to special
5140 // parameter (unsized array) used for passing variable arguments.
5141 std::string NVPTXTargetLowering::getParamName(const Function *F,
5142 int Idx) const {
5143 std::string ParamName;
5144 raw_string_ostream ParamStr(ParamName);
5145
5146 ParamStr << getTargetMachine().getSymbol(F)->getName();
5147 if (Idx < 0)
5148 ParamStr << "_vararg";
5149 else
5150 ParamStr << "_param_" << Idx;
5151
5152 return ParamName;
5153}
5154
5155/// isLegalAddressingMode - Return true if the addressing mode represented
5156/// by AM is legal for this target, for a load/store of the specified type.
5157/// Used to guide target specific optimizations, like loop strength reduction
5158/// (LoopStrengthReduce.cpp) and memory optimization for address mode
5159 // (CodeGenPrepare.cpp)
5160 bool NVPTXTargetLowering::isLegalAddressingMode(const DataLayout &DL,
5161 const AddrMode &AM, Type *Ty,
5162 unsigned AS, Instruction *I) const {
5163 // AddrMode - This represents an addressing mode of:
5164 // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
5165 //
5166 // The legal address modes are
5167 // - [avar]
5168 // - [areg]
5169 // - [areg+immoff]
5170 // - [immAddr]
5171
5172 // immoff must fit in a signed 32-bit int
5173 if (!APInt(64, AM.BaseOffs).isSignedIntN(32))
5174 return false;
5175
5176 if (AM.BaseGV)
5177 return !AM.BaseOffs && !AM.HasBaseReg && !AM.Scale;
5178
5179 switch (AM.Scale) {
5180 case 0: // "r", "r+i" or "i" is allowed
5181 break;
5182 case 1:
5183 if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
5184 return false;
5185 // Otherwise we have r+i.
5186 break;
5187 default:
5188 // No scale > 1 is allowed
5189 return false;
5190 }
5191 return true;
5192}
5193
5194//===----------------------------------------------------------------------===//
5195// NVPTX Inline Assembly Support
5196//===----------------------------------------------------------------------===//
5197
5198/// getConstraintType - Given a constraint letter, return the type of
5199 /// constraint it is for this target.
5200 NVPTXTargetLowering::ConstraintType
5201 NVPTXTargetLowering::getConstraintType(StringRef Constraint) const {
5202 if (Constraint.size() == 1) {
5203 switch (Constraint[0]) {
5204 default:
5205 break;
5206 case 'b':
5207 case 'r':
5208 case 'h':
5209 case 'c':
5210 case 'l':
5211 case 'f':
5212 case 'd':
5213 case 'q':
5214 case '0':
5215 case 'N':
5216 return C_RegisterClass;
5217 }
5218 }
5219 return TargetLowering::getConstraintType(Constraint);
5220}
5221
5222 std::pair<unsigned, const TargetRegisterClass *>
5223 NVPTXTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
5224 StringRef Constraint,
5225 MVT VT) const {
5226 if (Constraint.size() == 1) {
5227 switch (Constraint[0]) {
5228 case 'b':
5229 return std::make_pair(0U, &NVPTX::B1RegClass);
5230 case 'c':
5231 case 'h':
5232 return std::make_pair(0U, &NVPTX::B16RegClass);
5233 case 'r':
5234 case 'f':
5235 return std::make_pair(0U, &NVPTX::B32RegClass);
5236 case 'l':
5237 case 'N':
5238 case 'd':
5239 return std::make_pair(0U, &NVPTX::B64RegClass);
5240 case 'q': {
5241 if (STI.getSmVersion() < 70)
5242 report_fatal_error("Inline asm with 128 bit operands is only "
5243 "supported for sm_70 and higher!");
5244 return std::make_pair(0U, &NVPTX::B128RegClass);
5245 }
5246 }
5247 }
5248 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
5249}
5250
5251//===----------------------------------------------------------------------===//
5252// NVPTX DAG Combining
5253//===----------------------------------------------------------------------===//
5254
5255 bool NVPTXTargetLowering::allowFMA(MachineFunction &MF,
5256 CodeGenOptLevel OptLevel) const {
5257 // Always honor command-line argument
5258 if (FMAContractLevelOpt.getNumOccurrences() > 0)
5259 return FMAContractLevelOpt > 0;
5260
5261 // Do not contract if we're not optimizing the code.
5262 if (OptLevel == CodeGenOptLevel::None)
5263 return false;
5264
5265 // Honor TargetOptions flags that explicitly say fusion is okay.
5266 if (MF.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast)
5267 return true;
5268
5269 return false;
5270}
5271
5272static bool isConstZero(const SDValue &Operand) {
5273 const auto *Const = dyn_cast<ConstantSDNode>(Operand);
5274 return Const && Const->getZExtValue() == 0;
5275}
5276
5277/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
5278/// operands N0 and N1. This is a helper for PerformADDCombine that is
5279/// called with the default operands, and if that fails, with commuted
5280/// operands.
5281 static SDValue
5282 PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
5283 TargetLowering::DAGCombinerInfo &DCI) {
5284 EVT VT = N0.getValueType();
5285
5286 // Since integer multiply-add costs the same as integer multiply
5287 // but is more costly than integer add, do the fusion only when
5288 // the mul is only used in the add.
5289 // TODO: this may not be true for later architectures, consider relaxing this
5290 if (!N0.getNode()->hasOneUse())
5291 return SDValue();
5292
5293 // fold (add (select cond, 0, (mul a, b)), c)
5294 // -> (select cond, c, (add (mul a, b), c))
5295 //
5296 if (N0.getOpcode() == ISD::SELECT) {
5297 unsigned ZeroOpNum;
5298 if (isConstZero(N0->getOperand(1)))
5299 ZeroOpNum = 1;
5300 else if (isConstZero(N0->getOperand(2)))
5301 ZeroOpNum = 2;
5302 else
5303 return SDValue();
5304
5305 SDValue M = N0->getOperand((ZeroOpNum == 1) ? 2 : 1);
5306 if (M->getOpcode() != ISD::MUL || !M.getNode()->hasOneUse())
5307 return SDValue();
5308
5309 SDLoc DL(N);
5310 SDValue Mul =
5311 DCI.DAG.getNode(ISD::MUL, DL, VT, M->getOperand(0), M->getOperand(1));
5312 SDValue MAD = DCI.DAG.getNode(ISD::ADD, DL, VT, Mul, N1);
5313 return DCI.DAG.getSelect(SDLoc(N), VT, N0->getOperand(0),
5314 ((ZeroOpNum == 1) ? N1 : MAD),
5315 ((ZeroOpNum == 1) ? MAD : N1));
5316 }
5317
5318 return SDValue();
5319}
5320
5321 static SDValue
5322 PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
5323 TargetLowering::DAGCombinerInfo &DCI,
5324 CodeGenOptLevel OptLevel) {
5325 EVT VT = N0.getValueType();
5326 if (N0.getOpcode() == ISD::FMUL) {
5327 const auto *TLI = static_cast<const NVPTXTargetLowering *>(
5328 &DCI.DAG.getTargetLoweringInfo());
5329 if (!(TLI->allowFMA(DCI.DAG.getMachineFunction(), OptLevel) ||
5330 (N->getFlags().hasAllowContract() &&
5331 N0->getFlags().hasAllowContract())))
5332 return SDValue();
5333
5334 // For floating point:
5335 // Do the fusion only when the mul has less than 5 uses and all
5336 // are add.
5337 // The heuristic is that if a use is not an add, then that use
5338 // cannot be fused into fma, therefore mul is still needed anyway.
5339 // If there are more than 4 uses, even if they are all add, fusing
5340 // them will increase register pressure.
5341 //
5342 int numUses = 0;
5343 int nonAddCount = 0;
5344 for (const SDNode *User : N0.getNode()->users()) {
5345 numUses++;
5346 if (User->getOpcode() != ISD::FADD)
5347 ++nonAddCount;
5348 if (numUses >= 5)
5349 return SDValue();
5350 }
5351 if (nonAddCount) {
5352 int orderNo = N->getIROrder();
5353 int orderNo2 = N0.getNode()->getIROrder();
5354 // Simple heuristic here for considering potential register
5355 // pressure: the difference in IR order is used to measure the
5356 // distance between def and use; the longer the distance, the more
5357 // likely it is to cause register pressure.
5358 if (orderNo - orderNo2 < 500)
5359 return SDValue();
5360
5361 // Now, check if at least one of the FMUL's operands is live beyond the
5362 // node N, which guarantees that the FMA will not increase register
5363 // pressure at node N.
5364 bool opIsLive = false;
5365 const SDNode *left = N0.getOperand(0).getNode();
5366 const SDNode *right = N0.getOperand(1).getNode();
5367
5368 if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
5369 opIsLive = true;
5370
5371 if (!opIsLive)
5372 for (const SDNode *User : left->users()) {
5373 int orderNo3 = User->getIROrder();
5374 if (orderNo3 > orderNo) {
5375 opIsLive = true;
5376 break;
5377 }
5378 }
5379
5380 if (!opIsLive)
5381 for (const SDNode *User : right->users()) {
5382 int orderNo3 = User->getIROrder();
5383 if (orderNo3 > orderNo) {
5384 opIsLive = true;
5385 break;
5386 }
5387 }
5388
5389 if (!opIsLive)
5390 return SDValue();
5391 }
5392
5393 return DCI.DAG.getNode(ISD::FMA, SDLoc(N), VT, N0.getOperand(0),
5394 N0.getOperand(1), N1);
5395 }
5396
5397 return SDValue();
5398}
5399
5400/// Fold unpacking movs into a load by increasing the number of return values.
5401///
5402/// ex:
5403/// L: v2f16,ch = load <p>
5404/// a: f16 = extractelt L:0, 0
5405/// b: f16 = extractelt L:0, 1
5406/// use(a, b)
5407///
5408/// ...is turned into...
5409///
5410/// L: f16,f16,ch = LoadV2 <p>
5411/// use(L:0, L:1)
5412 static SDValue
5413 combineUnpackingMovIntoLoad(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
5414 // Don't run this optimization before the legalizer
5415 if (!DCI.isAfterLegalizeDAG())
5416 return SDValue();
5417
5418 EVT ElementVT = N->getValueType(0);
5419 // Avoid non-packed types and v4i8
5420 if (!NVPTX::isPackedVectorTy(ElementVT) || ElementVT == MVT::v4i8)
5421 return SDValue();
5422
5423 SmallVector<SDNode *> DeadCopyToRegs;
5424
5425 // Check whether all outputs are either used by an extractelt or are
5426 // glue/chain nodes
5427 if (!all_of(N->uses(), [&](SDUse &U) {
5428 // Skip glue, chain nodes
5429 if (U.getValueType() == MVT::Glue || U.getValueType() == MVT::Other)
5430 return true;
5431 if (U.getUser()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
5432 if (N->getOpcode() != ISD::LOAD)
5433 return true;
5434 // Since this is an ISD::LOAD, check all extractelts are used. If
5435 // any are not used, we don't want to defeat another optimization that
5436 // will narrow the load.
5437 //
5438 // For example:
5439 //
5440 // L: v2f16,ch = load <p>
5441 // e0: f16 = extractelt L:0, 0
5442 // e1: f16 = extractelt L:0, 1 <-- unused
5443 // store e0
5444 //
5445 // Can be optimized by DAGCombiner to:
5446 //
5447 // L: f16,ch = load <p>
5448 // store L:0
5449 return !U.getUser()->use_empty();
5450 }
5451
5452 // Otherwise, this use prevents us from splitting a value.
5453 return false;
5454 }))
5455 return SDValue();
5456
5457 auto *LD = cast<MemSDNode>(N);
5458 SDLoc DL(LD);
5459
5460 // the new opcode after we double the number of operands
5461 NVPTXISD::NodeType Opcode;
5462 SmallVector<SDValue> Operands(LD->ops());
5463 unsigned OldNumOutputs; // non-glue, non-chain outputs
5464 switch (LD->getOpcode()) {
5465 case ISD::LOAD:
5466 OldNumOutputs = 1;
5467 // Any packed type is legal, so the legalizer will not have lowered
5468 // ISD::LOAD -> NVPTXISD::Load (unless it's under-aligned). We have to do it
5469 // here.
5470 Opcode = NVPTXISD::LoadV2;
5471 Operands.push_back(DCI.DAG.getIntPtrConstant(
5472 cast<LoadSDNode>(LD)->getExtensionType(), DL));
5473 break;
5474 case NVPTXISD::LoadV2:
5475 OldNumOutputs = 2;
5476 Opcode = NVPTXISD::LoadV4;
5477 break;
5478 case NVPTXISD::LoadV4:
5479 // V8 is only supported for f32. Don't forget, we're not changing the load
5480 // size here. This is already a 256-bit load.
5481 if (ElementVT != MVT::v2f32)
5482 return SDValue();
5483 OldNumOutputs = 4;
5484 Opcode = NVPTXISD::LoadV8;
5485 break;
5486 case NVPTXISD::LoadV8:
5487 // PTX doesn't support the next doubling of outputs
5488 return SDValue();
5489 }
5490
5491 // the non-glue, non-chain outputs in the new load
5492 const unsigned NewNumOutputs = OldNumOutputs * 2;
5493 SmallVector<EVT> NewVTs(NewNumOutputs, ElementVT.getVectorElementType());
5494 // add remaining chain and glue values
5495 NewVTs.append(LD->value_begin() + OldNumOutputs, LD->value_end());
5496
5497 // Create the new load
5498 SDValue NewLoad = DCI.DAG.getMemIntrinsicNode(
5499 Opcode, DL, DCI.DAG.getVTList(NewVTs), Operands, LD->getMemoryVT(),
5500 LD->getMemOperand());
5501
5502 // Now we use a combination of BUILD_VECTORs and a MERGE_VALUES node to keep
5503 // the outputs the same. These nodes will be optimized away in later
5504 // DAGCombiner iterations.
5505 SmallVector<SDValue> Results;
5506 for (unsigned I : seq(OldNumOutputs))
5507 Results.push_back(DCI.DAG.getBuildVector(
5508 ElementVT, DL, {NewLoad.getValue(I * 2), NewLoad.getValue(I * 2 + 1)}));
5509 // Add remaining chain and glue nodes
5510 for (unsigned I : seq(NewLoad->getNumValues() - NewNumOutputs))
5511 Results.push_back(NewLoad.getValue(NewNumOutputs + I));
5512
5513 return DCI.DAG.getMergeValues(Results, DL);
5514}
5515
5516/// Fold packing movs into a store.
5517///
5518/// ex:
5519/// v1: v2f16 = BUILD_VECTOR a:f16, b:f16
5520/// v2: v2f16 = BUILD_VECTOR c:f16, d:f16
5521/// StoreV2 v1, v2
5522///
5523/// ...is turned into...
5524///
5525 /// StoreV4 a, b, c, d
5526 static SDValue combinePackingMovIntoStore(SDNode *N,
5527 TargetLowering::DAGCombinerInfo &DCI,
5528 unsigned Front, unsigned Back) {
5529 // We want to run this as late as possible since other optimizations may
5530 // eliminate the BUILD_VECTORs.
5531 if (!DCI.isAfterLegalizeDAG())
5532 return SDValue();
5533
5534 // Get the type of the operands being stored.
5535 EVT ElementVT = N->getOperand(Front).getValueType();
5536
5537 // Avoid non-packed types and v4i8
5538 if (!NVPTX::isPackedVectorTy(ElementVT) || ElementVT == MVT::v4i8)
5539 return SDValue();
5540
5541 auto *ST = cast<MemSDNode>(N);
5542
5543 // The new opcode after we double the number of operands.
5544 NVPTXISD::NodeType Opcode;
5545 switch (N->getOpcode()) {
5546 case ISD::STORE:
5547 // Any packed type is legal, so the legalizer will not have lowered
5548 // ISD::STORE -> NVPTXISD::Store (unless it's under-aligned). We have to do
5549 // it here.
5550 Opcode = NVPTXISD::StoreV2;
5551 break;
5552 case NVPTXISD::StoreV2:
5553 Opcode = NVPTXISD::StoreV4;
5554 break;
5555 case NVPTXISD::StoreV4:
5556 // V8 is only supported for f32. Don't forget, we're not changing the store
5557 // size here. This is already a 256-bit store.
5558 if (ElementVT != MVT::v2f32)
5559 return SDValue();
5560 Opcode = NVPTXISD::StoreV8;
5561 break;
5562 case NVPTXISD::StoreV8:
5563 // PTX doesn't support the next doubling of operands
5564 return SDValue();
5565 default:
5566 llvm_unreachable("Unhandled store opcode");
5567 }
5568
5569 // Scan the operands and if they're all BUILD_VECTORs, we'll have gathered
5570 // their elements.
5571 SmallVector<SDValue, 4> Operands(N->ops().take_front(Front));
5572 for (SDValue BV : N->ops().drop_front(Front).drop_back(Back)) {
5573 if (BV.getOpcode() != ISD::BUILD_VECTOR)
5574 return SDValue();
5575
5576 // If the operand has multiple uses, this optimization can increase register
5577 // pressure.
5578 if (!BV.hasOneUse())
5579 return SDValue();
5580
5581 // DAGCombiner visits nodes bottom-up. Check the BUILD_VECTOR operands for
5582 // any signs they may be folded by some other pattern or rule.
5583 for (SDValue Op : BV->ops()) {
5584 // Peek through bitcasts
5585 if (Op.getOpcode() == ISD::BITCAST)
5586 Op = Op.getOperand(0);
5587
5588 // This may be folded into a PRMT.
5589 if (Op.getValueType() == MVT::i16 && Op.getOpcode() == ISD::TRUNCATE &&
5590 Op->getOperand(0).getValueType() == MVT::i32)
5591 return SDValue();
5592
5593 // This may be folded into cvt.bf16x2
5594 if (Op.getOpcode() == ISD::FP_ROUND)
5595 return SDValue();
5596 }
5597 Operands.append({BV.getOperand(0), BV.getOperand(1)});
5598 }
5599 Operands.append(N->op_end() - Back, N->op_end());
5600
5601 // Now we replace the store
5602 return DCI.DAG.getMemIntrinsicNode(Opcode, SDLoc(N), N->getVTList(), Operands,
5603 ST->getMemoryVT(), ST->getMemOperand());
5604}
5605
5606 static SDValue combineSTORE(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
5607 const NVPTXSubtarget &STI) {
5608
5609 if (DCI.isBeforeLegalize() && N->getOpcode() == ISD::STORE) {
5610 // Here is our chance to custom lower a store with a non-simple type.
5611 // Unfortunately, we can't do this in the legalizer because there is no
5612 // way to setOperationAction for a non-simple type.
5613 StoreSDNode *ST = cast<StoreSDNode>(N);
5614 if (!ST->getValue().getValueType().isSimple())
5615 return lowerSTOREVector(SDValue(ST, 0), DCI.DAG, STI);
5616 }
5617
5618 return combinePackingMovIntoStore(N, DCI, 1, 2);
5619}
5620
5621 static SDValue combineLOAD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
5622 const NVPTXSubtarget &STI) {
5623 if (DCI.isBeforeLegalize() && N->getOpcode() == ISD::LOAD) {
5624 // Here is our chance to custom lower a load with a non-simple type.
5625 // Unfortunately, we can't do this in the legalizer because there is no
5626 // way to setOperationAction for a non-simple type.
5627 if (!N->getValueType(0).isSimple())
5628 return lowerLoadVector(N, DCI.DAG, STI);
5629 }
5630
5631 return combineUnpackingMovIntoLoad(N, DCI);
5632}
5633
5634/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
5635///
5638 CodeGenOptLevel OptLevel) {
5639 if (OptLevel == CodeGenOptLevel::None)
5640 return SDValue();
5641
5642 SDValue N0 = N->getOperand(0);
5643 SDValue N1 = N->getOperand(1);
5644
5645 // Skip non-integer, non-scalar case
5646 EVT VT = N0.getValueType();
5647 if (VT.isVector() || VT != MVT::i32)
5648 return SDValue();
5649
5650 // First try with the default operand order.
5651 if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI))
5652 return Result;
5653
5654 // If that didn't work, try again with the operands commuted.
5655 return PerformADDCombineWithOperands(N, N1, N0, DCI);
5656}
5657
5658/// PerformFADDCombine - Target-specific dag combine xforms for ISD::FADD.
5659///
5662 CodeGenOptLevel OptLevel) {
5663 SDValue N0 = N->getOperand(0);
5664 SDValue N1 = N->getOperand(1);
5665
5666 EVT VT = N0.getValueType();
5667 if (VT.isVector() || !(VT == MVT::f32 || VT == MVT::f64))
5668 return SDValue();
5669
5670 // First try with the default operand order.
5671 if (SDValue Result = PerformFADDCombineWithOperands(N, N0, N1, DCI, OptLevel))
5672 return Result;
5673
5674 // If that didn't work, try again with the operands commuted.
5675 return PerformFADDCombineWithOperands(N, N1, N0, DCI, OptLevel);
5676}
5677
5678/// Get 3-input version of a 2-input min/max opcode
5679static NVPTXISD::NodeType getMinMax3Opcode(unsigned MinMax2Opcode) {
5680 switch (MinMax2Opcode) {
5681 case ISD::FMAXNUM:
5682 case ISD::FMAXIMUMNUM:
5683 return NVPTXISD::FMAXNUM3;
5684 case ISD::FMINNUM:
5685 case ISD::FMINIMUMNUM:
5686 return NVPTXISD::FMINNUM3;
5687 case ISD::FMAXIMUM:
5688 return NVPTXISD::FMAXIMUM3;
5689 case ISD::FMINIMUM:
5690 return NVPTXISD::FMINIMUM3;
5691 default:
5692 llvm_unreachable("Invalid 2-input min/max opcode");
5693 }
5694}
5695
5696/// PerformFMinMaxCombine - Combine (fmaxnum (fmaxnum a, b), c) into
5697 /// (fmaxnum3 a, b, c). Also covers other llvm min/max intrinsics.
5698 static SDValue PerformFMinMaxCombine(SDNode *N,
5699 TargetLowering::DAGCombinerInfo &DCI,
5700 unsigned PTXVersion, unsigned SmVersion) {
5701
5702 // 3-input min/max requires PTX 8.8+ and SM_100+, and only supports f32s
5703 EVT VT = N->getValueType(0);
5704 if (VT != MVT::f32 || PTXVersion < 88 || SmVersion < 100)
5705 return SDValue();
5706
5707 SDValue Op0 = N->getOperand(0);
5708 SDValue Op1 = N->getOperand(1);
5709 unsigned MinMaxOp2 = N->getOpcode();
5710 NVPTXISD::NodeType MinMaxOp3 = getMinMax3Opcode(MinMaxOp2);
5711
5712 if (Op0.getOpcode() == MinMaxOp2 && Op0.hasOneUse()) {
5713 // (maxnum (maxnum a, b), c) -> (maxnum3 a, b, c)
5714 SDValue A = Op0.getOperand(0);
5715 SDValue B = Op0.getOperand(1);
5716 SDValue C = Op1;
5717 return DCI.DAG.getNode(MinMaxOp3, SDLoc(N), VT, A, B, C, N->getFlags());
5718 } else if (Op1.getOpcode() == MinMaxOp2 && Op1.hasOneUse()) {
5719 // (maxnum a, (maxnum b, c)) -> (maxnum3 a, b, c)
5720 SDValue A = Op0;
5721 SDValue B = Op1.getOperand(0);
5722 SDValue C = Op1.getOperand(1);
5723 return DCI.DAG.getNode(MinMaxOp3, SDLoc(N), VT, A, B, C, N->getFlags());
5724 }
5725 return SDValue();
5726}
5727
5728 static SDValue PerformREMCombine(SDNode *N,
5729 TargetLowering::DAGCombinerInfo &DCI,
5730 CodeGenOptLevel OptLevel) {
5731 assert(N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM);
5732
5733 // Don't do anything at less than -O2.
5734 if (OptLevel < CodeGenOptLevel::Default)
5735 return SDValue();
5736
5737 SelectionDAG &DAG = DCI.DAG;
5738 SDLoc DL(N);
5739 EVT VT = N->getValueType(0);
5740 bool IsSigned = N->getOpcode() == ISD::SREM;
5741 unsigned DivOpc = IsSigned ? ISD::SDIV : ISD::UDIV;
5742
5743 const SDValue &Num = N->getOperand(0);
5744 const SDValue &Den = N->getOperand(1);
5745
5746 for (const SDNode *U : Num->users()) {
5747 if (U->getOpcode() == DivOpc && U->getOperand(0) == Num &&
5748 U->getOperand(1) == Den) {
5749 // Num % Den -> Num - (Num / Den) * Den
5750 return DAG.getNode(ISD::SUB, DL, VT, Num,
5751 DAG.getNode(ISD::MUL, DL, VT,
5752 DAG.getNode(DivOpc, DL, VT, Num, Den),
5753 Den));
5754 }
5755 }
5756 return SDValue();
5757}
5758
5759 // (sign_extend|zero_extend (mul|shl) x, y) -> (mul.wide x, y)
5760 static SDValue combineMulWide(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
5761 CodeGenOptLevel OptLevel) {
5762 if (OptLevel == CodeGenOptLevel::None)
5763 return SDValue();
5764
5765 SDValue Op = N->getOperand(0);
5766 if (!Op.hasOneUse())
5767 return SDValue();
5768 EVT ToVT = N->getValueType(0);
5769 EVT FromVT = Op.getValueType();
5770 if (!((ToVT == MVT::i32 && FromVT == MVT::i16) ||
5771 (ToVT == MVT::i64 && FromVT == MVT::i32)))
5772 return SDValue();
5773 if (!(Op.getOpcode() == ISD::MUL ||
5774 (Op.getOpcode() == ISD::SHL && isa<ConstantSDNode>(Op.getOperand(1)))))
5775 return SDValue();
5776
5777 SDLoc DL(N);
5778 unsigned ExtOpcode = N->getOpcode();
5779 unsigned Opcode = 0;
5780 if (ExtOpcode == ISD::SIGN_EXTEND && Op->getFlags().hasNoSignedWrap())
5781 Opcode = NVPTXISD::MUL_WIDE_SIGNED;
5782 else if (ExtOpcode == ISD::ZERO_EXTEND && Op->getFlags().hasNoUnsignedWrap())
5783 Opcode = NVPTXISD::MUL_WIDE_UNSIGNED;
5784 else
5785 return SDValue();
5786 SDValue RHS = Op.getOperand(1);
5787 if (Op.getOpcode() == ISD::SHL) {
5788 const auto ShiftAmt = Op.getConstantOperandVal(1);
5789 const auto MulVal = APInt(ToVT.getSizeInBits(), 1) << ShiftAmt;
5790 RHS = DCI.DAG.getConstant(MulVal, DL, ToVT);
5791 }
5792 return DCI.DAG.getNode(Opcode, DL, ToVT, Op.getOperand(0), RHS);
5793}
5794
5795 enum OperandSignedness {
5796 Signed = 0,
5797 Unsigned,
5798 Unknown
5799 };
5800
5801/// IsMulWideOperandDemotable - Checks if the provided DAG node is an operand
5802/// that can be demoted to \p OptSize bits without loss of information. The
5803 /// signedness of the operand, if determinable, is placed in \p S.
5804 static bool IsMulWideOperandDemotable(SDValue Op,
5805 unsigned OptSize,
5806 OperandSignedness &S) {
5807 S = Unknown;
5808
5809 if (Op.getOpcode() == ISD::SIGN_EXTEND ||
5810 Op.getOpcode() == ISD::SIGN_EXTEND_INREG) {
5811 EVT OrigVT = Op.getOperand(0).getValueType();
5812 if (OrigVT.getFixedSizeInBits() <= OptSize) {
5813 S = Signed;
5814 return true;
5815 }
5816 } else if (Op.getOpcode() == ISD::ZERO_EXTEND) {
5817 EVT OrigVT = Op.getOperand(0).getValueType();
5818 if (OrigVT.getFixedSizeInBits() <= OptSize) {
5819 S = Unsigned;
5820 return true;
5821 }
5822 }
5823
5824 return false;
5825}
5826
5827/// AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can
5828/// be demoted to \p OptSize bits without loss of information. If the operands
5829/// contain a constant, it should appear as the RHS operand. The signedness of
5830 /// the operands is placed in \p IsSigned.
5831 static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS,
5832 unsigned OptSize,
5833 bool &IsSigned) {
5834 OperandSignedness LHSSign;
5835
5836 // The LHS operand must be a demotable op
5837 if (!IsMulWideOperandDemotable(LHS, OptSize, LHSSign))
5838 return false;
5839
5840 // We should have been able to determine the signedness from the LHS
5841 if (LHSSign == Unknown)
5842 return false;
5843
5844 IsSigned = (LHSSign == Signed);
5845
5846 // The RHS can be a demotable op or a constant
5847 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(RHS)) {
5848 const APInt &Val = CI->getAPIntValue();
5849 if (LHSSign == Unsigned) {
5850 return Val.isIntN(OptSize);
5851 } else {
5852 return Val.isSignedIntN(OptSize);
5853 }
5854 } else {
5855 OperandSignedness RHSSign;
5856 if (!IsMulWideOperandDemotable(RHS, OptSize, RHSSign))
5857 return false;
5858
5859 return LHSSign == RHSSign;
5860 }
5861}
5862
5863/// TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply
5864/// of M/2 bits that produces an M-bit result (i.e. mul.wide). This transform
5865/// works on both multiply DAG nodes and SHL DAG nodes with a constant shift
5866 /// amount.
5867 static SDValue TryMULWIDECombine(SDNode *N,
5868 TargetLowering::DAGCombinerInfo &DCI) {
5869 EVT MulType = N->getValueType(0);
5870 if (MulType != MVT::i32 && MulType != MVT::i64) {
5871 return SDValue();
5872 }
5873
5874 SDLoc DL(N);
5875 unsigned OptSize = MulType.getSizeInBits() >> 1;
5876 SDValue LHS = N->getOperand(0);
5877 SDValue RHS = N->getOperand(1);
5878
5879 // Canonicalize the multiply so the constant (if any) is on the right
5880 if (N->getOpcode() == ISD::MUL) {
5881 if (isa<ConstantSDNode>(LHS)) {
5882 std::swap(LHS, RHS);
5883 }
5884 }
5885
5886 // If we have a SHL, determine the actual multiply amount
5887 if (N->getOpcode() == ISD::SHL) {
5888 ConstantSDNode *ShlRHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
5889 if (!ShlRHS) {
5890 return SDValue();
5891 }
5892
5893 APInt ShiftAmt = ShlRHS->getAPIntValue();
5894 unsigned BitWidth = MulType.getSizeInBits();
5895 if (ShiftAmt.sge(0) && ShiftAmt.slt(BitWidth)) {
5896 APInt MulVal = APInt(BitWidth, 1) << ShiftAmt;
5897 RHS = DCI.DAG.getConstant(MulVal, DL, MulType);
5898 } else {
5899 return SDValue();
5900 }
5901 }
5902
5903 bool Signed;
5904 // Verify that our operands are demotable
5905 if (!AreMulWideOperandsDemotable(LHS, RHS, OptSize, Signed)) {
5906 return SDValue();
5907 }
5908
5909 EVT DemotedVT;
5910 if (MulType == MVT::i32) {
5911 DemotedVT = MVT::i16;
5912 } else {
5913 DemotedVT = MVT::i32;
5914 }
5915
5916 // Truncate the operands to the correct size. Note that these are just for
5917 // type consistency and will (likely) be eliminated in later phases.
5918 SDValue TruncLHS =
5919 DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, LHS);
5920 SDValue TruncRHS =
5921 DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, RHS);
5922
5923 unsigned Opc;
5924 if (Signed) {
5925 Opc = NVPTXISD::MUL_WIDE_SIGNED;
5926 } else {
5927 Opc = NVPTXISD::MUL_WIDE_UNSIGNED;
5928 }
5929
5930 return DCI.DAG.getNode(Opc, DL, MulType, TruncLHS, TruncRHS);
5931}
5932
5933static bool isConstOne(const SDValue &Operand) {
5934 const auto *Const = dyn_cast<ConstantSDNode>(Operand);
5935 return Const && Const->getZExtValue() == 1;
5936}
5937
5938 static SDValue matchMADConstOnePattern(SDValue Add) {
5939 if (Add->getOpcode() != ISD::ADD)
5940 return SDValue();
5941
5942 if (isConstOne(Add->getOperand(0)))
5943 return Add->getOperand(1);
5944
5945 if (isConstOne(Add->getOperand(1)))
5946 return Add->getOperand(0);
5947
5948 return SDValue();
5949}
5950
5951 static SDValue combineMADConstOne(SDValue X, SDValue Add, EVT VT, SDLoc DL,
5952 TargetLowering::DAGCombinerInfo &DCI) {
5953
5954 if (SDValue Y = matchMADConstOnePattern(Add)) {
5955 SDValue Mul = DCI.DAG.getNode(ISD::MUL, DL, VT, X, Y);
5956 return DCI.DAG.getNode(ISD::ADD, DL, VT, Mul, X);
5957 }
5958
5959 return SDValue();
5960}
5961
5962 static SDValue combineMulSelectConstOne(SDValue X, SDValue Select, EVT VT,
5963 SDLoc DL,
5964 TargetLowering::DAGCombinerInfo &DCI) {
5965 if (Select->getOpcode() != ISD::SELECT)
5966 return SDValue();
5967
5968 SDValue Cond = Select->getOperand(0);
5969
5970 unsigned ConstOpNo;
5971 if (isConstOne(Select->getOperand(1)))
5972 ConstOpNo = 1;
5973 else if (isConstOne(Select->getOperand(2)))
5974 ConstOpNo = 2;
5975 else
5976 return SDValue();
5977
5978 SDValue Y = Select->getOperand((ConstOpNo == 1) ? 2 : 1);
5979
5980 // Do not combine if the resulting sequence is not obviously profitable.
5981 if (!matchMADConstOnePattern(Y))
5982 return SDValue();
5983
5984 SDValue NewMul = DCI.DAG.getNode(ISD::MUL, DL, VT, X, Y);
5985
5986 return DCI.DAG.getNode(ISD::SELECT, DL, VT, Cond,
5987 (ConstOpNo == 1) ? X : NewMul,
5988 (ConstOpNo == 1) ? NewMul : X);
5989}
5990
5991 static SDValue
5992 PerformMULCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
5993 TargetLowering::DAGCombinerInfo &DCI) {
5994
5995 EVT VT = N0.getValueType();
5996 if (VT.isVector())
5997 return SDValue();
5998
5999 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
6000 return SDValue();
6001
6002 SDLoc DL(N);
6003
6004 // (mul x, (add y, 1)) -> (add (mul x, y), x)
6005 if (SDValue Res = combineMADConstOne(N0, N1, VT, DL, DCI))
6006 return Res;
6007 if (SDValue Res = combineMADConstOne(N1, N0, VT, DL, DCI))
6008 return Res;
6009
6010 // (mul x, (select y, 1)) -> (select (mul x, y), x)
6011 if (SDValue Res = combineMulSelectConstOne(N0, N1, VT, DL, DCI))
6012 return Res;
6013 if (SDValue Res = combineMulSelectConstOne(N1, N0, VT, DL, DCI))
6014 return Res;
6015
6016 return SDValue();
6017}
6018
6019/// PerformMULCombine - Runs PTX-specific DAG combine patterns on MUL nodes.
6022 CodeGenOptLevel OptLevel) {
6023 if (OptLevel == CodeGenOptLevel::None)
6024 return SDValue();
6025
6026 if (SDValue Ret = TryMULWIDECombine(N, DCI))
6027 return Ret;
6028
6029 SDValue N0 = N->getOperand(0);
6030 SDValue N1 = N->getOperand(1);
6031 return PerformMULCombineWithOperands(N, N0, N1, DCI);
6032}
6033
6034/// PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
6037 CodeGenOptLevel OptLevel) {
6038 if (OptLevel > CodeGenOptLevel::None) {
6039 // Try mul.wide combining at OptLevel > 0
6040 if (SDValue Ret = TryMULWIDECombine(N, DCI))
6041 return Ret;
6042 }
6043
6044 return SDValue();
6045}
6046
6047 static SDValue PerformSETCCCombine(SDNode *N,
6048 TargetLowering::DAGCombinerInfo &DCI,
6049 unsigned int SmVersion) {
6050 EVT CCType = N->getValueType(0);
6051 SDValue A = N->getOperand(0);
6052 SDValue B = N->getOperand(1);
6053
6054 EVT AType = A.getValueType();
6055 if (!(CCType == MVT::v2i1 && (AType == MVT::v2f16 || AType == MVT::v2bf16)))
6056 return SDValue();
6057
6058 if (A.getValueType() == MVT::v2bf16 && SmVersion < 90)
6059 return SDValue();
6060
6061 SDLoc DL(N);
6062 // setp.f16x2 returns two scalar predicates, which we need to
6063 // convert back to v2i1. The returned result will be scalarized by
6064 // the legalizer, but the comparison will remain a single vector
6065 // instruction.
6066 SDValue CCNode = DCI.DAG.getNode(
6067 A.getValueType() == MVT::v2f16 ? NVPTXISD::SETP_F16X2
6069 DL, DCI.DAG.getVTList(MVT::i1, MVT::i1), {A, B, N->getOperand(2)});
6070 return DCI.DAG.getNode(ISD::BUILD_VECTOR, DL, CCType, CCNode.getValue(0),
6071 CCNode.getValue(1));
6072}
6073
6074 static SDValue PerformEXTRACTCombine(SDNode *N,
6075 TargetLowering::DAGCombinerInfo &DCI) {
6076 SDValue Vector = N->getOperand(0);
6077 if (Vector->getOpcode() == ISD::FREEZE)
6078 Vector = Vector->getOperand(0);
6079 SDLoc DL(N);
6080 EVT VectorVT = Vector.getValueType();
6081 if (Vector->getOpcode() == ISD::LOAD && VectorVT.isSimple() &&
6082 IsPTXVectorType(VectorVT.getSimpleVT()))
6083 return SDValue(); // Native vector loads already combine nicely w/
6084 // extract_vector_elt.
6085 // Don't mess with singletons or packed types (v2*32, v2*16, v4i8 and v8i8),
6086 // we already handle them OK.
6087 if (VectorVT.getVectorNumElements() == 1 ||
6088 NVPTX::isPackedVectorTy(VectorVT) || VectorVT == MVT::v8i8)
6089 return SDValue();
6090
6091 // Don't mess with undef values as sra may be simplified to 0, not undef.
6092 if (Vector->isUndef() || ISD::allOperandsUndef(Vector.getNode()))
6093 return SDValue();
6094
6095 uint64_t VectorBits = VectorVT.getSizeInBits();
6096 // We only handle the types we can extract in-register.
6097 if (!(VectorBits == 16 || VectorBits == 32 || VectorBits == 64))
6098 return SDValue();
6099
6100 ConstantSDNode *Index = dyn_cast<ConstantSDNode>(N->getOperand(1));
6101 // Index == 0 is handled by generic DAG combiner.
6102 if (!Index || Index->getZExtValue() == 0)
6103 return SDValue();
6104
6105 MVT IVT = MVT::getIntegerVT(VectorBits);
6106 EVT EltVT = VectorVT.getVectorElementType();
6107 EVT EltIVT = EltVT.changeTypeToInteger();
6108 uint64_t EltBits = EltVT.getScalarSizeInBits();
6109
6110 SDValue Result = DCI.DAG.getNode(
6111 ISD::TRUNCATE, DL, EltIVT,
6112 DCI.DAG.getNode(
6113 ISD::SRA, DL, IVT, DCI.DAG.getNode(ISD::BITCAST, DL, IVT, Vector),
6114 DCI.DAG.getConstant(Index->getZExtValue() * EltBits, DL, IVT)));
6115
6116 // If element has non-integer type, bitcast it back to the expected type.
6117 if (EltVT != EltIVT)
6118 Result = DCI.DAG.getNode(ISD::BITCAST, DL, EltVT, Result);
6119 // Past legalizer, we may need to extend i8 -> i16 to match the register type.
6120 if (EltVT != N->getValueType(0))
6121 Result = DCI.DAG.getNode(ISD::ANY_EXTEND, DL, N->getValueType(0), Result);
6122
6123 return Result;
6124}
6125
6126 static SDValue PerformVSELECTCombine(SDNode *N,
6127 TargetLowering::DAGCombinerInfo &DCI) {
6128 SDValue VA = N->getOperand(1);
6129 EVT VectorVT = VA.getValueType();
6130 if (VectorVT != MVT::v4i8)
6131 return SDValue();
6132
6133 // We need to split vselect into individual per-element operations. Because we
6134 // use BFE/BFI instruction for byte extraction/insertion, we do end up with
6135 // 32-bit values, so we may as well do comparison as i32 to avoid conversions
6136 // to/from i16 normally used for i8 values.
6137 SmallVector<SDValue, 4> E;
6138 SDLoc DL(N);
6139 SDValue VCond = N->getOperand(0);
6140 SDValue VB = N->getOperand(2);
6141 for (int I = 0; I < 4; ++I) {
6142 SDValue C = DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i1, VCond,
6143 DCI.DAG.getConstant(I, DL, MVT::i32));
6144 SDValue EA = DCI.DAG.getAnyExtOrTrunc(
6145 DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i8, VA,
6146 DCI.DAG.getConstant(I, DL, MVT::i32)),
6147 DL, MVT::i32);
6148 SDValue EB = DCI.DAG.getAnyExtOrTrunc(
6149 DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i8, VB,
6150 DCI.DAG.getConstant(I, DL, MVT::i32)),
6151 DL, MVT::i32);
6152 E.push_back(DCI.DAG.getAnyExtOrTrunc(
6153 DCI.DAG.getNode(ISD::SELECT, DL, MVT::i32, C, EA, EB), DL, MVT::i8));
6154 }
6155 return DCI.DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i8, E);
6156}
6157
6158 static SDValue
6159 PerformBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
6160 auto VT = N->getValueType(0);
6161 if (!DCI.isAfterLegalizeDAG() ||
6162 // only process v2*16 types
6163 !(NVPTX::isPackedVectorTy(VT) && VT.is32BitVector() &&
6164 VT.getVectorNumElements() == 2))
6165 return SDValue();
6166
6167 auto Op0 = N->getOperand(0);
6168 auto Op1 = N->getOperand(1);
6169
6170 // Start out by assuming we want to take the lower 2 bytes of each i32
6171 // operand.
6172 uint64_t Op0Bytes = 0x10;
6173 uint64_t Op1Bytes = 0x54;
6174
6175 std::pair<SDValue *, uint64_t *> OpData[2] = {{&Op0, &Op0Bytes},
6176 {&Op1, &Op1Bytes}};
6177
6178 // Check that each operand is an i16, truncated from an i32 operand. We'll
6179 // select individual bytes from those original operands. Optionally, fold in a
6180 // shift right of that original operand.
6181 for (auto &[Op, OpBytes] : OpData) {
6182 // Eat up any bitcast
6183 if (Op->getOpcode() == ISD::BITCAST)
6184 *Op = Op->getOperand(0);
6185
6186 if (!(Op->getValueType() == MVT::i16 && Op->getOpcode() == ISD::TRUNCATE &&
6187 Op->getOperand(0).getValueType() == MVT::i32))
6188 return SDValue();
6189
6190 // If the truncate has multiple uses, this optimization can increase
6191 // register pressure
6192 if (!Op->hasOneUse())
6193 return SDValue();
6194
6195 *Op = Op->getOperand(0);
6196
6197 // Optionally, fold in a shift-right of the original operand and let permute
6198 // pick the two higher bytes of the original value directly.
6199 if (Op->getOpcode() == ISD::SRL && isa<ConstantSDNode>(Op->getOperand(1))) {
6200 if (cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue() == 16) {
6201 // Shift the PRMT byte selector to pick upper bytes from each respective
6202 // value, instead of the lower ones: 0x10 -> 0x32, 0x54 -> 0x76
6203 assert((*OpBytes == 0x10 || *OpBytes == 0x54) &&
6204 "PRMT selector values out of range");
6205 *OpBytes += 0x22;
6206 *Op = Op->getOperand(0);
6207 }
6208 }
6209 }
6210
6211 SDLoc DL(N);
6212 auto &DAG = DCI.DAG;
6213
6214 auto PRMT =
6215 getPRMT(DAG.getBitcast(MVT::i32, Op0), DAG.getBitcast(MVT::i32, Op1),
6216 (Op1Bytes << 8) | Op0Bytes, DL, DAG);
6217 return DAG.getBitcast(VT, PRMT);
6218}
6219
6220 static SDValue combineADDRSPACECAST(SDNode *N,
6221 TargetLowering::DAGCombinerInfo &DCI) {
6222 auto *ASCN1 = cast<AddrSpaceCastSDNode>(N);
6223
6224 if (auto *ASCN2 = dyn_cast<AddrSpaceCastSDNode>(ASCN1->getOperand(0))) {
6225 assert(ASCN2->getDestAddressSpace() == ASCN1->getSrcAddressSpace());
6226
6227 // Fold asc[B -> A](asc[A -> B](x)) -> x
6228 if (ASCN1->getDestAddressSpace() == ASCN2->getSrcAddressSpace())
6229 return ASCN2->getOperand(0);
6230 }
6231
6232 return SDValue();
6233}
6234
6235// Given a constant selector value and a prmt mode, return the selector value
6236// normalized to the generic prmt mode. See the PTX ISA documentation for more
6237// details:
6238// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-prmt
6239static APInt getPRMTSelector(const APInt &Selector, unsigned Mode) {
6240 assert(Selector.getBitWidth() == 32 && "PRMT must have i32 operands");
6241
6242 if (Mode == NVPTX::PTXPrmtMode::NONE)
6243 return Selector;
6244
6245 const unsigned V = Selector.trunc(2).getZExtValue();
6246
6247 const auto GetSelector = [](unsigned S0, unsigned S1, unsigned S2,
6248 unsigned S3) {
6249 return APInt(32, S0 | (S1 << 4) | (S2 << 8) | (S3 << 12));
6250 };
6251
6252 switch (Mode) {
6253 case NVPTX::PTXPrmtMode::F4E:
6254 return GetSelector(V, V + 1, V + 2, V + 3);
6255 case NVPTX::PTXPrmtMode::B4E:
6256 return GetSelector(V, (V - 1) & 7, (V - 2) & 7, (V - 3) & 7);
6257 case NVPTX::PTXPrmtMode::RC8:
6258 return GetSelector(V, V, V, V);
6259 case NVPTX::PTXPrmtMode::ECL:
6260 return GetSelector(V, std::max(V, 1U), std::max(V, 2U), 3U);
6261 case NVPTX::PTXPrmtMode::ECR:
6262 return GetSelector(0, std::min(V, 1U), std::min(V, 2U), V);
6263 case NVPTX::PTXPrmtMode::RC16: {
6264 unsigned V1 = (V & 1) << 1;
6265 return GetSelector(V1, V1 + 1, V1, V1 + 1);
6266 }
6266 }
6267 default:
6268 llvm_unreachable("Invalid PRMT mode");
6269 }
6270}
6271
6272static APInt computePRMT(APInt A, APInt B, APInt Selector, unsigned Mode) {
6273 assert(A.getBitWidth() == 32 && B.getBitWidth() == 32 &&
6274 Selector.getBitWidth() == 32 && "PRMT must have i32 operands");
6275 // {b, a} = {{b7, b6, b5, b4}, {b3, b2, b1, b0}}
6276 APInt BitField = B.concat(A);
6277 APInt SelectorVal = getPRMTSelector(Selector, Mode);
6278 APInt Result(32, 0);
6279 for (unsigned I : llvm::seq(4U)) {
6280 APInt Sel = SelectorVal.extractBits(4, I * 4);
6281 unsigned Idx = Sel.getLoBits(3).getZExtValue();
6282 unsigned Sign = Sel.getHiBits(1).getZExtValue();
6283 APInt Byte = BitField.extractBits(8, Idx * 8);
6284 if (Sign)
6285 Byte = Byte.ashr(8);
6286 Result.insertBits(Byte, I * 8);
6287 }
6288 return Result;
6289}
6290
6291 static SDValue combinePRMT(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
6292 CodeGenOptLevel OptLevel) {
6293 if (OptLevel == CodeGenOptLevel::None)
6294 return SDValue();
6295
6296 // Constant fold PRMT
6297 if (isa<ConstantSDNode>(N->getOperand(0)) &&
6298 isa<ConstantSDNode>(N->getOperand(1)) &&
6299 isa<ConstantSDNode>(N->getOperand(2)))
6300 return DCI.DAG.getConstant(computePRMT(N->getConstantOperandAPInt(0),
6301 N->getConstantOperandAPInt(1),
6302 N->getConstantOperandAPInt(2),
6303 N->getConstantOperandVal(3)),
6304 SDLoc(N), N->getValueType(0));
6305 return SDValue();
6306}
6307
6308// During call lowering we wrap the return values in a ProxyReg node which
6309// depend on the chain value produced by the completed call. This ensures that
6310// the full call is emitted in cases where libcalls are used to legalize
6311// operations. To improve the functioning of other DAG combines we pull all
6312// operations we can through one of these nodes, ensuring that the ProxyReg
6313// directly wraps a load. That is:
6314//
6315// (ProxyReg (zext (load retval0))) => (zext (ProxyReg (load retval0)))
6316//
6317 static SDValue sinkProxyReg(SDValue R, SDValue Chain,
6318 TargetLowering::DAGCombinerInfo &DCI) {
6319 switch (R.getOpcode()) {
6320 case ISD::TRUNCATE:
6321 case ISD::ANY_EXTEND:
6322 case ISD::SIGN_EXTEND:
6323 case ISD::ZERO_EXTEND:
6324 case ISD::BITCAST: {
6325 if (SDValue V = sinkProxyReg(R.getOperand(0), Chain, DCI))
6326 return DCI.DAG.getNode(R.getOpcode(), SDLoc(R), R.getValueType(), V);
6327 return SDValue();
6328 }
6329 case ISD::SHL:
6330 case ISD::SRL:
6331 case ISD::SRA:
6332 case ISD::OR: {
6333 if (SDValue A = sinkProxyReg(R.getOperand(0), Chain, DCI))
6334 if (SDValue B = sinkProxyReg(R.getOperand(1), Chain, DCI))
6335 return DCI.DAG.getNode(R.getOpcode(), SDLoc(R), R.getValueType(), A, B);
6336 return SDValue();
6337 }
6338 case ISD::Constant:
6339 return R;
6340 case ISD::LOAD:
6341 case NVPTXISD::LoadV2:
6342 case NVPTXISD::LoadV4: {
6343 return DCI.DAG.getNode(NVPTXISD::ProxyReg, SDLoc(R), R.getValueType(),
6344 {Chain, R});
6345 }
6346 case ISD::BUILD_VECTOR: {
6347 if (DCI.isBeforeLegalize())
6348 return SDValue();
6349
6350 SmallVector<SDValue> Ops;
6351 for (auto &Op : R->ops()) {
6352 SDValue V = sinkProxyReg(Op, Chain, DCI);
6353 if (!V)
6354 return SDValue();
6355 Ops.push_back(V);
6356 }
6357 return DCI.DAG.getNode(ISD::BUILD_VECTOR, SDLoc(R), R.getValueType(), Ops);
6358 }
6359 case ISD::EXTRACT_VECTOR_ELT: {
6360 if (DCI.isBeforeLegalize())
6361 return SDValue();
6362
6363 if (SDValue V = sinkProxyReg(R.getOperand(0), Chain, DCI))
6364 return DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(R),
6365 R.getValueType(), V, R.getOperand(1));
6366 return SDValue();
6367 }
6368 default:
6369 return SDValue();
6370 }
6371}
6372
6373 static SDValue combineProxyReg(SDNode *N,
6374 TargetLowering::DAGCombinerInfo &DCI) {
6375
6376 SDValue Chain = N->getOperand(0);
6377 SDValue Reg = N->getOperand(1);
6378
6379 // If the ProxyReg is not wrapping a load, try to pull the operations through
6380 // the ProxyReg.
6381 if (Reg.getOpcode() != ISD::LOAD) {
6382 if (SDValue V = sinkProxyReg(Reg, Chain, DCI))
6383 return V;
6384 }
6385
6386 return SDValue();
6387}
6388
6389SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
6390 DAGCombinerInfo &DCI) const {
6391 CodeGenOptLevel OptLevel = getTargetMachine().getOptLevel();
6392 switch (N->getOpcode()) {
6393 default:
6394 break;
6395 case ISD::ADD:
6396 return PerformADDCombine(N, DCI, OptLevel);
6397 case ISD::ADDRSPACECAST:
6398 return combineADDRSPACECAST(N, DCI);
6399 case ISD::SIGN_EXTEND:
6400 case ISD::ZERO_EXTEND:
6401 return combineMulWide(N, DCI, OptLevel);
6402 case ISD::BUILD_VECTOR:
6403 return PerformBUILD_VECTORCombine(N, DCI);
6404 case ISD::EXTRACT_VECTOR_ELT:
6405 return PerformEXTRACTCombine(N, DCI);
6406 case ISD::FADD:
6407 return PerformFADDCombine(N, DCI, OptLevel);
6408 case ISD::FMAXNUM:
6409 case ISD::FMINNUM:
6410 case ISD::FMAXIMUM:
6411 case ISD::FMINIMUM:
6412 case ISD::FMAXIMUMNUM:
6413 case ISD::FMINIMUMNUM:
6414 return PerformFMinMaxCombine(N, DCI, STI.getPTXVersion(),
6415 STI.getSmVersion());
6416 case ISD::LOAD:
6417 case NVPTXISD::LoadV2:
6418 case NVPTXISD::LoadV4:
6419 return combineLOAD(N, DCI, STI);
6420 case ISD::MUL:
6421 return PerformMULCombine(N, DCI, OptLevel);
6422 case NVPTXISD::PRMT:
6423 return combinePRMT(N, DCI, OptLevel);
6424 case NVPTXISD::ProxyReg:
6425 return combineProxyReg(N, DCI);
6426 case ISD::SETCC:
6427 return PerformSETCCCombine(N, DCI, STI.getSmVersion());
6428 case ISD::SHL:
6429 return PerformSHLCombine(N, DCI, OptLevel);
6430 case ISD::SREM:
6431 case ISD::UREM:
6432 return PerformREMCombine(N, DCI, OptLevel);
6433 case ISD::STORE:
6434 case NVPTXISD::StoreV2:
6435 case NVPTXISD::StoreV4:
6436 return combineSTORE(N, DCI, STI);
6437 case ISD::VSELECT:
6438 return PerformVSELECTCombine(N, DCI);
6439 }
6440 return SDValue();
6441}
6442
6443static void ReplaceBITCAST(SDNode *Node, SelectionDAG &DAG,
6444 SmallVectorImpl<SDValue> &Results) {
6445 // Handle bitcasting to v2i8 without hitting the default promotion
6446 // strategy which goes through stack memory.
6447 SDValue Op(Node, 0);
6448 EVT ToVT = Op->getValueType(0);
6449 if (ToVT != MVT::v2i8) {
6450 return;
6451 }
6452
6453 // Bitcast to i16 and unpack elements into a vector
6454 SDLoc DL(Node);
6455 SDValue AsInt = DAG.getBitcast(MVT::i16, Op->getOperand(0));
6456 SDValue Vec0 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, AsInt);
6457 SDValue Const8 = DAG.getConstant(8, DL, MVT::i16);
6458 SDValue Vec1 =
6459 DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
6460 DAG.getNode(ISD::SRL, DL, MVT::i16, {AsInt, Const8}));
6461 Results.push_back(
6462 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i8, {Vec0, Vec1}));
6463}
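// For example, bitcasting an i16 that holds 0xBBAA to v2i8 produces the vector
// <0xAA, 0xBB>: element 0 comes from the low byte (the plain truncate above)
// and element 1 from the high byte (the srl by 8 followed by a truncate).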
6464
6465static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
6466 SmallVectorImpl<SDValue> &Results) {
6467 SDValue Chain = N->getOperand(0);
6468 SDValue Intrin = N->getOperand(1);
6469 SDLoc DL(N);
6470
6471 // Get the intrinsic ID
6472 unsigned IntrinNo = Intrin.getNode()->getAsZExtVal();
6473 switch (IntrinNo) {
6474 default:
6475 return;
6476 case Intrinsic::nvvm_ldu_global_i:
6477 case Intrinsic::nvvm_ldu_global_f:
6478 case Intrinsic::nvvm_ldu_global_p: {
6479 EVT ResVT = N->getValueType(0);
6480
6481 if (ResVT.isVector()) {
6482 // Vector LDG/LDU
6483
6484 unsigned NumElts = ResVT.getVectorNumElements();
6485 EVT EltVT = ResVT.getVectorElementType();
6486
6487 // Since LDU/LDG are target nodes, we cannot rely on DAG type
6488 // legalization.
6489 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
6490 // loaded type to i16 and propagate the "real" type as the memory type.
6491 bool NeedTrunc = false;
6492 if (EltVT.getSizeInBits() < 16) {
6493 EltVT = MVT::i16;
6494 NeedTrunc = true;
6495 }
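// For example, an ldu of <2 x i8> is emitted below as an LDUV2 producing two
// i16 results (the original memory type is passed through, so isel can still
// pick the 8-bit variant), and the results are truncated back to i8 before
// being packed into the final vector.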
6496
6497 unsigned Opcode = 0;
6498 SDVTList LdResVTs;
6499
6500 switch (NumElts) {
6501 default:
6502 return;
6503 case 2:
6504 Opcode = NVPTXISD::LDUV2;
6505 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
6506 break;
6507 case 4: {
6508 Opcode = NVPTXISD::LDUV4;
6509 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
6510 LdResVTs = DAG.getVTList(ListVTs);
6511 break;
6512 }
6513 }
6514
6515 SmallVector<SDValue, 8> OtherOps;
6516
6517 // Copy regular operands
6518
6519 OtherOps.push_back(Chain); // Chain
6520 // Skip operand 1 (intrinsic ID)
6521 // Others
6522 OtherOps.append(N->op_begin() + 2, N->op_end());
6523
6523
6524 MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
6525
6526 SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
6527 MemSD->getMemoryVT(),
6528 MemSD->getMemOperand());
6529
6530 SmallVector<SDValue, 4> ScalarRes;
6531
6532 for (unsigned i = 0; i < NumElts; ++i) {
6533 SDValue Res = NewLD.getValue(i);
6534 if (NeedTrunc)
6535 Res =
6536 DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
6537 ScalarRes.push_back(Res);
6538 }
6539
6540 SDValue LoadChain = NewLD.getValue(NumElts);
6541
6542 SDValue BuildVec =
6543 DAG.getBuildVector(ResVT, DL, ScalarRes);
6544
6545 Results.push_back(BuildVec);
6546 Results.push_back(LoadChain);
6547 } else {
6548 // i8 LDG/LDU
6549 assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
6550 "Custom handling of non-i8 ldu/ldg?");
6551
6552 // Just copy all operands as-is
6553 SmallVector<SDValue> Ops(N->op_begin(), N->op_end());
6554
6555 // Force output to i16
6556 SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);
6557
6558 MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
6559
6560 // We make sure the memory type is i8, which will be used during isel
6561 // to select the proper instruction.
6562 SDValue NewLD =
6563 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, Ops,
6564 MVT::i8, MemSD->getMemOperand());
6565
6566 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
6567 NewLD.getValue(0)));
6568 Results.push_back(NewLD.getValue(1));
6569 }
6570 return;
6571 }
6572
6573 case Intrinsic::nvvm_tcgen05_ld_16x64b_x4:
6574 case Intrinsic::nvvm_tcgen05_ld_16x64b_x8:
6575 case Intrinsic::nvvm_tcgen05_ld_16x64b_x16:
6576 case Intrinsic::nvvm_tcgen05_ld_16x64b_x32:
6577 case Intrinsic::nvvm_tcgen05_ld_16x64b_x64:
6578 case Intrinsic::nvvm_tcgen05_ld_16x64b_x128:
6579 case Intrinsic::nvvm_tcgen05_ld_32x32b_x4:
6580 case Intrinsic::nvvm_tcgen05_ld_32x32b_x8:
6581 case Intrinsic::nvvm_tcgen05_ld_32x32b_x16:
6582 case Intrinsic::nvvm_tcgen05_ld_32x32b_x32:
6583 case Intrinsic::nvvm_tcgen05_ld_32x32b_x64:
6584 case Intrinsic::nvvm_tcgen05_ld_32x32b_x128:
6585 case Intrinsic::nvvm_tcgen05_ld_16x128b_x2:
6586 case Intrinsic::nvvm_tcgen05_ld_16x128b_x4:
6587 case Intrinsic::nvvm_tcgen05_ld_16x128b_x8:
6588 case Intrinsic::nvvm_tcgen05_ld_16x128b_x16:
6589 case Intrinsic::nvvm_tcgen05_ld_16x128b_x32:
6590 case Intrinsic::nvvm_tcgen05_ld_16x128b_x64:
6591 case Intrinsic::nvvm_tcgen05_ld_16x256b_x1:
6592 case Intrinsic::nvvm_tcgen05_ld_16x256b_x2:
6593 case Intrinsic::nvvm_tcgen05_ld_16x256b_x4:
6594 case Intrinsic::nvvm_tcgen05_ld_16x256b_x8:
6595 case Intrinsic::nvvm_tcgen05_ld_16x256b_x16:
6596 case Intrinsic::nvvm_tcgen05_ld_16x256b_x32:
6597 if (auto Res = lowerTcgen05Ld(N, DAG)) {
6598 Results.push_back(Res->first);
6599 Results.push_back(Res->second);
6600 }
6601 return;
6602
6603 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4:
6604 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8:
6605 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16:
6606 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32:
6607 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64:
6608 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128:
6609 if (auto Res = lowerTcgen05Ld(N, DAG, /*HasOffset=*/true)) {
6610 Results.push_back(Res->first);
6611 Results.push_back(Res->second);
6612 }
6613 return;
6614 }
6615}
6616
6617static void ReplaceCopyFromReg_128(SDNode *N, SelectionDAG &DAG,
6618 SmallVectorImpl<SDValue> &Results) {
6619 // Change the CopyFromReg to output 2 64-bit results instead of a 128-bit
6620 // result so that it can pass legalization
6621 SDLoc DL(N);
6622 SDValue Chain = N->getOperand(0);
6623 SDValue Reg = N->getOperand(1);
6624 SDValue Glue = N->getOperand(2);
6625
6626 assert(Reg.getValueType() == MVT::i128 &&
6627 "Custom lowering for CopyFromReg with 128-bit reg only");
6628 SmallVector<EVT, 4> ResultsType = {MVT::i64, MVT::i64, N->getValueType(1),
6629 N->getValueType(2)};
6630 SmallVector<SDValue, 3> NewOps = {Chain, Reg, Glue};
6631
6632 SDValue NewValue = DAG.getNode(ISD::CopyFromReg, DL, ResultsType, NewOps);
6633 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128,
6634 {NewValue.getValue(0), NewValue.getValue(1)});
6635
6636 Results.push_back(Pair);
6637 Results.push_back(NewValue.getValue(2));
6638 Results.push_back(NewValue.getValue(3));
6639}
6640
6641static void replaceProxyReg(SDNode *N, SelectionDAG &DAG,
6642 const TargetLowering &TLI,
6643 SmallVectorImpl<SDValue> &Results) {
6644 SDValue Chain = N->getOperand(0);
6645 SDValue Reg = N->getOperand(1);
6646
6647 MVT VT = TLI.getRegisterType(*DAG.getContext(), Reg.getValueType());
6648
6649 SDValue NewReg = DAG.getAnyExtOrTrunc(Reg, SDLoc(N), VT);
6650 SDValue NewProxy =
6651 DAG.getNode(NVPTXISD::ProxyReg, SDLoc(N), VT, {Chain, NewReg});
6652 SDValue Res = DAG.getAnyExtOrTrunc(NewProxy, SDLoc(N), N->getValueType(0));
6653
6654 Results.push_back(Res);
6655}
6656
6657static void replaceAtomicSwap128(SDNode *N, SelectionDAG &DAG,
6658 const NVPTXSubtarget &STI,
6659 SmallVectorImpl<SDValue> &Results) {
6660 assert(N->getValueType(0) == MVT::i128 &&
6661 "Custom lowering for atomic128 only supports i128");
6662
6664 SDLoc dl(N);
6665
6666 if (!STI.hasAtomSwap128()) {
6669 "Support for b128 atomics introduced in PTX ISA version 8.3 and "
6670 "requires target sm_90.",
6671 dl.getDebugLoc()));
6672
6673 Results.push_back(DAG.getUNDEF(MVT::i128));
6674 Results.push_back(AN->getOperand(0)); // Chain
6675 return;
6676 }
6677
6678 SmallVector<SDValue> Ops;
6679 Ops.push_back(AN->getOperand(0)); // Chain
6680 Ops.push_back(AN->getOperand(1)); // Ptr
6681 for (const auto &Op : AN->ops().drop_front(2)) {
6682 // Low part
6683 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i64, Op,
6684 DAG.getIntPtrConstant(0, dl)));
6685 // High part
6686 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i64, Op,
6687 DAG.getIntPtrConstant(1, dl)));
6688 }
6689 unsigned Opcode = N->getOpcode() == ISD::ATOMIC_SWAP
6692 SDVTList Tys = DAG.getVTList(MVT::i64, MVT::i64, MVT::Other);
6693 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, MVT::i128,
6694 AN->getMemOperand());
6695 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i128,
6696 {Result.getValue(0), Result.getValue(1)}));
6697 Results.push_back(Result.getValue(2));
6698}
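// In effect, a 128-bit swap or cmpxchg is legalized by splitting each i128
// data operand into its low and high i64 halves, emitting a two-result memory
// intrinsic, and re-assembling the i128 result with BUILD_PAIR; a cmpxchg, for
// instance, contributes four i64 operands (compare lo/hi, then new-value
// lo/hi) after the chain and pointer.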
6699
6700void NVPTXTargetLowering::ReplaceNodeResults(
6702 switch (N->getOpcode()) {
6703 default:
6704 report_fatal_error("Unhandled custom legalization");
6705 case ISD::BITCAST:
6706 ReplaceBITCAST(N, DAG, Results);
6707 return;
6708 case ISD::LOAD:
6709 replaceLoadVector(N, DAG, Results, STI);
6710 return;
6711 case ISD::INTRINSIC_W_CHAIN:
6712 ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
6713 return;
6714 case ISD::CopyFromReg:
6715 ReplaceCopyFromReg_128(N, DAG, Results);
6716 return;
6717 case NVPTXISD::ProxyReg:
6718 replaceProxyReg(N, DAG, *this, Results);
6719 return;
6720 case ISD::ATOMIC_CMP_SWAP:
6721 case ISD::ATOMIC_SWAP:
6722 replaceAtomicSwap128(N, DAG, STI, Results);
6723 return;
6724 }
6725}
6726
6727NVPTXTargetLowering::AtomicExpansionKind
6728NVPTXTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
6729 Type *Ty = AI->getValOperand()->getType();
6730
6731 if (AI->isFloatingPointOperation()) {
6732 if (AI->getOperation() == AtomicRMWInst::FAdd) {
6733 if (Ty->isHalfTy() && STI.getSmVersion() >= 70 &&
6734 STI.getPTXVersion() >= 63)
6735 return AtomicExpansionKind::None;
6736 if (Ty->isBFloatTy() && STI.getSmVersion() >= 90 &&
6737 STI.getPTXVersion() >= 78)
6738 return AtomicExpansionKind::None;
6739 if (Ty->isFloatTy())
6740 return AtomicExpansionKind::None;
6741 if (Ty->isDoubleTy() && STI.hasAtomAddF64())
6742 return AtomicExpansionKind::None;
6743 }
6744 return AtomicExpansionKind::CmpXChg;
6745 }
6746
6747 assert(Ty->isIntegerTy() && "Ty should be integer at this point");
6748 const unsigned BitWidth = cast<IntegerType>(Ty)->getBitWidth();
6749
6750 switch (AI->getOperation()) {
6751 default:
6752 return AtomicExpansionKind::CmpXChg;
6753 case AtomicRMWInst::Xchg:
6754 if (BitWidth == 128)
6755 return AtomicExpansionKind::None;
6756 [[fallthrough]];
6757 case AtomicRMWInst::And:
6758 case AtomicRMWInst::Or:
6759 case AtomicRMWInst::Xor:
6760 switch (BitWidth) {
6761 case 8:
6762 case 16:
6763 return AtomicExpansionKind::CmpXChg;
6764 case 32:
6765 return AtomicExpansionKind::None;
6766 case 64:
6767 if (STI.hasAtomBitwise64())
6768 return AtomicExpansionKind::None;
6769 return AtomicExpansionKind::CmpXChg;
6770 case 128:
6771 return AtomicExpansionKind::CmpXChg;
6772 default:
6773 llvm_unreachable("unsupported width encountered");
6774 }
6775 case AtomicRMWInst::Add:
6776 case AtomicRMWInst::Sub:
6777 case AtomicRMWInst::Max:
6778 case AtomicRMWInst::Min:
6779 case AtomicRMWInst::UMax:
6780 case AtomicRMWInst::UMin:
6781 switch (BitWidth) {
6782 case 8:
6783 case 16:
6784 return AtomicExpansionKind::CmpXChg;
6785 case 32:
6786 return AtomicExpansionKind::None;
6787 case 64:
6788 if (STI.hasAtomMinMax64())
6789 return AtomicExpansionKind::None;
6790 return AtomicExpansionKind::CmpXChg;
6791 case 128:
6792 return AtomicExpansionKind::CmpXChg;
6793 default:
6794 llvm_unreachable("unsupported width encountered");
6795 }
6796 case AtomicRMWInst::UIncWrap:
6797 case AtomicRMWInst::UDecWrap:
6798 switch (BitWidth) {
6799 case 32:
6800 return AtomicExpansionKind::None;
6801 case 8:
6802 case 16:
6803 case 64:
6804 case 128:
6805 return AtomicExpansionKind::CmpXChg;
6806 default:
6807 llvm_unreachable("unsupported width encountered");
6808 }
6809 }
6810
6811 return AtomicExpansionKind::CmpXChg;
6812}
6813
6814bool NVPTXTargetLowering::shouldInsertFencesForAtomic(
6815 const Instruction *I) const {
6816 auto *CI = dyn_cast<AtomicCmpXchgInst>(I);
6817 // When CAS bitwidth is not supported on the hardware, the CAS is emulated
6818 // using a retry loop that uses a higher-bitwidth monotonic CAS. We enforce
6819 // the memory order using explicit fences around the retry loop.
6820 // The memory order of natively supported CAS operations can be enforced
6821 // by lowering to an atom.cas with the right memory synchronizing effect.
6822 // However, atom.cas only supports relaxed, acquire, release and acq_rel.
6823 // So we also use explicit fences for enforcing memory order for
6824 // seq_cst CAS with natively-supported bitwidths.
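// For example (assuming a 32-bit minimum cmpxchg width), a seq_cst i32
// cmpxchg gets a leading fence.sc and is then lowered as an acquire atom.cas,
// while a seq_cst i8 cmpxchg, which is emulated via a wider CAS retry loop,
// is bracketed by both a leading fence.sc and a trailing acquire fence.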
6825 return CI &&
6826 (cast<IntegerType>(CI->getCompareOperand()->getType())->getBitWidth() <
6827 STI.getMinCmpXchgSizeInBits() ||
6828 CI->getMergedOrdering() == AtomicOrdering::SequentiallyConsistent);
6829}
6830
6831AtomicOrdering NVPTXTargetLowering::atomicOperationOrderAfterFenceSplit(
6832 const Instruction *I) const {
6833 auto *CI = dyn_cast<AtomicCmpXchgInst>(I);
6834 bool BitwidthSupportedAndIsSeqCst =
6835 CI && CI->getMergedOrdering() == AtomicOrdering::SequentiallyConsistent &&
6836 cast<IntegerType>(CI->getCompareOperand()->getType())->getBitWidth() >=
6837 STI.getMinCmpXchgSizeInBits();
6838 return BitwidthSupportedAndIsSeqCst ? AtomicOrdering::Acquire
6839 : AtomicOrdering::Monotonic;
6840}
6841
6843 Instruction *Inst,
6844 AtomicOrdering Ord) const {
6845 if (!isa<AtomicCmpXchgInst>(Inst))
6846 return TargetLoweringBase::emitLeadingFence(Builder, Inst, Ord);
6847
6848 // Specialize for cmpxchg
6849 // Emit a fence.sc leading fence for cmpxchg seq_cst which are not emulated
6850 SyncScope::ID SSID = cast<AtomicCmpXchgInst>(Inst)->getSyncScopeID();
6851 if (isReleaseOrStronger(Ord))
6852 return Builder.CreateFence(Ord == AtomicOrdering::SequentiallyConsistent
6853 ? Ord
6854 : AtomicOrdering::Release,
6855 SSID);
6856
6857 return nullptr;
6858}
6859
6861 Instruction *Inst,
6862 AtomicOrdering Ord) const {
6863 // Specialize for cmpxchg
6864 if (!isa<AtomicCmpXchgInst>(Inst))
6865 return TargetLoweringBase::emitTrailingFence(Builder, Inst, Ord);
6866
6867 auto *CI = cast<AtomicCmpXchgInst>(Inst);
6868 auto CASWidth =
6869 cast<IntegerType>(CI->getCompareOperand()->getType())->getBitWidth();
6870 SyncScope::ID SSID = CI->getSyncScopeID();
6871 // Do not emit a trailing fence for cmpxchg seq_cst which are not emulated
6872 if (isAcquireOrStronger(Ord) &&
6873 (Ord != AtomicOrdering::SequentiallyConsistent ||
6874 CASWidth < STI.getMinCmpXchgSizeInBits()))
6875 return Builder.CreateFence(AtomicOrdering::Acquire, SSID);
6876
6877 return nullptr;
6878}
6879
6880// Rather than default to SINT when both UINT and SINT are custom, we only
6881// change the opcode when UINT is not legal and SINT is. UINT is preferred when
6882// both are custom since unsigned CVT instructions can lead to slightly better
6883// SASS code with fewer instructions.
6884unsigned NVPTXTargetLowering::getPreferredFPToIntOpcode(unsigned Op, EVT FromVT,
6885 EVT ToVT) const {
6886 if (isOperationLegal(Op, ToVT))
6887 return Op;
6888 switch (Op) {
6889 case ISD::FP_TO_UINT:
6890 if (isOperationLegal(ISD::FP_TO_SINT, ToVT))
6891 return ISD::FP_TO_SINT;
6892 break;
6893 case ISD::STRICT_FP_TO_UINT:
6894 if (isOperationLegal(ISD::STRICT_FP_TO_SINT, ToVT))
6895 return ISD::STRICT_FP_TO_SINT;
6896 break;
6897 case ISD::VP_FP_TO_UINT:
6898 if (isOperationLegal(ISD::VP_FP_TO_SINT, ToVT))
6899 return ISD::VP_FP_TO_SINT;
6900 break;
6901 default:
6902 break;
6903 }
6904 return Op;
6905}
6906
6907// Pin NVPTXTargetObjectFile's vtables to this file.
6908NVPTXTargetObjectFile::~NVPTXTargetObjectFile() = default;
6909
6910MCSection *NVPTXTargetObjectFile::SelectSectionForGlobal(
6911 const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const {
6912 return getDataSection();
6913}
6914
6915static void computeKnownBitsForPRMT(const SDValue Op, KnownBits &Known,
6916 const SelectionDAG &DAG, unsigned Depth) {
6917 SDValue A = Op.getOperand(0);
6918 SDValue B = Op.getOperand(1);
6919 ConstantSDNode *Selector = dyn_cast<ConstantSDNode>(Op.getOperand(2));
6920 unsigned Mode = Op.getConstantOperandVal(3);
6921
6922 if (!Selector)
6923 return;
6924
6925 KnownBits AKnown = DAG.computeKnownBits(A, Depth);
6926 KnownBits BKnown = DAG.computeKnownBits(B, Depth);
6927
6928 // {b, a} = {{b7, b6, b5, b4}, {b3, b2, b1, b0}}
6929 assert(AKnown.getBitWidth() == 32 && BKnown.getBitWidth() == 32 &&
6930 "PRMT must have i32 operands");
6931 assert(Known.getBitWidth() == 32 && "PRMT must have i32 result");
6932 KnownBits BitField = BKnown.concat(AKnown);
6933
6934 APInt SelectorVal = getPRMTSelector(Selector->getAPIntValue(), Mode);
6935 for (unsigned I : llvm::seq(4)) {
6936 APInt Sel = SelectorVal.extractBits(4, I * 4);
6937 unsigned Idx = Sel.getLoBits(3).getZExtValue();
6938 unsigned Sign = Sel.getHiBits(1).getZExtValue();
6939 KnownBits Byte = BitField.extractBits(8, Idx * 8);
6940 if (Sign)
6941 Byte = KnownBits::ashr(Byte, 8);
6942 Known.insertBits(Byte, I * 8);
6943 }
6944}
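// Worked example (for illustration): with A = 0x44332211, B = 0x88776655 and
// selector 0x5140 in the default mode, the byte pool {b,a} is
// 0x8877665544332211, so the result bytes are, from low to high, pool[0]=0x11,
// pool[4]=0x55, pool[1]=0x22 and pool[5]=0x66, giving a known result of
// 0x66225511. A selector nibble with its high bit set instead replicates the
// chosen byte's sign bit across that output byte.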
6945
6946static void computeKnownBitsForLoadV(const SDValue Op, KnownBits &Known) {
6947 const MemSDNode *LD = cast<MemSDNode>(Op);
6948
6949 // We can't do anything without knowing the sign bit.
6950 auto ExtType = LD->getConstantOperandVal(LD->getNumOperands() - 1);
6951 if (ExtType == ISD::SEXTLOAD)
6952 return;
6953
6954 // ExtLoading to vector types is weird and may not work well with known bits.
6955 auto DestVT = LD->getValueType(0);
6956 if (DestVT.isVector())
6957 return;
6958
6959 assert(Known.getBitWidth() == DestVT.getSizeInBits());
6960 auto ElementBitWidth = NVPTXDAGToDAGISel::getFromTypeWidthForLoad(LD);
6961 Known.Zero.setHighBits(Known.getBitWidth() - ElementBitWidth);
6962}
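// For example, a LoadV2 whose i8 elements are widened into i32 results lets
// the upper 24 bits of each scalar result be marked as known zero;
// sign-extending loads and vector-typed results are conservatively skipped
// above.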
6963
6965 const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
6966 const SelectionDAG &DAG, unsigned Depth) const {
6967 Known.resetAll();
6968
6969 switch (Op.getOpcode()) {
6970 case NVPTXISD::PRMT:
6971 computeKnownBitsForPRMT(Op, Known, DAG, Depth);
6972 break;
6973 case NVPTXISD::LoadV2:
6974 case NVPTXISD::LoadV4:
6975 case NVPTXISD::LoadV8:
6976 computeKnownBitsForLoadV(Op, Known);
6977 break;
6978 default:
6979 break;
6980 }
6981}
6982
6983static std::pair<APInt, APInt> getPRMTDemandedBits(const APInt &SelectorVal,
6984 const APInt &DemandedBits) {
6985 APInt DemandedLHS = APInt(32, 0);
6986 APInt DemandedRHS = APInt(32, 0);
6987
6988 for (unsigned I : llvm::seq(4)) {
6989 if (DemandedBits.extractBits(8, I * 8).isZero())
6990 continue;
6991
6992 APInt Sel = SelectorVal.extractBits(4, I * 4);
6993 unsigned Idx = Sel.getLoBits(3).getZExtValue();
6994 unsigned Sign = Sel.getHiBits(1).getZExtValue();
6995
6996 APInt &Src = Idx < 4 ? DemandedLHS : DemandedRHS;
6997 unsigned ByteStart = (Idx % 4) * 8;
6998 if (Sign)
6999 Src.setBit(ByteStart + 7);
7000 else
7001 Src.setBits(ByteStart, ByteStart + 8);
7002 }
7003
7004 return {DemandedLHS, DemandedRHS};
7005}
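// Worked example: if only the low output byte is demanded and its selector
// nibble is 5, then only byte 1 of the second operand (bits 8..15) is
// demanded; with the sign-replication bit set, only that byte's sign bit
// (bit 15) would be demanded instead.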
7006
7007// Replace undef with 0 as this is easier for other optimizations such as
7008// known bits.
7010 if (!Op)
7011 return SDValue();
7012 if (Op.isUndef())
7013 return DAG.getConstant(0, SDLoc(), MVT::i32);
7014 return Op;
7015}
7016
7018 const APInt &DemandedBits,
7019 SelectionDAG &DAG,
7020 const TargetLowering &TLI,
7021 unsigned Depth) {
7022 assert(PRMT.getOpcode() == NVPTXISD::PRMT);
7023 SDValue Op0 = PRMT.getOperand(0);
7024 SDValue Op1 = PRMT.getOperand(1);
7025 auto *SelectorConst = dyn_cast<ConstantSDNode>(PRMT.getOperand(2));
7026 if (!SelectorConst)
7027 return SDValue();
7028
7029 unsigned Mode = PRMT.getConstantOperandVal(3);
7030 const APInt Selector = getPRMTSelector(SelectorConst->getAPIntValue(), Mode);
7031
7032 // Try to simplify the PRMT to one of the inputs if the used bytes are all
7033 // from the same input in the correct order.
7034 const unsigned LeadingBytes = DemandedBits.countLeadingZeros() / 8;
7035 const unsigned SelBits = (4 - LeadingBytes) * 4;
7036 if (Selector.getLoBits(SelBits) == APInt(32, 0x3210).getLoBits(SelBits))
7037 return Op0;
7038 if (Selector.getLoBits(SelBits) == APInt(32, 0x7654).getLoBits(SelBits))
7039 return Op1;
7040
7041 auto [DemandedLHS, DemandedRHS] = getPRMTDemandedBits(Selector, DemandedBits);
7042
7043 // Attempt to avoid multi-use ops if we don't need anything from them.
7044 SDValue DemandedOp0 =
7045 TLI.SimplifyMultipleUseDemandedBits(Op0, DemandedLHS, DAG, Depth + 1);
7046 SDValue DemandedOp1 =
7047 TLI.SimplifyMultipleUseDemandedBits(Op1, DemandedRHS, DAG, Depth + 1);
7048
7049 DemandedOp0 = canonicalizePRMTInput(DemandedOp0, DAG);
7050 DemandedOp1 = canonicalizePRMTInput(DemandedOp1, DAG);
7051 if ((DemandedOp0 && DemandedOp0 != Op0) ||
7052 (DemandedOp1 && DemandedOp1 != Op1)) {
7053 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
7054 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
7055 return getPRMT(Op0, Op1, Selector.getZExtValue(), SDLoc(PRMT), DAG);
7056 }
7057
7058 return SDValue();
7059}
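// For instance, a PRMT with selector 0x3210 (an identity permutation of the
// first operand) folds to Op0 outright, and a selector of the form 0xXX10
// still folds to Op0 when only the two low output bytes are demanded, since
// selector nibbles for undemanded bytes are ignored.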
7060
7062 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
7063 KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const {
7064 Known.resetAll();
7065
7066 switch (Op.getOpcode()) {
7067 case NVPTXISD::PRMT:
7068 if (SDValue Result = simplifyDemandedBitsForPRMT(Op, DemandedBits, TLO.DAG,
7069 *this, Depth)) {
7070 TLO.CombineTo(Op, Result);
7071 return true;
7072 }
7073 break;
7074 default:
7075 break;
7076 }
7077
7078 computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth);
7079 return false;
7080}
return SDValue()
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
constexpr LLT S1
constexpr LLT F32
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombineWithOperands - Try DAG combinations for an ADD with operands N0 and N1.
static SDValue PerformADDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
static SDValue PerformVSELECTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue PerformBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformBUILD_VECTORCombine - Target-specific dag combine xforms for ISD::BUILD_VECTOR.
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file contains the declarations of entities that describe floating point environment and related ...
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
#define MAKE_CASE(V)
Register Reg
Register const TargetRegisterInfo * TRI
#define T
NVPTX address space definition.
static bool shouldConvertToIndirectCall(const CallBase *CB, const GlobalAddressSDNode *Func)
static SDValue combineADDRSPACECAST(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static cl::opt< bool > sched4reg("nvptx-sched4reg", cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false))
static SDValue lowerTcgen05St(SDValue Op, SelectionDAG &DAG)
static SDValue PerformEXTRACTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static cl::opt< NVPTX::DivPrecisionLevel > UsePrecDivF32("nvptx-prec-divf32", cl::Hidden, cl::desc("NVPTX Specific: Override the precision of the lowering for f32 fdiv"), cl::values(clEnumValN(NVPTX::DivPrecisionLevel::Approx, "0", "Use div.approx"), clEnumValN(NVPTX::DivPrecisionLevel::Full, "1", "Use div.full"), clEnumValN(NVPTX::DivPrecisionLevel::IEEE754, "2", "Use IEEE Compliant F32 div.rnd if available (default)"), clEnumValN(NVPTX::DivPrecisionLevel::IEEE754_NoFTZ, "3", "Use IEEE Compliant F32 div.rnd if available, no FTZ")), cl::init(NVPTX::DivPrecisionLevel::IEEE754))
static bool isConstOne(const SDValue &Operand)
static cl::opt< unsigned > FMAContractLevelOpt("nvptx-fma-level", cl::Hidden, cl::desc("NVPTX Specific: FMA contraction (0: don't do it" " 1: do it 2: do it aggressively"), cl::init(2))
static bool IsPTXVectorType(MVT VT)
static SDValue lowerLOADi1(LoadSDNode *LD, SelectionDAG &DAG)
static SDValue lowerIntrinsicVoid(SDValue Op, SelectionDAG &DAG)
static MachinePointerInfo refinePtrAS(SDValue &Ptr, SelectionDAG &DAG, const DataLayout &DL, const TargetLowering &TL)
static SDValue lowerROT(SDValue Op, SelectionDAG &DAG)
static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL, LLVMContext &Ctx, CallingConv::ID CallConv, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > &Offsets, uint64_t StartingOffset=0)
ComputePTXValueVTs - For the given Type Ty, returns the set of primitive legal-ish MVTs that compose ...
static void ReplaceBITCAST(SDNode *Node, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static void replaceAtomicSwap128(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI, SmallVectorImpl< SDValue > &Results)
static SDValue lowerSTOREVector(SDValue Op, SelectionDAG &DAG, const NVPTXSubtarget &STI)
static SDValue lowerLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
static void replaceProxyReg(SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI, SmallVectorImpl< SDValue > &Results)
static void ReplaceCopyFromReg_128(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static SDValue lowerCTLZCTPOP(SDValue Op, SelectionDAG &DAG)
static SDValue combineMADConstOne(SDValue X, SDValue Add, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)
static SDValue combinePRMT(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue combinePackingMovIntoStore(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned Front, unsigned Back)
Fold packing movs into a store.
static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static SDValue getBuildVectorizedValue(unsigned N, const SDLoc &dl, SelectionDAG &DAG, T GetElement)
static SDValue getExtractVectorizedValue(SDValue V, unsigned I, EVT VT, const SDLoc &dl, SelectionDAG &DAG)
static NVPTXISD::NodeType getMinMax3Opcode(unsigned MinMax2Opcode)
Get 3-input version of a 2-input min/max opcode.
static unsigned canMergeParamLoadStoresStartingAt(unsigned Idx, uint32_t AccessSize, const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< T > &Offsets, Align ParamAlignment)
static EVT getVectorizedVT(EVT VT, unsigned N, LLVMContext &C)
static SDValue lowerIntrinsicWOChain(SDValue Op, SelectionDAG &DAG)
static SDValue PerformFMinMaxCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned PTXVersion, unsigned SmVersion)
PerformFMinMaxCombine - Combine (fmaxnum (fmaxnum a, b), c) into (fmaxnum3 a, b, c).
static SDValue combineMulWide(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue lowerIntrinsicWChain(SDValue Op, SelectionDAG &DAG)
static bool isConstZero(const SDValue &Operand)
static SDValue LowerVectorArith(SDValue Op, SelectionDAG &DAG)
static SDValue LowerTcgen05MMADisableOutputLane(SDValue Op, SelectionDAG &DAG)
static bool IsMulWideOperandDemotable(SDValue Op, unsigned OptSize, OperandSignedness &S)
IsMulWideOperandDemotable - Checks if the provided DAG node is an operand that can be demoted to OptS...
static unsigned getTcgen05MMADisableOutputLane(unsigned IID)
static std::pair< APInt, APInt > getPRMTDemandedBits(const APInt &SelectorVal, const APInt &DemandedBits)
static APInt computePRMT(APInt A, APInt B, APInt Selector, unsigned Mode)
static ISD::NodeType getScalarOpcodeForReduction(unsigned ReductionOpcode)
static SDValue PerformREMCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue PerformMULCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI)
static void computeKnownBitsForPRMT(const SDValue Op, KnownBits &Known, const SelectionDAG &DAG, unsigned Depth)
static SDValue combineUnpackingMovIntoLoad(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Fold unpacking movs into a load by increasing the number of return values.
static SDValue LowerClusterLaunchControlQueryCancel(SDValue Op, SelectionDAG &DAG)
static std::optional< std::pair< SDValue, SDValue > > lowerTcgen05Ld(SDNode *N, SelectionDAG &DAG, bool HasOffset=false)
static SDValue lowerCvtRSIntrinsics(SDValue Op, SelectionDAG &DAG)
static std::optional< NVPTXISD::NodeType > getScalar3OpcodeForReduction(unsigned ReductionOpcode)
Get 3-input scalar reduction opcode.
static std::optional< std::pair< SDValue, SDValue > > replaceLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
replaceLoadVector - Convert vector loads into multi-output scalar loads.
static SDValue expandFSH64(SDValue A, SDValue B, SDValue ShiftAmount, SDLoc DL, unsigned Opcode, SelectionDAG &DAG)
static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS, unsigned OptSize, bool &IsSigned)
AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can be demoted to OptSize bits...
static SDValue TryMULWIDECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply of M/2 bits that produces...
static SDValue lowerPrmtIntrinsic(SDValue Op, SelectionDAG &DAG)
static SDValue combineMulSelectConstOne(SDValue X, SDValue Select, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)
static SDValue buildTreeReduction(const SmallVector< SDValue > &Elements, EVT EltTy, ArrayRef< std::pair< unsigned, unsigned > > Ops, const SDLoc &DL, const SDNodeFlags Flags, SelectionDAG &DAG)
Reduces the elements using the scalar operations provided.
static SDValue combineProxyReg(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SmallVector< unsigned, 16 > VectorizePTXValueVTs(const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< T > &Offsets, Align ParamAlignment, bool IsVAArg=false)
static SDValue getPRMT(SDValue A, SDValue B, SDValue Selector, SDLoc DL, SelectionDAG &DAG, unsigned Mode=NVPTX::PTXPrmtMode::NONE)
static SDValue matchMADConstOnePattern(SDValue Add)
static SDValue correctParamType(SDValue V, EVT ExpectedVT, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, SDLoc dl)
static ISD::NodeType getExtOpcode(const ISD::ArgFlagsTy &Flags)
static cl::opt< bool > UsePrecSqrtF32("nvptx-prec-sqrtf32", cl::Hidden, cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."), cl::init(true))
static void computeKnownBitsForLoadV(const SDValue Op, KnownBits &Known)
static APInt getPRMTSelector(const APInt &Selector, unsigned Mode)
static EVT promoteScalarIntegerPTX(const EVT VT)
PromoteScalarIntegerPTX Used to make sure the arguments/returns are suitable for passing and promote ...
static SDValue simplifyDemandedBitsForPRMT(SDValue PRMT, const APInt &DemandedBits, SelectionDAG &DAG, const TargetLowering &TLI, unsigned Depth)
static SDValue lowerFREM(SDValue Op, SelectionDAG &DAG)
static SDValue canonicalizePRMTInput(SDValue Op, SelectionDAG &DAG)
static SDValue sinkProxyReg(SDValue R, SDValue Chain, TargetLowering::DAGCombinerInfo &DCI)
static SDValue lowerFSH(SDValue Op, SelectionDAG &DAG)
static SDValue PromoteBinOpToF32(SDNode *N, SelectionDAG &DAG)
static SDValue PerformSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned int SmVersion)
static std::optional< std::pair< unsigned int, MVT > > getVectorLoweringShape(EVT VectorEVT, const NVPTXSubtarget &STI, unsigned AddressSpace)
static cl::opt< bool > ForceMinByValParamAlign("nvptx-force-min-byval-param-align", cl::Hidden, cl::desc("NVPTX Specific: force 4-byte minimal alignment for byval" " params of device functions."), cl::init(false))
static cl::opt< bool > UseApproxLog2F32("nvptx-approx-log2f32", cl::desc("NVPTX Specific: whether to use lg2.approx for log2"), cl::init(false))
Whereas CUDA's implementation (see libdevice) uses ex2.approx for exp2(), it does NOT use lg2....
static SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG)
static SDValue combineLOAD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)
static SDValue combineSTORE(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)
static SDValue PerformSHLCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
MachineInstr unsigned OpIdx
uint64_t High
#define P(N)
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
This file describes how to lower LLVM code to machine code.
Value * RHS
Value * LHS
BinaryOperator * Mul
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Definition APFloat.h:1098
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt getLoBits(unsigned numBits) const
Compute an APInt containing numBits lowbits from this APInt.
Definition APInt.cpp:644
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1540
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
Definition APInt.h:1391
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
Definition APInt.cpp:639
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:936
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition APInt.h:1330
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1488
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
Definition APInt.h:435
bool slt(const APInt &RHS) const
Signed less than comparison.
Definition APInt.h:1130
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
Definition APInt.cpp:482
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
Definition APInt.h:432
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
Definition APInt.h:1237
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
const T & back() const
back - Get the last element.
Definition ArrayRef.h:156
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
Definition ArrayRef.h:206
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:142
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition ArrayRef.h:191
an instruction that atomically reads a memory location, combines it with another value,...
@ Add
*p = old + v
@ FAdd
*p = old + v
@ Min
*p = old <signed v ? old : v
@ Sub
*p = old - v
@ And
*p = old & v
@ Xor
*p = old ^ v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ UMax
*p = old >unsigned v ? old : v
@ UDecWrap
Decrement one until a minimum value or zero.
bool isFloatingPointOperation() const
BinOp getOperation() const
This is an SDNode representing atomic operations.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
FunctionType * getFunctionType() const
This class represents a function call, abstracting a target machine's calling convention.
const APInt & getAPIntValue() const
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Diagnostic information for unsupported feature in backend.
void addFnAttr(Attribute::AttrKind Kind)
Add function attributes to this function.
Definition Function.cpp:637
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
This class is used to represent ISD::LOAD nodes.
MCSection * getDataSection() const
Instances of this class represent a uniqued identifier for a section in the current translation unit.
Definition MCSection.h:521
StringRef getName() const
getName - Get the symbol name.
Definition MCSymbol.h:188
Machine Value Type.
static auto integer_fixedlen_vector_valuetypes()
SimpleValueType SimpleTy
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
static auto fp_fixedlen_vector_valuetypes()
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
@ EK_Inline
EK_Inline - Jump table entries are emitted inline at their point of use.
const std::vector< MachineJumpTableEntry > & getJumpTables() const
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
This is an abstract virtual class for memory operations.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
EVT getMemoryVT() const
Return the type of the in-memory value.
static unsigned getFromTypeWidthForLoad(const MemSDNode *Mem)
bool hasAtomSwap128() const
bool hasF32x2Instructions() const
bool has256BitVectorLoadStore(unsigned AS) const
AtomicOrdering atomicOperationOrderAfterFenceSplit(const Instruction *I) const override
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
const NVPTXTargetMachine * nvTM
bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const override
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
NVPTXTargetLowering(const NVPTXTargetMachine &TM, const NVPTXSubtarget &STI)
std::string getPrototype(const DataLayout &DL, Type *, const ArgListTy &, const SmallVectorImpl< ISD::OutputArg > &, std::optional< unsigned > FirstVAArg, const CallBase &CB, unsigned UniqueCallSite) const
unsigned getPreferredFPToIntOpcode(unsigned Op, EVT FromVT, EVT ToVT) const override
bool useF32FTZ(const MachineFunction &MF) const
SDValue LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const
Align getFunctionArgumentAlignment(const Function *F, Type *Ty, unsigned Idx, const DataLayout &DL) const
SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &ExtraSteps, bool &UseOneConst, bool Reciprocal) const override
Hooks for building estimates in place of slower divisions and square roots.
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
std::string getParamName(const Function *F, int Idx) const
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
NVPTX::DivPrecisionLevel getDivF32Level(const MachineFunction &MF, const SDNode &N) const
bool shouldInsertFencesForAtomic(const Instruction *) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
Align getFunctionParamOptimizedAlign(const Function *F, Type *ArgTy, const DataLayout &DL) const
getFunctionParamOptimizedAlign - since function arguments are passed via .param space,...
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, EVT VT) const override
Return the ValueType of the result of SETCC operations.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
Align getFunctionByValParamAlign(const Function *F, Type *ArgTy, Align InitialAlign, const DataLayout &DL) const
Helper for computing alignment of a device function byval parameter.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool allowFMA(MachineFunction &MF, CodeGenOptLevel OptLevel) const
bool usePrecSqrtF32(const SDNode *N=nullptr) const
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
MCSection * SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const override
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
bool hasOneUse() const
Return true if there is exactly one use of this node.
unsigned getIROrder() const
Return the node ordering.
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
SDVTList getVTList() const
const SDValue & getOperand(unsigned Num) const
bool isUndef() const
Returns true if the node type is UNDEF or POISON.
iterator_range< user_iterator > users()
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
unsigned getOpcode() const
SectionKind - This is a simple POD value that classifies the properties of a section.
Definition SectionKind.h:22
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
LLVM_ABI SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
const TargetSubtargetInfo & getSubtarget() const
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getSymbolFunctionGlobalAddress(SDValue Op, Function **TargetFunction=nullptr)
Return a GlobalAddress of the function from the current module with name matching the given ExternalS...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
LLVM_ABI SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
ArrayRef< int > getMask() const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:146
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:140
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
void setMaxDivRemBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum div/rem the backend supports.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
Convenience method to set an operation to Promote and specify the type in a single call.
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth)
Tells the code generator which bitwidths to bypass.
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
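Illustrative sketch (hypothetical target constructor): describe the atomic operations the ISA supports natively so AtomicExpandPass can legalize the rest.
setMaxAtomicSizeInBitsSupported(64); // wider atomics become __atomic_* library calls
setMinCmpXchgSizeInBits(32);         // narrower cmpxchg is widened via masking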
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
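Illustrative sketch (hypothetical target constructor): a target without unordered f32 comparisons can ask legalization to rewrite those condition codes.
setCondCodeAction({ISD::SETUGT, ISD::SETUGE, ISD::SETULT, ISD::SETULE},
                  MVT::f32, Expand);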
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
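Illustrative sketch (hypothetical target constructor): request PerformDAGCombine() callbacks for these generic nodes.
setTargetDAGCombine({ISD::ADD, ISD::AND, ISD::FADD, ISD::LOAD});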
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
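Illustrative sketch (hypothetical target constructor): an ISA without sign-extending i8 loads into i32, and without f64-to-f32 truncating stores, would mark both Expand so legalization splits them into a plain memory access plus an explicit extend or round.
setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i8, Expand);
setTruncStoreAction(MVT::f64, MVT::f32, Expand);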
std::vector< ArgListEntry > ArgListTy
virtual Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
virtual Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
Inserts a target-specific fence intrinsic into the IR.
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "lookthrough" ops that don't contrib...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
TargetLowering(const TargetLowering &)=delete
SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const
Truncate Op to ResultVT.
SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const
Expand round(fp) to fp conversion.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
TargetOptions Options
MCSymbol * getSymbol(const GlobalValue *GV) const
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetFrameLowering * getFrameLowering() const
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:344
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:198
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
A raw_ostream that writes to an std::string.
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt pow(const APInt &X, int64_t N)
Compute X^N for N>=0.
Definition APInt.cpp:3155
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition ISDOpcodes.h:41
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition ISDOpcodes.h:807
@ CTLZ_ZERO_UNDEF
Definition ISDOpcodes.h:780
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition ISDOpcodes.h:270
@ BSWAP
Byte Swap and Counting operators.
Definition ISDOpcodes.h:771
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:289
@ ADD
Simple integer binary arithmetic operators.
Definition ISDOpcodes.h:259
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition ISDOpcodes.h:841
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition ISDOpcodes.h:511
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition ISDOpcodes.h:215
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition ISDOpcodes.h:868
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
Definition ISDOpcodes.h:577
@ FADD
Simple binary floating point operators.
Definition ISDOpcodes.h:410
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
Definition ISDOpcodes.h:744
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition ISDOpcodes.h:275
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition ISDOpcodes.h:249
@ SIGN_EXTEND
Conversion operators.
Definition ISDOpcodes.h:832
@ SSUBO
Like [SU]ADDO, but for subtraction.
Definition ISDOpcodes.h:347
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
Definition ISDOpcodes.h:369
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition ISDOpcodes.h:784
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
Definition ISDOpcodes.h:242
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
Definition ISDOpcodes.h:225
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
Definition ISDOpcodes.h:343
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition ISDOpcodes.h:701
@ SHL
Shift and rotation operations.
Definition ISDOpcodes.h:762
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition ISDOpcodes.h:642
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
Definition ISDOpcodes.h:607
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition ISDOpcodes.h:569
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
Definition ISDOpcodes.h:219
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition ISDOpcodes.h:838
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition ISDOpcodes.h:799
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
Definition ISDOpcodes.h:379
@ SMULO
Like [SU]ADDO, but for multiplication.
Definition ISDOpcodes.h:351
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition ISDOpcodes.h:876
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition ISDOpcodes.h:724
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition ISDOpcodes.h:793
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:323
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition ISDOpcodes.h:110
@ STRICT_FP_TO_UINT
Definition ISDOpcodes.h:471
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:470
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:914
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition ISDOpcodes.h:736
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition ISDOpcodes.h:200
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:299
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
Definition ISDOpcodes.h:236
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition ISDOpcodes.h:558
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition ISDOpcodes.h:947
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition ISDOpcodes.h:844
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition ISDOpcodes.h:821
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition ISDOpcodes.h:527
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition ISDOpcodes.h:360
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:333
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition ISDOpcodes.h:208
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
Definition ISDOpcodes.h:549
LLVM_ABI bool allOperandsUndef(const SDNode *N)
Return true if the node has at least one operand and all operands of the specified node are ISD::UNDE...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
@ Bitcast
Perform the operation on a different, but equivalently sized type.
@ TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2
@ TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT
@ TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1
@ CLUSTERLAUNCHCONTROL_QUERY_CANCEL_IS_CANCELED
@ TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2
@ CALL
This node represents a PTX call instruction.
@ TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1
@ TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT
@ TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG1
@ TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1
@ TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1
@ TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2
@ TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG2
@ TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2
@ TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2
@ CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_X
@ TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1
@ TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT
@ TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT
@ UNPACK_VECTOR
This node is the inverse of NVPTX::BUILD_VECTOR.
@ CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Y
@ TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG1
@ TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT
@ TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT
@ DeclareScalarParam
These nodes represent a parameter declaration.
@ CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Z
@ ATOMIC_CMP_SWAP_B128
These nodes are used to lower atomic instructions with i128 type.
@ BUILD_VECTOR
This node is similar to ISD::BUILD_VECTOR except that the output may be implicitly bitcast to a scala...
@ TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG2
@ TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT
@ TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT
@ TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1
@ TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2
bool isPackedVectorTy(EVT VT)
DivPrecisionLevel
Definition NVPTX.h:252
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:477
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:829
bool shouldEmitPTXNoReturn(const Value *V, const TargetMachine &TM)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
MaybeAlign getAlign(const CallInst &I, unsigned Index)
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1655
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2472
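Illustrative, self-contained sketch of the STLExtras range helpers referenced above (enumerate and zip); the printPairs function is made up for the example.
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/raw_ostream.h"
static void printPairs() {
  llvm::SmallVector<int, 4> A = {10, 20, 30};
  llvm::SmallVector<char, 4> B = {'x', 'y', 'z'};
  for (const auto &En : llvm::enumerate(A))   // yields (index, value) pairs
    llvm::outs() << En.index() << " -> " << En.value() << "\n";
  for (auto [AV, BV] : llvm::zip(A, B))       // stops at the end of the shorter range
    llvm::outs() << AV << " " << BV << "\n";
}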
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:644
uint64_t PowerOf2Ceil(uint64_t A)
Returns the power of two which is greater than or equal to the given value.
Definition MathExtras.h:396
bool isReleaseOrStronger(AtomicOrdering AO)
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
Definition STLExtras.h:1968
unsigned promoteScalarArgumentSize(unsigned size)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
bool shouldPassAsArray(Type *Ty)
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
@ Default
-O2, -Os
Definition CodeGen.h:85
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:548
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
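Illustrative, self-contained sketch of the alignment helpers referenced above (alignTo and PowerOf2Ceil); the function name is made up.
#include "llvm/Support/Alignment.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
static void alignmentHelpers() {
  assert(llvm::alignTo(13, llvm::Align(8)) == 16);  // round 13 up to an 8-byte boundary
  assert(llvm::PowerOf2Ceil(100) == 128);           // smallest power of two >= 100
}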
DWARFExpression::Operation Op
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition Analysis.cpp:119
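Illustrative sketch of ComputeValueVTs splitting an aggregate IR type into the EVTs codegen will use for it. The countPieces helper is made up, TLI and DL are assumed to come from the current target and module, and the exact overload set may differ between LLVM versions.
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
static unsigned countPieces(const llvm::TargetLowering &TLI,
                            const llvm::DataLayout &DL, llvm::Type *Ty) {
  llvm::SmallVector<llvm::EVT, 8> ValueVTs;
  llvm::ComputeValueVTs(TLI, DL, Ty, ValueVTs);
  // For example, the IR type {i32, float} yields two entries: i32 and f32.
  return ValueVTs.size();
}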
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isAcquireOrStronger(AtomicOrdering AO)
constexpr unsigned BitWidth
bool isKernelFunction(const Function &F)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:560
Function * getMaybeBitcastedCallee(const CallBase *CB)
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition Sequence.h:305
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:869
#define N
static LLVM_ABI const fltSemantics & IEEEsingle() LLVM_READNONE
Definition APFloat.cpp:266
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
Extended Value Type.
Definition ValueTypes.h:35
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition ValueTypes.h:395
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition ValueTypes.h:137
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
Definition ValueTypes.h:74
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
Definition ValueTypes.h:121
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition ValueTypes.h:284
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
Definition ValueTypes.h:300
ElementCount getVectorElementCount() const
Definition ValueTypes.h:350
bool is32BitVector() const
Return true if this is a 32-bit vector type.
Definition ValueTypes.h:197
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition ValueTypes.h:373
uint64_t getScalarSizeInBits() const
Definition ValueTypes.h:385
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:316
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition ValueTypes.h:381
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:168
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition ValueTypes.h:323
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
Definition ValueTypes.h:256
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition ValueTypes.h:328
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition ValueTypes.h:157
EVT changeVectorElementType(EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
Definition ValueTypes.h:102
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition ValueTypes.h:336
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition ValueTypes.h:152
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
Definition KnownBits.h:233
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
void resetAll()
Resets the known state of all bits.
Definition KnownBits.h:74
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
Definition KnownBits.h:219
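Illustrative, self-contained sketch of the KnownBits helpers referenced above (concat, resetAll, insertBits); the buildKnown function is made up.
#include "llvm/Support/KnownBits.h"
static llvm::KnownBits buildKnown() {
  llvm::KnownBits Low(16), High(16);
  Low.setAllZero();                        // low half known to be all zeros
  High.resetAll();                         // high half: nothing known
  llvm::KnownBits Full = High.concat(Low); // 32-bit value with Low in the bottom bits
  llvm::KnownBits Patch(8);
  Patch.setAllOnes();                      // an 8-bit chunk known to be all ones
  Full.insertBits(Patch, 16);              // now bits [16, 24) are known ones
  return Full;
}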
This class contains a discriminated union of information about pointers in memory operands,...
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
These are IR-level optimization flags that may be propagated to SDNodes.
bool hasAllowContract() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...